geode-commits mailing list archives

From sai_boorlaga...@apache.org
Subject [23/55] [abbrv] incubator-geode git commit: GEODE-1340: Refactored the names of lucene integration tests.
Date Tue, 24 May 2016 18:54:04 GMT
GEODE-1340: Refactored the names of lucene integration tests.

* Renamed the integration tests so their class names end with IntegrationTest rather than JUnitTest
* Modified LuceneIndexXmlParserJUnitTest to mock its cache XML collaborators with Mockito, so the test is more closely aligned with unit tests (a setup sketch follows below)

This closes #141
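
For reference, the Mockito-based setup introduced in the last hunk of this diff swaps the real CacheCreation/RegionCreation objects for mocks. The sketch below illustrates that pattern; the class name LuceneXmlParserSetupSketch is hypothetical, and the sketch assumes the geode-lucene test classpath (Mockito plus the internal xmlcache and extension classes) rather than reproducing the committed test verbatim:

    package com.gemstone.gemfire.cache.lucene.internal.xml;

    import java.util.Stack;

    import org.junit.Before;
    import org.mockito.Mockito;

    import com.gemstone.gemfire.internal.cache.extension.SimpleExtensionPoint;
    import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
    import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
    import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;

    public class LuceneXmlParserSetupSketch {

      private LuceneXmlParser parser;
      private RegionCreation rc;
      private Stack<Object> stack;

      @Before
      public void setUp() {
        this.parser = new LuceneXmlParser();

        // Mock the cache-XML creation objects instead of constructing real ones,
        // so the parser test no longer depends on real cache plumbing.
        CacheCreation cache = Mockito.mock(CacheCreation.class);
        RegionCreation regionCreation = Mockito.mock(RegionCreation.class);
        RegionAttributesCreation rac = Mockito.mock(RegionAttributesCreation.class);
        Mockito.when(regionCreation.getFullPath()).thenReturn("/region");
        Mockito.when(regionCreation.getAttributes()).thenReturn(rac);
        Mockito.when(regionCreation.getExtensionPoint())
            .thenReturn(new SimpleExtensionPoint(regionCreation, regionCreation));
        this.rc = regionCreation;

        // The parser reads the enclosing cache and region off a SAX-style stack.
        this.stack = new Stack<Object>();
        stack.push(cache);
        stack.push(rc);
      }
    }

Stubbing getExtensionPoint() with a SimpleExtensionPoint presumably lets the parser attach the parsed lucene index declaration to the region without a real cache behind it.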


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/eaa15939
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/eaa15939
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/eaa15939

Branch: refs/heads/feature/GEODE-1153
Commit: eaa1593949556e515feb886a13fd93f6f3e17016
Parents: f24b9fb
Author: nabarun <nnag@pivotal.io>
Authored: Wed May 4 10:07:23 2016 -0700
Committer: Dan Smith <upthewaterspout@apache.org>
Committed: Mon May 9 12:08:24 2016 -0700

----------------------------------------------------------------------
 .../LuceneIndexRecoveryHAIntegrationTest.java   | 211 ++++++++++++++
 .../LuceneIndexRecoveryHAJUnitTest.java         | 211 --------------
 .../LuceneQueryImplIntegrationTest.java         | 123 ++++++++
 .../internal/LuceneQueryImplJUnitTest.java      | 123 --------
 .../LuceneServiceImplIntegrationTest.java       | 292 +++++++++++++++++++
 .../internal/LuceneServiceImplJUnitTest.java    | 292 -------------------
 .../xml/LuceneIndexXmlParserJUnitTest.java      |  13 +-
 7 files changed, 637 insertions(+), 628 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
new file mode 100644
index 0000000..77d2a5c
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gemstone.gemfire.cache.lucene.internal;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.EvictionAction;
+import com.gemstone.gemfire.cache.EvictionAlgorithm;
+import com.gemstone.gemfire.cache.PartitionAttributes;
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
+import com.gemstone.gemfire.cache.lucene.LuceneQuery;
+import com.gemstone.gemfire.cache.lucene.LuceneQueryResults;
+import com.gemstone.gemfire.cache.lucene.LuceneService;
+import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;
+import com.gemstone.gemfire.cache.lucene.internal.repository.IndexRepository;
+import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
+import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.HeterogeneousLuceneSerializer;
+import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.Type1;
+import com.gemstone.gemfire.internal.cache.BucketNotFoundException;
+import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.test.junit.categories.FlakyTest;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+import com.jayway.awaitility.Awaitility;
+
+@Category(IntegrationTest.class)
+public class LuceneIndexRecoveryHAIntegrationTest {
+
+  private static final String INDEX = "index";
+  private static final String REGION = "indexedRegion";
+  String[] indexedFields = new String[] { "txt" };
+  HeterogeneousLuceneSerializer mapper = new HeterogeneousLuceneSerializer(indexedFields);
+  Analyzer analyzer = new StandardAnalyzer();
+
+  Cache cache;
+
+  @Before
+  public void setup() {
+    indexedFields = new String[] { "txt" };
+    mapper = new HeterogeneousLuceneSerializer(indexedFields);
+    analyzer = new StandardAnalyzer();
+    LuceneServiceImpl.registerDataSerializables();
+
+    cache = new CacheFactory().set("mcast-port", "0").create();
+  }
+
+  @After
+  public void tearDown() {
+    Cache cache = GemFireCacheImpl.getInstance();
+    if (cache != null) {
+      cache.close();
+    }
+  }
+
+  /**
+   * On rebalance, new repository manager will be created. It will try to read fileRegion and construct index. This test
+   * simulates the same.
+   */
+  @Test
+  public void recoverRepoInANewNode() throws BucketNotFoundException, IOException {
+    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory().setTotalNumBuckets(1).create();
+    RegionFactory<String, String> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    regionfactory.setPartitionAttributes(attrs);
+
+    PartitionedRegion userRegion = (PartitionedRegion) regionfactory.create("userRegion");
+    // put an entry to create the bucket
+    userRegion.put("rebalance", "test");
+
+    PartitionedRegion fileRegion = (PartitionedRegion) regionfactory.create("fileRegion");
+    PartitionedRegion chunkRegion = (PartitionedRegion) regionfactory.create("chunkRegion");
+
+    RepositoryManager manager = new PartitionedRepositoryManager(userRegion, fileRegion, chunkRegion, mapper, analyzer);
+    IndexRepository repo = manager.getRepository(userRegion, 0, null);
+    assertNotNull(repo);
+
+    repo.create("rebalance", "test");
+    repo.commit();
+
+    // close the region to simulate bucket movement. New node will create repo using data persisted by old region
+    userRegion.close();
+
+    userRegion = (PartitionedRegion) regionfactory.create("userRegion");
+    userRegion.put("rebalance", "test");
+    manager = new PartitionedRepositoryManager(userRegion, fileRegion, chunkRegion, mapper, analyzer);
+    IndexRepository newRepo = manager.getRepository(userRegion, 0, null);
+
+    Assert.assertNotEquals(newRepo, repo);
+  }
+
+  @Category(FlakyTest.class) // GEODE-1012: time sensitive, awaitility, short timeout
+  @Test
+  public void recoverPersistentIndex() throws Exception {
+    String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX, REGION);
+
+    LuceneService service = LuceneServiceProvider.get(cache);
+    service.createIndex(INDEX, REGION, Type1.fields);
+
+    RegionFactory<String, Type1> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
+    Region<String, Type1> userRegion = regionFactory.create(REGION);
+
+    Type1 value = new Type1("hello world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value1", value);
+    value = new Type1("test world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value2", value);
+    value = new Type1("lucene world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value3", value);
+
+    waitUntilQueueEmpty(aeqId);
+
+    LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
+    LuceneQueryResults<Integer, Type1> results = query.search();
+    Assert.assertEquals(3, results.size());
+
+    // close the cache and all the regions
+    cache.close();
+
+    cache = new CacheFactory().set("mcast-port", "0").create();
+    service = LuceneServiceProvider.get(cache);
+    service.createIndex(INDEX, REGION, Type1.fields);
+    regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
+    userRegion = regionFactory.create(REGION);
+
+    query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
+    results = query.search();
+    Assert.assertEquals(3, results.size());
+
+    PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
+    assertNotNull(chunkRegion);
+    chunkRegion.destroyRegion();
+    PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
+    assertNotNull(fileRegion);
+    fileRegion.destroyRegion();
+    userRegion.destroyRegion();
+  }
+
+  @Category(FlakyTest.class) // GEODE-1013: time sensitive, awaitility, short timeout, possible disk pollution
+  @Test
+  public void overflowRegionIndex() throws Exception {
+    String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX, REGION);
+
+    LuceneService service = LuceneServiceProvider.get(cache);
+    service.createIndex(INDEX, REGION, Type1.fields);
+
+    RegionFactory<String, Type1> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
+    EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
+    evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
+    regionFactory.setEvictionAttributes(evicAttr);
+
+    PartitionedRegion userRegion = (PartitionedRegion) regionFactory.create(REGION);
+    Assert.assertEquals(0, userRegion.getDiskRegionStats().getNumOverflowOnDisk());
+
+    Type1 value = new Type1("hello world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value1", value);
+    value = new Type1("test world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value2", value);
+    value = new Type1("lucene world", 1, 2L, 3.0, 4.0f);
+    userRegion.put("value3", value);
+
+    waitUntilQueueEmpty(aeqId);
+
+    PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
+    assertNotNull(fileRegion);
+    PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
+    assertNotNull(chunkRegion);
+    Assert.assertTrue(0 < userRegion.getDiskRegionStats().getNumOverflowOnDisk());
+
+    LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
+    LuceneQueryResults<Integer, Type1> results = query.search();
+    Assert.assertEquals(3, results.size());
+  }
+
+  private void waitUntilQueueEmpty(final String aeqId) {
+    // TODO flush queue
+    AsyncEventQueue queue = cache.getAsyncEventQueue(aeqId);
+    Awaitility.waitAtMost(1000, TimeUnit.MILLISECONDS).until(() -> assertEquals(0, queue.size()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAJUnitTest.java
deleted file mode 100644
index ebde9d4..0000000
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneIndexRecoveryHAJUnitTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package com.gemstone.gemfire.cache.lucene.internal;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.EvictionAction;
-import com.gemstone.gemfire.cache.EvictionAlgorithm;
-import com.gemstone.gemfire.cache.PartitionAttributes;
-import com.gemstone.gemfire.cache.PartitionAttributesFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
-import com.gemstone.gemfire.cache.lucene.LuceneQuery;
-import com.gemstone.gemfire.cache.lucene.LuceneQueryResults;
-import com.gemstone.gemfire.cache.lucene.LuceneService;
-import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;
-import com.gemstone.gemfire.cache.lucene.internal.repository.IndexRepository;
-import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
-import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.HeterogeneousLuceneSerializer;
-import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.Type1;
-import com.gemstone.gemfire.internal.cache.BucketNotFoundException;
-import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.test.junit.categories.FlakyTest;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-import com.jayway.awaitility.Awaitility;
-
-@Category(IntegrationTest.class)
-public class LuceneIndexRecoveryHAJUnitTest {
-
-  private static final String INDEX = "index";
-  private static final String REGION = "indexedRegion";
-  String[] indexedFields = new String[] { "txt" };
-  HeterogeneousLuceneSerializer mapper = new HeterogeneousLuceneSerializer(indexedFields);
-  Analyzer analyzer = new StandardAnalyzer();
-
-  Cache cache;
-
-  @Before
-  public void setup() {
-    indexedFields = new String[] { "txt" };
-    mapper = new HeterogeneousLuceneSerializer(indexedFields);
-    analyzer = new StandardAnalyzer();
-    LuceneServiceImpl.registerDataSerializables();
-
-    cache = new CacheFactory().set("mcast-port", "0").create();
-  }
-
-  @After
-  public void tearDown() {
-    Cache cache = GemFireCacheImpl.getInstance();
-    if (cache != null) {
-      cache.close();
-    }
-  }
-
-  /**
-   * On rebalance, new repository manager will be created. It will try to read fileRegion and construct index. This test
-   * simulates the same.
-   */
-  @Test
-  public void recoverRepoInANewNode() throws BucketNotFoundException, IOException {
-    PartitionAttributes<String, String> attrs = new PartitionAttributesFactory().setTotalNumBuckets(1).create();
-    RegionFactory<String, String> regionfactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-    regionfactory.setPartitionAttributes(attrs);
-
-    PartitionedRegion userRegion = (PartitionedRegion) regionfactory.create("userRegion");
-    // put an entry to create the bucket
-    userRegion.put("rebalance", "test");
-
-    PartitionedRegion fileRegion = (PartitionedRegion) regionfactory.create("fileRegion");
-    PartitionedRegion chunkRegion = (PartitionedRegion) regionfactory.create("chunkRegion");
-
-    RepositoryManager manager = new PartitionedRepositoryManager(userRegion, fileRegion, chunkRegion, mapper, analyzer);
-    IndexRepository repo = manager.getRepository(userRegion, 0, null);
-    assertNotNull(repo);
-
-    repo.create("rebalance", "test");
-    repo.commit();
-
-    // close the region to simulate bucket movement. New node will create repo using data persisted by old region
-    userRegion.close();
-
-    userRegion = (PartitionedRegion) regionfactory.create("userRegion");
-    userRegion.put("rebalance", "test");
-    manager = new PartitionedRepositoryManager(userRegion, fileRegion, chunkRegion, mapper, analyzer);
-    IndexRepository newRepo = manager.getRepository(userRegion, 0, null);
-
-    Assert.assertNotEquals(newRepo, repo);
-  }
-
-  @Category(FlakyTest.class) // GEODE-1012: time sensitive, awaitility, short timeout
-  @Test
-  public void recoverPersistentIndex() throws Exception {
-    String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX, REGION);
-
-    LuceneService service = LuceneServiceProvider.get(cache);
-    service.createIndex(INDEX, REGION, Type1.fields);
-
-    RegionFactory<String, Type1> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
-    Region<String, Type1> userRegion = regionFactory.create(REGION);
-
-    Type1 value = new Type1("hello world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value1", value);
-    value = new Type1("test world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value2", value);
-    value = new Type1("lucene world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value3", value);
-
-    waitUntilQueueEmpty(aeqId);
-
-    LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
-    LuceneQueryResults<Integer, Type1> results = query.search();
-    Assert.assertEquals(3, results.size());
-
-    // close the cache and all the regions
-    cache.close();
-
-    cache = new CacheFactory().set("mcast-port", "0").create();
-    service = LuceneServiceProvider.get(cache);
-    service.createIndex(INDEX, REGION, Type1.fields);
-    regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT);
-    userRegion = regionFactory.create(REGION);
-
-    query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
-    results = query.search();
-    Assert.assertEquals(3, results.size());
-
-    PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
-    assertNotNull(chunkRegion);
-    chunkRegion.destroyRegion();
-    PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
-    assertNotNull(fileRegion);
-    fileRegion.destroyRegion();
-    userRegion.destroyRegion();
-  }
-
-  @Category(FlakyTest.class) // GEODE-1013: time sensitive, awaitility, short timeout, possible disk pollution
-  @Test
-  public void overflowRegionIndex() throws Exception {
-    String aeqId = LuceneServiceImpl.getUniqueIndexName(INDEX, REGION);
-
-    LuceneService service = LuceneServiceProvider.get(cache);
-    service.createIndex(INDEX, REGION, Type1.fields);
-
-    RegionFactory<String, Type1> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
-    EvictionAttributesImpl evicAttr = new EvictionAttributesImpl().setAction(EvictionAction.OVERFLOW_TO_DISK);
-    evicAttr.setAlgorithm(EvictionAlgorithm.LRU_ENTRY).setMaximum(1);
-    regionFactory.setEvictionAttributes(evicAttr);
-
-    PartitionedRegion userRegion = (PartitionedRegion) regionFactory.create(REGION);
-    Assert.assertEquals(0, userRegion.getDiskRegionStats().getNumOverflowOnDisk());
-
-    Type1 value = new Type1("hello world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value1", value);
-    value = new Type1("test world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value2", value);
-    value = new Type1("lucene world", 1, 2L, 3.0, 4.0f);
-    userRegion.put("value3", value);
-
-    waitUntilQueueEmpty(aeqId);
-
-    PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
-    assertNotNull(fileRegion);
-    PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
-    assertNotNull(chunkRegion);
-    Assert.assertTrue(0 < userRegion.getDiskRegionStats().getNumOverflowOnDisk());
-
-    LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory().create(INDEX, REGION, "s:world");
-    LuceneQueryResults<Integer, Type1> results = query.search();
-    Assert.assertEquals(3, results.size());
-  }
-
-  private void waitUntilQueueEmpty(final String aeqId) {
-    // TODO flush queue
-    AsyncEventQueue queue = cache.getAsyncEventQueue(aeqId);
-    Awaitility.waitAtMost(1000, TimeUnit.MILLISECONDS).until(() -> assertEquals(0, queue.size()));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplIntegrationTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplIntegrationTest.java
new file mode 100644
index 0000000..90ab022
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplIntegrationTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.gemstone.gemfire.cache.lucene.internal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.execute.FunctionAdapter;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.lucene.LuceneQueryResults;
+import com.gemstone.gemfire.cache.lucene.LuceneResultStruct;
+import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunction;
+import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunctionContext;
+import com.gemstone.gemfire.cache.lucene.internal.distributed.TopEntriesCollector;
+import com.gemstone.gemfire.cache.lucene.internal.distributed.TopEntriesCollectorManager;
+import com.gemstone.gemfire.cache.lucene.internal.repository.IndexResultCollector;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class LuceneQueryImplIntegrationTest {
+  private static int LIMIT = 123;
+  private Cache cache;
+  private Region<Object, Object> region;
+
+  @Before
+  public void createCache() {
+    cache = new CacheFactory().set("mcast-port", "0").create();
+    region = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT).create("region");
+  }
+
+  @After
+  public void removeCache() {
+    FunctionService.unregisterFunction(LuceneFunction.ID);
+    cache.close();
+  }
+
+  @Test
+  public void test() {
+    // Register a fake function to observe the function invocation
+    FunctionService.unregisterFunction(LuceneFunction.ID);
+    TestLuceneFunction function = new TestLuceneFunction();
+    FunctionService.registerFunction(function);
+
+    StringQueryProvider provider = new StringQueryProvider();
+    LuceneQueryImpl<Object, Object> query = new LuceneQueryImpl<>("index", region, provider, null, LIMIT, 20);
+    LuceneQueryResults<Object, Object> results = query.search();
+
+    assertTrue(function.wasInvoked);
+    assertEquals(2f * LIMIT, results.getMaxScore(), 0.01);
+    int resultCount = 0;
+    while (results.hasNextPage()) {
+      List<LuceneResultStruct<Object, Object>> nextPage = results.getNextPage();
+      resultCount += nextPage.size();
+      if (results.hasNextPage()) {
+        assertEquals(20, nextPage.size());
+      }
+    }
+    assertEquals(LIMIT, resultCount);
+
+    LuceneFunctionContext<? extends IndexResultCollector> funcArgs = function.args;
+    assertEquals(provider.getQueryString(), ((StringQueryProvider) funcArgs.getQueryProvider()).getQueryString());
+    assertEquals("index", funcArgs.getIndexName());
+    assertEquals(LIMIT, funcArgs.getLimit());
+  }
+
+  private static class TestLuceneFunction extends FunctionAdapter {
+    private static final long serialVersionUID = 1L;
+    private boolean wasInvoked;
+    private LuceneFunctionContext<? extends IndexResultCollector> args;
+
+    @Override
+    public void execute(FunctionContext context) {
+      this.args = (LuceneFunctionContext<?>) context.getArguments();
+      TopEntriesCollectorManager manager = (TopEntriesCollectorManager) args.getCollectorManager();
+
+      assertEquals(LIMIT, manager.getLimit());
+
+      wasInvoked = true;
+      TopEntriesCollector lastResult = new TopEntriesCollector(null, 2 * LIMIT);
+      // put more than LIMIT entries. The resultCollector should trim the results
+      for (int i = LIMIT * 2; i >= 0; i--) {
+        lastResult.collect(i, i * 1f);
+      }
+      assertEquals(LIMIT * 2, lastResult.getEntries().getHits().size());
+
+      context.getResultSender().lastResult(lastResult);
+    }
+
+    @Override
+    public String getId() {
+      return LuceneFunction.ID;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplJUnitTest.java
deleted file mode 100644
index 2439645..0000000
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneQueryImplJUnitTest.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.gemstone.gemfire.cache.lucene.internal;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.Region;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.execute.FunctionAdapter;
-import com.gemstone.gemfire.cache.execute.FunctionContext;
-import com.gemstone.gemfire.cache.execute.FunctionService;
-import com.gemstone.gemfire.cache.lucene.LuceneQueryResults;
-import com.gemstone.gemfire.cache.lucene.LuceneResultStruct;
-import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunction;
-import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunctionContext;
-import com.gemstone.gemfire.cache.lucene.internal.distributed.TopEntriesCollector;
-import com.gemstone.gemfire.cache.lucene.internal.distributed.TopEntriesCollectorManager;
-import com.gemstone.gemfire.cache.lucene.internal.repository.IndexResultCollector;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category(IntegrationTest.class)
-public class LuceneQueryImplJUnitTest {
-  private static int LIMIT = 123;
-  private Cache cache;
-  private Region<Object, Object> region;
-
-  @Before
-  public void createCache() {
-    cache = new CacheFactory().set("mcast-port", "0").create();
-    region = cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT).create("region");
-  }
-
-  @After
-  public void removeCache() {
-    FunctionService.unregisterFunction(LuceneFunction.ID);
-    cache.close();
-  }
-
-  @Test
-  public void test() {
-    // Register a fake function to observe the function invocation
-    FunctionService.unregisterFunction(LuceneFunction.ID);
-    TestLuceneFunction function = new TestLuceneFunction();
-    FunctionService.registerFunction(function);
-
-    StringQueryProvider provider = new StringQueryProvider();
-    LuceneQueryImpl<Object, Object> query = new LuceneQueryImpl<>("index", region, provider, null, LIMIT, 20);
-    LuceneQueryResults<Object, Object> results = query.search();
-
-    assertTrue(function.wasInvoked);
-    assertEquals(2f * LIMIT, results.getMaxScore(), 0.01);
-    int resultCount = 0;
-    while (results.hasNextPage()) {
-      List<LuceneResultStruct<Object, Object>> nextPage = results.getNextPage();
-      resultCount += nextPage.size();
-      if (results.hasNextPage()) {
-        assertEquals(20, nextPage.size());
-      }
-    }
-    assertEquals(LIMIT, resultCount);
-
-    LuceneFunctionContext<? extends IndexResultCollector> funcArgs = function.args;
-    assertEquals(provider.getQueryString(), ((StringQueryProvider) funcArgs.getQueryProvider()).getQueryString());
-    assertEquals("index", funcArgs.getIndexName());
-    assertEquals(LIMIT, funcArgs.getLimit());
-  }
-
-  private static class TestLuceneFunction extends FunctionAdapter {
-    private static final long serialVersionUID = 1L;
-    private boolean wasInvoked;
-    private LuceneFunctionContext<? extends IndexResultCollector> args;
-
-    @Override
-    public void execute(FunctionContext context) {
-      this.args = (LuceneFunctionContext<?>) context.getArguments();
-      TopEntriesCollectorManager manager = (TopEntriesCollectorManager) args.getCollectorManager();
-
-      assertEquals(LIMIT, manager.getLimit());
-
-      wasInvoked = true;
-      TopEntriesCollector lastResult = new TopEntriesCollector(null, 2 * LIMIT);
-      // put more than LIMIT entries. The resultCollector should trim the results
-      for (int i = LIMIT * 2; i >= 0; i--) {
-        lastResult.collect(i, i * 1f);
-      }
-      assertEquals(LIMIT * 2, lastResult.getEntries().getHits().size());
-
-      context.getResultSender().lastResult(lastResult);
-    }
-
-    @Override
-    public String getId() {
-      return LuceneFunction.ID;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplIntegrationTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplIntegrationTest.java
new file mode 100644
index 0000000..fa3392c
--- /dev/null
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplIntegrationTest.java
@@ -0,0 +1,292 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gemstone.gemfire.cache.lucene.internal;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.gemstone.gemfire.cache.Region;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryparser.classic.ParseException;
+import org.junit.After;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.ExpectedException;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
+import com.gemstone.gemfire.cache.client.ClientCache;
+import com.gemstone.gemfire.cache.client.ClientCacheFactory;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.lucene.LuceneService;
+import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;
+import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunction;
+import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
+import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.HeterogeneousLuceneSerializer;
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class LuceneServiceImplIntegrationTest {
+
+  Cache cache;
+  ClientCache clientCache;
+  private LuceneIndexImpl repo;
+  private HeterogeneousLuceneSerializer mapper;
+  private StandardAnalyzer analyzer = new StandardAnalyzer();
+  private IndexWriter writer;
+  LuceneServiceImpl service = null;
+  private static final Logger logger = LogService.getLogger();
+
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+
+  @Test
+  public void luceneServiceProviderGetShouldAcceptClientCacheAsAParameter(){
+    clientCache = getClientCache();
+    LuceneService luceneService = LuceneServiceProvider.get(clientCache);
+    assertNotNull(luceneService);
+  }
+
+  // lucene service will register query execution function on initialization
+  @Test
+  public void shouldRegisterQueryFunction() {
+    Function function = FunctionService.getFunction(LuceneFunction.ID);
+    assertNull(function);
+
+    cache = getCache();
+    new LuceneServiceImpl().init(cache);
+
+    function = FunctionService.getFunction(LuceneFunction.ID);
+    assertNotNull(function);
+  }
+
+  @After
+  public void destroyService() {
+    if (null != service) {
+      service = null;
+    }
+  }
+
+  @After
+  public void destroyCache() {
+    if (null != cache && !cache.isClosed()) {
+      cache.close();
+      cache = null;
+    }
+    if (null != clientCache  && !clientCache.isClosed()) {
+      clientCache.close();
+      clientCache = null;
+    }
+  }
+
+  private ClientCache getClientCache() {
+    if (null == clientCache) {
+      clientCache = new ClientCacheFactory().set("mcast-port", "0").create();
+    }
+    else{
+      return clientCache;
+    }
+    return clientCache;
+  }
+
+  private Cache getCache() {
+    if (null == cache) {
+      cache = new CacheFactory().set("mcast-port", "0").create();
+    }
+    return cache;
+  }
+
+  private LuceneService getService() {
+    if (null == cache) {
+      getCache();
+    }
+    if (null == service) {
+      service = (LuceneServiceImpl)LuceneServiceProvider.get(cache);
+    }
+    return service;
+  }
+
+
+  private Region createRegion(String regionName, RegionShortcut shortcut) {
+    return cache.createRegionFactory(shortcut).create(regionName);
+  }
+
+  private LocalRegion createPR(String regionName, boolean isSubRegion) {
+    if (isSubRegion) {
+      LocalRegion root = (LocalRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create("root");
+      LocalRegion region = (LocalRegion) cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT).
+        createSubregion(root, regionName);
+      return region;
+    }
+    else {
+      LocalRegion region = (LocalRegion) createRegion(regionName, RegionShortcut.PARTITION_PERSISTENT);
+      return region;
+    }
+  }
+
+  private LocalRegion createRR(String regionName, boolean isSubRegion) {
+    if (isSubRegion) {
+
+      LocalRegion root = (LocalRegion) cache.createRegionFactory(RegionShortcut.REPLICATE).create("root");
+      LocalRegion region = (LocalRegion) cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT).
+        createSubregion(root, regionName);
+      return region;
+    }
+    else {
+      LocalRegion region = (LocalRegion) createRegion(regionName, RegionShortcut.REPLICATE_PERSISTENT);
+      return region;
+    }
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void cannotCreateLuceneIndexAfterRegionHasBeenCreated() throws IOException, ParseException {
+    getService();
+
+    LocalRegion userRegion = createPR("PR1", false);
+    service.createIndex("index1", "PR1", "field1", "field2", "field3");
+  }
+
+  @Test
+  public void canCreateLuceneIndexForPRWithAnalyzer() throws IOException, ParseException {
+    getService();
+    StandardAnalyzer sa = new StandardAnalyzer();
+    KeywordAnalyzer ka = new KeywordAnalyzer();
+    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
+    analyzerPerField.put("field1", ka);
+    analyzerPerField.put("field2", sa);
+    analyzerPerField.put("field3", sa);
+    //  field2 and field3 will use StandardAnalyzer
+    PerFieldAnalyzerWrapper analyzer2 = new PerFieldAnalyzerWrapper(sa, analyzerPerField);
+
+    service.createIndex("index1", "PR1", analyzerPerField);
+    createPR("PR1", false);
+    LuceneIndexImpl index1 = (LuceneIndexImpl) service.getIndex("index1", "PR1");
+    assertTrue(index1 instanceof LuceneIndexForPartitionedRegion);
+    LuceneIndexForPartitionedRegion index1PR = (LuceneIndexForPartitionedRegion) index1;
+    assertEquals("index1", index1.getName());
+    assertEquals("/PR1", index1.getRegionPath());
+    String[] fields1 = index1.getFieldNames();
+    assertEquals(3, fields1.length);
+    Analyzer analyzer = index1PR.getAnalyzer();
+    assertTrue(analyzer instanceof PerFieldAnalyzerWrapper);
+    RepositoryManager RepositoryManager = index1PR.getRepositoryManager();
+    assertTrue(RepositoryManager != null);
+
+    final String fileRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".files";
+    final String chunkRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".chunks";
+    PartitionedRegion filePR = (PartitionedRegion) cache.getRegion(fileRegionName);
+    PartitionedRegion chunkPR = (PartitionedRegion) cache.getRegion(chunkRegionName);
+    assertTrue(filePR != null);
+    assertTrue(chunkPR != null);
+  }
+
+  @Test
+  public void cannotCreateLuceneIndexForReplicateRegion() throws IOException, ParseException {
+    expectedException.expect(UnsupportedOperationException.class);
+    expectedException.expectMessage("Lucene indexes on replicated regions are not supported");
+    getService();
+    service.createIndex("index1", "RR1", "field1", "field2", "field3");
+    createRR("RR1", false);
+  }
+
+  @Test
+  public void canCreateIndexForAllNonProxyPartitionRegionTypes() {
+    for (RegionShortcut shortcut : RegionShortcut.values()) {
+      String sname = shortcut.name().toLowerCase();
+      if (sname.contains("partition") && !sname.contains("proxy")) {
+        canCreateLuceneIndexForPRType(shortcut);
+        //Destroying cache and service for now because aeq's are not completely being cleaned up correctly after
+        // being destroyed.  Instead we should close the aeq and clean up any regions associated with this lucene
+        //index but only after aeq destroy works properly
+        destroyCache();
+        destroyService();
+      }
+    }
+  }
+
+  private void canCreateLuceneIndexForPRType(RegionShortcut regionShortcut) {
+    getService();
+    service.createIndex("index1", "PR1", "field1", "field2", "field3");
+    Region region = null;
+    AsyncEventQueueImpl aeq = null;
+    try {
+      region = createRegion("PR1", regionShortcut);
+      LuceneIndexImpl index1 = (LuceneIndexImpl) service.getIndex("index1", "PR1");
+      assertTrue(index1 instanceof LuceneIndexForPartitionedRegion);
+      LuceneIndexForPartitionedRegion index1PR = (LuceneIndexForPartitionedRegion) index1;
+      assertEquals("index1", index1.getName());
+      assertEquals("/PR1", index1.getRegionPath());
+      String[] fields1 = index1.getFieldNames();
+      assertEquals(3, fields1.length);
+      Analyzer analyzer = index1PR.getAnalyzer();
+      assertTrue(analyzer instanceof StandardAnalyzer);
+      RepositoryManager RepositoryManager = index1PR.getRepositoryManager();
+      assertTrue(RepositoryManager != null);
+
+      final String fileRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".files";
+      final String chunkRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".chunks";
+      PartitionedRegion filePR = (PartitionedRegion) cache.getRegion(fileRegionName);
+      PartitionedRegion chunkPR = (PartitionedRegion) cache.getRegion(chunkRegionName);
+      assertTrue(filePR != null);
+      assertTrue(chunkPR != null);
+
+      String aeqId = LuceneServiceImpl.getUniqueIndexName(index1.getName(), index1.getRegionPath());
+      aeq = (AsyncEventQueueImpl) cache.getAsyncEventQueue(aeqId);
+      assertTrue(aeq != null);
+
+      //Make sure our queue doesn't show up in the list of async event queues
+      assertEquals(Collections.emptySet(), cache.getAsyncEventQueues());
+    }
+    finally {
+      String aeqId = LuceneServiceImpl.getUniqueIndexName("index1", "PR1");
+      PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
+      if (chunkRegion != null) {
+        chunkRegion.destroyRegion();
+      }
+      PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
+      if (fileRegion != null) {
+        fileRegion.destroyRegion();
+      }
+      ((GemFireCacheImpl) cache).removeAsyncEventQueue(aeq);
+      if (aeq != null) {
+        aeq.destroy();
+      }
+      region.destroyRegion();
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplJUnitTest.java
deleted file mode 100644
index dec4572..0000000
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/LuceneServiceImplJUnitTest.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package com.gemstone.gemfire.cache.lucene.internal;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import com.gemstone.gemfire.cache.Region;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.core.KeywordAnalyzer;
-import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.ExpectedException;
-
-import com.gemstone.gemfire.cache.Cache;
-import com.gemstone.gemfire.cache.CacheFactory;
-import com.gemstone.gemfire.cache.RegionShortcut;
-import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueImpl;
-import com.gemstone.gemfire.cache.client.ClientCache;
-import com.gemstone.gemfire.cache.client.ClientCacheFactory;
-import com.gemstone.gemfire.cache.execute.Function;
-import com.gemstone.gemfire.cache.execute.FunctionService;
-import com.gemstone.gemfire.cache.lucene.LuceneService;
-import com.gemstone.gemfire.cache.lucene.LuceneServiceProvider;
-import com.gemstone.gemfire.cache.lucene.internal.distributed.LuceneFunction;
-import com.gemstone.gemfire.cache.lucene.internal.repository.RepositoryManager;
-import com.gemstone.gemfire.cache.lucene.internal.repository.serializer.HeterogeneousLuceneSerializer;
-import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
-
-@Category(IntegrationTest.class)
-public class LuceneServiceImplJUnitTest {
-
-  Cache cache;
-  ClientCache clientCache;
-  private LuceneIndexImpl repo;
-  private HeterogeneousLuceneSerializer mapper;
-  private StandardAnalyzer analyzer = new StandardAnalyzer();
-  private IndexWriter writer;
-  LuceneServiceImpl service = null;
-  private static final Logger logger = LogService.getLogger();
-
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
-  @Test
-  public void luceneServiceProviderGetShouldAcceptClientCacheAsAParameter(){
-    clientCache = getClientCache();
-    LuceneService luceneService = LuceneServiceProvider.get(clientCache);
-    assertNotNull(luceneService);
-  }
-
-  // lucene service will register query execution function on initialization
-  @Test
-  public void shouldRegisterQueryFunction() {
-    Function function = FunctionService.getFunction(LuceneFunction.ID);
-    assertNull(function);
-
-    cache = getCache();
-    new LuceneServiceImpl().init(cache);
-
-    function = FunctionService.getFunction(LuceneFunction.ID);
-    assertNotNull(function);
-  }
-
-  @After
-  public void destroyService() {
-    if (null != service) {
-      service = null;
-    }
-  }
-
-  @After
-  public void destroyCache() {
-    if (null != cache && !cache.isClosed()) {
-      cache.close();
-      cache = null;
-    }
-    if (null != clientCache  && !clientCache.isClosed()) {
-      clientCache.close();
-      clientCache = null;
-    }
-  }
-
-  private ClientCache getClientCache() {
-    if (null == clientCache) {
-      clientCache = new ClientCacheFactory().set("mcast-port", "0").create();
-    }
-    else{
-      return clientCache;
-    }
-    return clientCache;
-  }
-
-  private Cache getCache() {
-    if (null == cache) {
-      cache = new CacheFactory().set("mcast-port", "0").create();
-    }
-    return cache;
-  }
-
-  private LuceneService getService() {
-    if (null == cache) {
-      getCache();
-    }
-    if (null == service) {
-      service = (LuceneServiceImpl)LuceneServiceProvider.get(cache);
-    }
-    return service;
-  }
-
-
-  private Region createRegion(String regionName, RegionShortcut shortcut) {
-    return cache.createRegionFactory(shortcut).create(regionName);
-  }
-
-  private LocalRegion createPR(String regionName, boolean isSubRegion) {
-    if (isSubRegion) {
-      LocalRegion root = (LocalRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create("root");
-      LocalRegion region = (LocalRegion) cache.createRegionFactory(RegionShortcut.PARTITION_PERSISTENT).
-        createSubregion(root, regionName);
-      return region;
-    }
-    else {
-      LocalRegion region = (LocalRegion) createRegion(regionName, RegionShortcut.PARTITION_PERSISTENT);
-      return region;
-    }
-  }
-
-  private LocalRegion createRR(String regionName, boolean isSubRegion) {
-    if (isSubRegion) {
-
-      LocalRegion root = (LocalRegion) cache.createRegionFactory(RegionShortcut.REPLICATE).create("root");
-      LocalRegion region = (LocalRegion) cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT).
-        createSubregion(root, regionName);
-      return region;
-    }
-    else {
-      LocalRegion region = (LocalRegion) createRegion(regionName, RegionShortcut.REPLICATE_PERSISTENT);
-      return region;
-    }
-  }
-
-  @Test(expected = IllegalStateException.class)
-  public void cannotCreateLuceneIndexAfterRegionHasBeenCreated() throws IOException, ParseException {
-    getService();
-
-    LocalRegion userRegion = createPR("PR1", false);
-    service.createIndex("index1", "PR1", "field1", "field2", "field3");
-  }
-
-  @Test
-  public void canCreateLuceneIndexForPRWithAnalyzer() throws IOException, ParseException {
-    getService();
-    StandardAnalyzer sa = new StandardAnalyzer();
-    KeywordAnalyzer ka = new KeywordAnalyzer();
-    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
-    analyzerPerField.put("field1", ka);
-    analyzerPerField.put("field2", sa);
-    analyzerPerField.put("field3", sa);
-    //  field2 and field3 will use StandardAnalyzer
-    PerFieldAnalyzerWrapper analyzer2 = new PerFieldAnalyzerWrapper(sa, analyzerPerField);
-
-    service.createIndex("index1", "PR1", analyzerPerField);
-    createPR("PR1", false);
-    LuceneIndexImpl index1 = (LuceneIndexImpl) service.getIndex("index1", "PR1");
-    assertTrue(index1 instanceof LuceneIndexForPartitionedRegion);
-    LuceneIndexForPartitionedRegion index1PR = (LuceneIndexForPartitionedRegion) index1;
-    assertEquals("index1", index1.getName());
-    assertEquals("/PR1", index1.getRegionPath());
-    String[] fields1 = index1.getFieldNames();
-    assertEquals(3, fields1.length);
-    Analyzer analyzer = index1PR.getAnalyzer();
-    assertTrue(analyzer instanceof PerFieldAnalyzerWrapper);
-    RepositoryManager RepositoryManager = index1PR.getRepositoryManager();
-    assertTrue(RepositoryManager != null);
-
-    final String fileRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".files";
-    final String chunkRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".chunks";
-    PartitionedRegion filePR = (PartitionedRegion) cache.getRegion(fileRegionName);
-    PartitionedRegion chunkPR = (PartitionedRegion) cache.getRegion(chunkRegionName);
-    assertTrue(filePR != null);
-    assertTrue(chunkPR != null);
-  }
-
-  @Test
-  public void cannotCreateLuceneIndexForReplicateRegion() throws IOException, ParseException {
-    expectedException.expect(UnsupportedOperationException.class);
-    expectedException.expectMessage("Lucene indexes on replicated regions are not supported");
-    getService();
-    service.createIndex("index1", "RR1", "field1", "field2", "field3");
-    createRR("RR1", false);
-  }
-
-  @Test
-  public void canCreateIndexForAllNonProxyPartitionRegionTypes() {
-    for (RegionShortcut shortcut : RegionShortcut.values()) {
-      String sname = shortcut.name().toLowerCase();
-      if (sname.contains("partition") && !sname.contains("proxy")) {
-        canCreateLuceneIndexForPRType(shortcut);
-        //Destroying cache and service for now because aeq's are not completely being cleaned up correctly after
-        // being destroyed.  Instead we should close the aeq and clean up any regions associated with this lucene
-        //index but only after aeq destroy works properly
-        destroyCache();
-        destroyService();
-      }
-    }
-  }
-
-  private void canCreateLuceneIndexForPRType(RegionShortcut regionShortcut) {
-    getService();
-    service.createIndex("index1", "PR1", "field1", "field2", "field3");
-    Region region = null;
-    AsyncEventQueueImpl aeq = null;
-    try {
-      region = createRegion("PR1", regionShortcut);
-      LuceneIndexImpl index1 = (LuceneIndexImpl) service.getIndex("index1", "PR1");
-      assertTrue(index1 instanceof LuceneIndexForPartitionedRegion);
-      LuceneIndexForPartitionedRegion index1PR = (LuceneIndexForPartitionedRegion) index1;
-      assertEquals("index1", index1.getName());
-      assertEquals("/PR1", index1.getRegionPath());
-      String[] fields1 = index1.getFieldNames();
-      assertEquals(3, fields1.length);
-      Analyzer analyzer = index1PR.getAnalyzer();
-      assertTrue(analyzer instanceof StandardAnalyzer);
-      RepositoryManager RepositoryManager = index1PR.getRepositoryManager();
-      assertTrue(RepositoryManager != null);
-
-      final String fileRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".files";
-      final String chunkRegionName = LuceneServiceImpl.getUniqueIndexName("index1", "/PR1") + ".chunks";
-      PartitionedRegion filePR = (PartitionedRegion) cache.getRegion(fileRegionName);
-      PartitionedRegion chunkPR = (PartitionedRegion) cache.getRegion(chunkRegionName);
-      assertTrue(filePR != null);
-      assertTrue(chunkPR != null);
-
-      String aeqId = LuceneServiceImpl.getUniqueIndexName(index1.getName(), index1.getRegionPath());
-      aeq = (AsyncEventQueueImpl) cache.getAsyncEventQueue(aeqId);
-      assertTrue(aeq != null);
-
-      //Make sure our queue doesn't show up in the list of async event queues
-      assertEquals(Collections.emptySet(), cache.getAsyncEventQueues());
-    }
-    finally {
-      String aeqId = LuceneServiceImpl.getUniqueIndexName("index1", "PR1");
-      PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
-      if (chunkRegion != null) {
-        chunkRegion.destroyRegion();
-      }
-      PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
-      if (fileRegion != null) {
-        fileRegion.destroyRegion();
-      }
-      ((GemFireCacheImpl) cache).removeAsyncEventQueue(aeq);
-      if (aeq != null) {
-        aeq.destroy();
-      }
-      region.destroyRegion();
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/eaa15939/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
index cae2142..c3e9c0e 100644
--- a/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
+++ b/geode-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/xml/LuceneIndexXmlParserJUnitTest.java
@@ -30,10 +30,14 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.internal.cache.extension.SimpleExtensionPoint;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
+import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.XmlGeneratorUtils;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
@@ -50,8 +54,13 @@ public class LuceneIndexXmlParserJUnitTest {
   @Before
   public void setUp() {
     this.parser = new LuceneXmlParser();
-    CacheCreation cache = new CacheCreation();
-    this.rc = new RegionCreation(cache, "region");
+    CacheCreation cache = Mockito.mock(CacheCreation.class);
+    RegionCreation regionCreation = Mockito.mock(RegionCreation.class);
+    RegionAttributesCreation rac = Mockito.mock(RegionAttributesCreation.class);
+    Mockito.when(regionCreation.getFullPath()).thenReturn("/region");
+    Mockito.when(regionCreation.getAttributes()).thenReturn(rac);
+    Mockito.when(regionCreation.getExtensionPoint()).thenReturn(new SimpleExtensionPoint(this.rc,this.rc));
+    this.rc = regionCreation;
     this.stack = new Stack<Object>();
     stack.push(cache);
     stack.push(rc);

