lucene-commits mailing list archives

From: a.@apache.org
Subject: lucene-solr:jira/solr-12509: SOLR-12509: Fix bugs and add more unit tests. Make the offline method optional.
Date: Sat, 07 Jul 2018 09:02:08 GMT
Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12509 0b894b0cc -> 7b5e6b42d


SOLR-12509: Fix bugs and add more unit tests. Make the offline method optional.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7b5e6b42
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7b5e6b42
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7b5e6b42

Branch: refs/heads/jira/solr-12509
Commit: 7b5e6b42d1a3a384fcdc33a5255ac4942fe5e875
Parents: 0b894b0
Author: Andrzej Bialecki <ab@apache.org>
Authored: Sat Jul 7 11:01:19 2018 +0200
Committer: Andrzej Bialecki <ab@apache.org>
Committed: Sat Jul 7 11:01:19 2018 +0200

----------------------------------------------------------------------
 .../cloud/api/collections/SplitShardCmd.java    |  96 +++---
 .../src/java/org/apache/solr/core/SolrCore.java |   1 +
 .../org/apache/solr/handler/admin/SplitOp.java  |  30 +-
 .../solr/update/DefaultSolrCoreState.java       |   6 +
 .../org/apache/solr/update/SolrCoreState.java   |   3 +
 .../apache/solr/update/SolrIndexSplitter.java   | 334 +++++++++++--------
 .../apache/solr/update/SplitIndexCommand.java   |   8 +-
 .../solr/cloud/ChaosMonkeyShardSplitTest.java   |   2 +-
 .../cloud/api/collections/ShardSplitTest.java   |  58 +++-
 .../solr/update/SolrIndexSplitterTest.java      |  92 ++++-
 .../solrj/request/CollectionAdminRequest.java   |  11 +
 .../solr/common/params/CoreAdminParams.java     |   2 +-
 12 files changed, 430 insertions(+), 213 deletions(-)
----------------------------------------------------------------------
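For context, the new offline flag is exposed through SolrJ (see the CollectionAdminRequest change below); a minimal usage sketch, assuming a hypothetical collection name and an existing CloudSolrClient:

    CollectionAdminRequest.SplitShard splitShard =
        CollectionAdminRequest.splitShard("myCollection");  // hypothetical collection
    splitShard.setShardName("shard1");
    splitShard.setOffline(true);  // use the hard-link based offline split method
    CollectionAdminResponse rsp = splitShard.process(cloudClient);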


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 052c279..7922d3a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -144,6 +144,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
       ZkNodeProps m = new ZkNodeProps(propMap);
       inQueue.offer(Utils.toJSON(m));
+      log.debug("Offline mode - deactivated slices: " + offlineSlices);
     }
 
     List<DocRouter.Range> subRanges = new ArrayList<>();
@@ -168,22 +169,22 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
     });
     int repFactor = numNrt.get() + numTlog.get() + numPull.get();
 
-    // type of the first subreplica will be the same as leader
-    boolean firstNrtReplica = parentShardLeader.getType() == Replica.Type.NRT;
-    // verify that we indeed have the right number of correct replica types
-    if ((firstNrtReplica && numNrt.get() < 1) || (!firstNrtReplica && numTlog.get() < 1)) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "aborting split - inconsistent replica types in collection " + collectionName +
-          ": nrt=" + numNrt.get() + ", tlog=" + numTlog.get() + ", pull=" + numPull.get() + ", shard leader type is " +
-      parentShardLeader.getType());
-    }
-
-    List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
+    boolean success = false;
+    try {
+      // type of the first subreplica will be the same as leader
+      boolean firstNrtReplica = parentShardLeader.getType() == Replica.Type.NRT;
+      // verify that we indeed have the right number of correct replica types
+      if ((firstNrtReplica && numNrt.get() < 1) || (!firstNrtReplica && numTlog.get() < 1)) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "aborting split - inconsistent replica types in collection " + collectionName +
+            ": nrt=" + numNrt.get() + ", tlog=" + numTlog.get() + ", pull=" + numPull.get() + ", shard leader type is " +
+            parentShardLeader.getType());
+      }
 
-    t = timings.sub("fillRanges");
-    String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames, firstNrtReplica);
-    t.stop();
+      List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
 
-    try {
+      t = timings.sub("fillRanges");
+      String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames, firstNrtReplica);
+      t.stop();
 
       boolean oldShardsDeleted = false;
       for (String subSlice : subSlices) {
@@ -307,7 +308,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
-      params.set("hardLink", "true");
+      params.set("offline", String.valueOf(offline));
       params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
       for (int i = 0; i < subShardNames.size(); i++) {
         String subShardName = subShardNames.get(i);
@@ -326,25 +327,27 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
       log.debug("Index on shard: " + nodeName + " split into two successfully");
 
-      t = timings.sub("applyBufferedUpdates");
-      // apply buffered updates on sub-shards
-      for (int i = 0; i < subShardNames.size(); i++) {
-        String subShardName = subShardNames.get(i);
+      if (!offline) {
+        t = timings.sub("applyBufferedUpdates");
+        // apply buffered updates on sub-shards
+        for (int i = 0; i < subShardNames.size(); i++) {
+          String subShardName = subShardNames.get(i);
 
-        log.debug("Applying buffered updates on : " + subShardName);
+          log.debug("Applying buffered updates on : " + subShardName);
 
-        params = new ModifiableSolrParams();
-        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
-        params.set(CoreAdminParams.NAME, subShardName);
+          params = new ModifiableSolrParams();
+          params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
+          params.set(CoreAdminParams.NAME, subShardName);
 
-        ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
-      }
+          ocmh.sendShardRequest(nodeName, params, shardHandler, asyncId, requestMap);
+        }
 
-      ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
-          " to apply buffered updates", asyncId, requestMap);
-      t.stop();
+        ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking sub shard leaders" +
+            " to apply buffered updates", asyncId, requestMap);
+        t.stop();
 
-      log.debug("Successfully applied buffered updates on : " + subShardNames);
+        log.debug("Successfully applied buffered updates on : " + subShardNames);
+      }
 
       // Replica creation for the new Slices
       // replica placement is controlled by the autoscaling policy framework
@@ -504,15 +507,18 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       if (withTiming) {
         results.add(CommonParams.TIMING, timings.asNamedList());
       }
+      success = true;
       return true;
     } catch (SolrException e) {
-      cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
       throw e;
     } catch (Exception e) {
       log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
     } finally {
       if (sessionWrapper != null) sessionWrapper.release();
+      if (!success) {
+        cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
+      }
     }
   }
 
@@ -555,12 +561,12 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
 
   private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionName, String parentShard,
                                    List<String> subSlices, Set<String> offlineSlices) {
-    log.debug("- cleanup after failed split of " + collectionName + "/" + parentShard);
+    log.info("Cleaning up after a failed split of " + collectionName + "/" + parentShard);
     // get the latest state
     try {
       zkStateReader.forceUpdateCollection(collectionName);
     } catch (KeeperException | InterruptedException e) {
-      log.warn("Cleanup after failed split of " + collectionName + "/" + parentShard + ": (force update collection)", e);
+      log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (force update collection)", e);
       return;
     }
     ClusterState clusterState = zkStateReader.getClusterState();
@@ -574,6 +580,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
     // from entering into RECOVERY or ACTIVE (SOLR-9455)
     DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
     final Map<String, Object> propMap = new HashMap<>();
+    boolean sendUpdateState = false;
     propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
     propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
     for (Slice s : coll.getSlices()) {
@@ -581,22 +588,29 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
         continue;
       }
       propMap.put(s.getName(), Slice.State.CONSTRUCTION.toString());
+      sendUpdateState = true;
     }
 
     // if parent is inactive activate it again
     Slice parentSlice = coll.getSlice(parentShard);
     if (parentSlice.getState() == Slice.State.INACTIVE) {
+      sendUpdateState = true;
       propMap.put(parentShard, Slice.State.ACTIVE.toString());
     }
     // plus any other previously deactivated slices
-    offlineSlices.forEach(s -> propMap.put(s, Slice.State.ACTIVE.toString()));
+    for (String sliceName : offlineSlices) {
+      propMap.put(sliceName, Slice.State.ACTIVE.toString());
+      sendUpdateState = true;
+    }
 
-    try {
-      ZkNodeProps m = new ZkNodeProps(propMap);
-      inQueue.offer(Utils.toJSON(m));
-    } catch (Exception e) {
-      // don't give up yet - just log the error, we may still be able to clean up
-      log.warn("Cleanup after failed split of " + collectionName + "/" + parentShard + ": (slice state changes)", e);
+    if (sendUpdateState) {
+      try {
+        ZkNodeProps m = new ZkNodeProps(propMap);
+        inQueue.offer(Utils.toJSON(m));
+      } catch (Exception e) {
+        // don't give up yet - just log the error, we may still be able to clean up
+        log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (slice state changes)", e);
+      }
     }
 
     // delete existing subShards
@@ -605,7 +619,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       if (s == null) {
         continue;
       }
-      log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
+      log.info("- sub-shard: {} already exists therefore requesting its deletion", subSlice);
       HashMap<String, Object> props = new HashMap<>();
       props.put(Overseer.QUEUE_OPERATION, "deleteshard");
       props.put(COLLECTION_PROP, collectionName);
@@ -614,7 +628,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
       try {
         ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
       } catch (Exception e) {
-        log.warn("Cleanup after failed split of " + collectionName + "/" + parentShard + ": (deleting existing sub shard " + subSlice + ")", e);
+        log.warn("Cleanup failed after failed split of " + collectionName + "/" + parentShard + ": (deleting existing sub shard " + subSlice + ")", e);
       }
     }
   }
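A note on the control-flow change in SplitShardCmd above: failure cleanup is now driven from the finally block under a success flag, so it runs for any exception type (previously only a SolrException triggered it) and only after the policy session wrapper is released. Reduced to its shape:

    boolean success = false;
    try {
      // ... all split steps ...
      success = true;
      return true;
    } finally {
      if (sessionWrapper != null) sessionWrapper.release();
      if (!success) {
        cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
      }
    }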

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index feab22d..00dbe0f 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -55,6 +55,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
 import com.codahale.metrics.Counter;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java b/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
index 66d3aae..ddd47b6 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
@@ -22,7 +22,9 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
 
+import org.apache.lucene.store.Directory;
 import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.cloud.ZkShardTerms;
 import org.apache.solr.common.SolrException;
@@ -31,6 +33,7 @@ import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.LocalSolrQueryRequest;
@@ -78,9 +81,10 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
     }
 
     log.info("Invoked split action for core: " + cname);
-    SolrCore core = it.handler.coreContainer.getCore(cname);
-    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
+    boolean offline = params.getBool("offline", true);
+    SolrCore parentCore = it.handler.coreContainer.getCore(cname);
     List<SolrCore> newCores = null;
+    SolrQueryRequest req = null;
 
     try {
       // TODO: allow use of rangesStr in the future
@@ -91,9 +95,9 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
       String routeFieldName = null;
       if (it.handler.coreContainer.isZooKeeperAware()) {
         ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
-        String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
+        String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName();
         DocCollection collection = clusterState.getCollection(collectionName);
-        String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
+        String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId();
         Slice slice = collection.getSlice(sliceName);
         router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
         if (ranges == null) {
@@ -131,10 +135,20 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
         paths = Arrays.asList(pathsArr);
       }
 
+      req = new LocalSolrQueryRequest(parentCore, params);
 
-      boolean hardLink = req.getParams().getBool("hardLink", true);
-      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey, hardLink);
-      core.getUpdateHandler().split(cmd);
+      ReadWriteLock iwLock = parentCore.getSolrCoreState().getIndexWriterLock();
+      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey, offline);
+      if (offline) {
+        iwLock.writeLock().lockInterruptibly();
+      }
+      try {
+        parentCore.getUpdateHandler().split(cmd);
+      } finally {
+        if (offline) {
+          iwLock.writeLock().unlock();
+        }
+      }
 
       if (it.handler.coreContainer.isZooKeeperAware()) {
         for (SolrCore newcore : newCores) {
@@ -151,7 +165,7 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
       throw e;
     } finally {
       if (req != null) req.close();
-      if (core != null) core.close();
+      if (parentCore != null) parentCore.close();
       if (newCores != null) {
         for (SolrCore newCore : newCores) {
           newCore.close();

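The "offline" parameter replaces the former "hardLink" parameter on the core-level SPLIT action, and SplitOp above defaults it to true when the parameter is absent. A sketch of how a direct core-admin SPLIT call would be parameterized; the core names are hypothetical, and "targetCore" is assumed to be the usual SPLIT target parameter:

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
    params.set(CoreAdminParams.CORE, "parentCore");   // hypothetical parent core
    params.set("targetCore", "subCore1", "subCore2"); // hypothetical sub-cores
    params.set("offline", "true");                    // request the offline (hard-link) method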
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
index cc79e3c..baeedc6 100644
--- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -425,6 +426,11 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
   }
 
   @Override
+  public ReadWriteLock getIndexWriterLock() {
+    return iwLock;
+  }
+
+  @Override
   public boolean getCdcrBootstrapRunning() {
     return cdcrRunning.get();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCoreState.java b/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
index 9da2516..5ae735c 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
@@ -21,6 +21,7 @@ import java.lang.invoke.MethodHandles;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
 
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.Sort;
@@ -180,6 +181,8 @@ public abstract class SolrCoreState {
 
   public abstract Lock getRecoveryLock();
 
+  public abstract ReadWriteLock getIndexWriterLock();
+
  // These are needed to properly synchronize the bootstrapping when the
  // replicas in the target DC require a full sync.
   public abstract boolean getCdcrBootstrapRunning();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index 841e835..a7d5cbc 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -19,22 +19,33 @@ package org.apache.solr.update;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Future;
 
 import org.apache.lucene.index.CodecReader;
 import org.apache.lucene.index.FilterCodecReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.SlowCodecReaderWrapper;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;
-import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRefBuilder;
@@ -44,8 +55,6 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.CompositeIdRouter;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.HashBasedRouter;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.CachingDirectoryFactory;
 import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.IndexFetcher;
@@ -71,110 +80,7 @@ public class SolrIndexSplitter {
   int currPartition = 0;
   String routeFieldName;
   String splitKey;
-  boolean hardLink;
-
-  private static class HardLinkDirectoryFactoryWrapper extends DirectoryFactory {
-
-    private final DirectoryFactory delegate;
-
-    HardLinkDirectoryFactoryWrapper(DirectoryFactory delegate) {
-      this.delegate = delegate;
-    }
-
-    public DirectoryFactory getDelegate() {
-      return delegate;
-    }
-
-    private Directory unwrap(Directory dir) {
-      while (dir instanceof HardlinkCopyDirectoryWrapper) {
-        dir = ((HardlinkCopyDirectoryWrapper)dir).getDelegate();
-      }
-      return dir;
-    }
-
-    @Override
-    public void doneWithDirectory(Directory directory) throws IOException {
-      delegate.doneWithDirectory(unwrap(directory));
-    }
-
-    @Override
-    public void addCloseListener(Directory dir, CachingDirectoryFactory.CloseListener closeListener) {
-      delegate.addCloseListener(unwrap(dir), closeListener);
-    }
-
-    @Override
-    public void close() throws IOException {
-      delegate.close();
-    }
-
-    @Override
-    protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
-      throw new UnsupportedOperationException("create");
-    }
-
-    @Override
-    protected LockFactory createLockFactory(String rawLockType) throws IOException {
-      throw new UnsupportedOperationException("createLockFactory");
-    }
-
-    @Override
-    public boolean exists(String path) throws IOException {
-      return delegate.exists(path);
-    }
-
-    @Override
-    public void remove(Directory dir) throws IOException {
-      delegate.remove(unwrap(dir));
-    }
-
-    @Override
-    public void remove(Directory dir, boolean afterCoreClose) throws IOException {
-      delegate.remove(unwrap(dir), afterCoreClose);
-    }
-
-    @Override
-    public void remove(String path, boolean afterCoreClose) throws IOException {
-      delegate.remove(path, afterCoreClose);
-    }
-
-    @Override
-    public void remove(String path) throws IOException {
-      delegate.remove(path);
-    }
-
-    private Directory wrap(Directory dir) {
-      if (dir instanceof HardlinkCopyDirectoryWrapper) {
-        return dir;
-      } else {
-        return new HardlinkCopyDirectoryWrapper(dir);
-      }
-    }
-    @Override
-    public Directory get(String path, DirContext dirContext, String rawLockType) throws IOException {
-      Directory dir = delegate.get(path, dirContext, rawLockType);
-      return wrap(dir);
-    }
-
-    @Override
-    public void incRef(Directory directory) {
-      delegate.incRef(unwrap(directory));
-    }
-
-    @Override
-    public boolean isPersistent() {
-      return delegate.isPersistent();
-    }
-
-    @Override
-    public void release(Directory directory) throws IOException {
-      delegate.release(unwrap(directory));
-    }
-
-    @Override
-    public void init(NamedList args) {
-      delegate.init(args);
-    }
-  }
+  boolean offline;
 
   public SolrIndexSplitter(SplitIndexCommand cmd) {
     searcher = cmd.getReq().getSearcher();
@@ -199,61 +105,88 @@ public class SolrIndexSplitter {
     if (cmd.splitKey != null) {
       splitKey = getRouteKey(cmd.splitKey);
     }
-    this.hardLink = cmd.hardLink;
+    if (cores == null) {
+      this.offline = false;
+    } else {
+      this.offline = cmd.offline;
+    }
   }
 
   public void split() throws IOException {
 
     List<LeafReaderContext> leaves = searcher.getRawReader().leaves();
+    Directory parentDirectory = searcher.getRawReader().directory();
     List<FixedBitSet[]> segmentDocSets = new ArrayList<>(leaves.size());
+    SolrIndexConfig parentConfig = searcher.getCore().getSolrConfig().indexConfig;
 
     log.info("SolrIndexSplitter: partitions=" + numPieces + " segments="+leaves.size());
 
-    for (LeafReaderContext readerContext : leaves) {
-      assert readerContext.ordInParent == segmentDocSets.size();  // make sure we're going in order
-      FixedBitSet[] docSets = split(readerContext);
-      segmentDocSets.add( docSets );
+    if (offline) {
+      // close the searcher if using offline method
+      // caller should have already locked the SolrCoreState.indexWriterLock at this point
+      // thus preventing the creation of new IndexWriter
+      searcher.getCore().closeSearcher();
+      searcher = null;
+    } else {
+      for (LeafReaderContext readerContext : leaves) {
+        assert readerContext.ordInParent == segmentDocSets.size();  // make sure we're going in order
+        FixedBitSet[] docSets = split(readerContext);
+        segmentDocSets.add(docSets);
+      }
     }
 
-
     // would it be more efficient to write segment-at-a-time to each new index?
     // - need to worry about number of open descriptors
     // - need to worry about if IW.addIndexes does a sync or not...
     // - would be more efficient on the read side, but prob less efficient merging
 
     for (int partitionNumber=0; partitionNumber<numPieces; partitionNumber++) {
-      log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));
+      String partitionName = "SolrIndexSplitter:partition=" + partitionNumber + ",partitionCount=" + numPieces + (ranges != null ? ",range=" + ranges.get(partitionNumber) : "");
+      log.info(partitionName);
 
       boolean success = false;
 
       RefCounted<IndexWriter> iwRef = null;
       IndexWriter iw;
-      if (cores != null && !hardLink) {
+      if (cores != null && !offline) {
         SolrCore subCore = cores.get(partitionNumber);
         iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
         iw = iwRef.get();
       } else {
-        SolrCore core = searcher.getCore();
-        String path;
-        DirectoryFactory factory;
-        if (hardLink && cores != null) {
-          path =  cores.get(partitionNumber).getDataDir() + "index.split";
-          factory = new HardLinkDirectoryFactoryWrapper(core.getDirectoryFactory());
+        if (offline) {
+          SolrCore subCore = cores.get(partitionNumber);
+          String path = subCore.getDataDir() + "index.split";
+          // copy by hard-linking
+          Directory splitDir = subCore.getDirectoryFactory().get(path, DirectoryFactory.DirContext.DEFAULT, subCore.getSolrConfig().indexConfig.lockType);
+          Directory hardLinkedDir = new HardlinkCopyDirectoryWrapper(splitDir);
+          for (String file : parentDirectory.listAll()) {
+            // there should be no write.lock
+            if (file.equals(IndexWriter.WRITE_LOCK_NAME)) {
+              throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Splitting in 'offline' mode but parent write.lock exists!");
+            }
+            hardLinkedDir.copyFrom(parentDirectory, file, file, IOContext.DEFAULT);
+          }
+          IndexWriterConfig iwConfig = parentConfig.toIndexWriterConfig(subCore);
+          iw = new SolrIndexWriter(partitionName, splitDir, iwConfig);
         } else {
-          factory = core.getDirectoryFactory();
-          path = paths.get(partitionNumber);
+          SolrCore core = searcher.getCore();
+          String path = paths.get(partitionNumber);
+          iw = SolrIndexWriter.create(core, partitionName, path,
+              core.getDirectoryFactory(), true, core.getLatestSchema(),
+              core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
         }
-        iw = SolrIndexWriter.create(core, "SplittingIndexWriter"+partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path,
-                                    factory, true, core.getLatestSchema(),
-                                    core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
       }
 
       try {
-        // This removes deletions but optimize might still be needed because sub-shards will have the same number of segments as the parent shard.
-        for (int segmentNumber = 0; segmentNumber<leaves.size(); segmentNumber++) {
-          log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : "") + " segment #"+segmentNumber + " segmentCount=" + leaves.size());
-          CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
-          iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));
+        if (offline) {
+          iw.deleteDocuments(new ShardSplittingQuery(partitionNumber, field, rangesArr, router, splitKey));
+        } else {
+          // This removes deletions but optimize might still be needed because sub-shards will have the same number of segments as the parent shard.
+          for (int segmentNumber = 0; segmentNumber<leaves.size(); segmentNumber++) {
+            log.info("SolrIndexSplitter: partition #" + partitionNumber + " partitionCount=" + numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : "") + " segment #"+segmentNumber + " segmentCount=" + leaves.size());
+            CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
+            iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(leaves.get(segmentNumber).ord)[partitionNumber]));
+          }
         }
         // we commit explicitly instead of sending a CommitUpdateCommand through the processor chain
         // because the sub-shard cores will just ignore such a commit because the update log is not
@@ -271,12 +204,16 @@ public class SolrIndexSplitter {
           } else {
             IOUtils.closeWhileHandlingException(iw);
           }
+          if (offline) {
+            SolrCore subCore = cores.get(partitionNumber);
+            subCore.getDirectoryFactory().release(iw.getDirectory());
+          }
         }
       }
     }
     // all sub-indexes created ok
     // when using hard-linking switch directories & refresh cores
-    if (hardLink && cores != null) {
+    if (offline && cores != null) {
       boolean switchOk = true;
       for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
         SolrCore subCore = cores.get(partitionNumber);
@@ -329,7 +266,7 @@ public class SolrIndexSplitter {
         }
         throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "There were errors during index split");
       } else {
-        // complete the switch remove original index
+        // complete the switch - remove original index
         for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
           SolrCore subCore = cores.get(partitionNumber);
           String oldIndexPath = subCore.getDataDir() + "index";
@@ -357,6 +294,137 @@ public class SolrIndexSplitter {
     }
   }
 
+  private static class ShardSplittingQuery extends Query {
+    private final int partition;
+    private final SchemaField field;
+    private final DocRouter.Range[] rangesArr;
+    private final DocRouter docRouter;
+    private final String splitKey;
+
+    ShardSplittingQuery(int partition, SchemaField field, DocRouter.Range[] rangesArr, DocRouter docRouter, String splitKey) {
+      this.partition = partition;
+      this.field = field;
+      this.rangesArr = rangesArr;
+      this.docRouter = docRouter;
+      this.splitKey = splitKey;
+    }
+
+    @Override
+    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+      return new ConstantScoreWeight(this, boost) {
+
+        @Override
+        public Scorer scorer(LeafReaderContext context) throws IOException {
+          FixedBitSet set = findDocsToDelete(context);
+          log.info("### partition=" + partition + ", leaf=" + context + ", maxDoc=" + context.reader().maxDoc() +
+          ", numDels=" + context.reader().numDeletedDocs() + ", setLen=" + set.length() + ", setCard=" + set.cardinality());
+          Bits liveDocs = context.reader().getLiveDocs();
+          if (liveDocs != null) {
+            // check that we don't delete already deleted docs
+            FixedBitSet dels = FixedBitSet.copyOf(liveDocs);
+            dels.flip(0, dels.length());
+            dels.and(set);
+            if (dels.cardinality() > 0) {
+              log.error("### INVALID DELS " + dels.cardinality());
+            }
+          }
+          return new ConstantScoreScorer(this, score(), new BitSetIterator(set, set.length()));
+        }
+
+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+          return false;
+        }
+
+        @Override
+        public String toString() {
+          return "weight(shardSplittingQuery,part" + partition + ")";
+        }
+      };
+    }
+
+    private FixedBitSet findDocsToDelete(LeafReaderContext readerContext) throws IOException {
+      LeafReader reader = readerContext.reader();
+      FixedBitSet docSet = new FixedBitSet(reader.maxDoc());
+      Bits liveDocs = reader.getLiveDocs();
+
+      Terms terms = reader.terms(field.getName());
+      TermsEnum termsEnum = terms==null ? null : terms.iterator();
+      if (termsEnum == null) return docSet;
+
+      BytesRef term = null;
+      PostingsEnum postingsEnum = null;
+      HashBasedRouter hashRouter = docRouter instanceof HashBasedRouter ? (HashBasedRouter)docRouter : null;
+
+      CharsRefBuilder idRef = new CharsRefBuilder();
+      for (;;) {
+        term = termsEnum.next();
+        if (term == null) break;
+
+        // figure out the hash for the term
+
+        // FUTURE: if conversion to strings costs too much, we could
+        // specialize and use the hash function that can work over bytes.
+        field.getType().indexedToReadable(term, idRef);
+        String idString = idRef.toString();
+
+        if (splitKey != null) {
+          // todo have composite routers support these kind of things instead
+          String part1 = getRouteKey(idString);
+          if (part1 == null)
+            continue;
+          if (!splitKey.equals(part1))  {
+            continue;
+          }
+        }
+
+        int hash = 0;
+        if (hashRouter != null && rangesArr != null) {
+          hash = hashRouter.sliceHash(idString, null, null, null);
+        }
+
+        postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
+        postingsEnum = BitsFilteredPostingsEnum.wrap(postingsEnum, liveDocs);
+        for (;;) {
+          int doc = postingsEnum.nextDoc();
+          if (doc == DocIdSetIterator.NO_MORE_DOCS) break;
+          if (rangesArr == null) {
+            if (doc % partition != 0) {
+              docSet.set(doc);
+            }
+          } else  {
+            if (!rangesArr[partition].includes(hash)) {
+              docSet.set(doc);
+            }
+          }
+        }
+      }
+      return docSet;
+    }
+
+    @Override
+    public String toString(String field) {
+      return "shardSplittingQuery";
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      }
+      if (!(obj instanceof ShardSplittingQuery)) {
+        return false;
+      }
+      ShardSplittingQuery q = (ShardSplittingQuery) obj;
+      return partition == q.partition;
+    }
+
+    @Override
+    public int hashCode() {
+      return partition;
+    }
+  }
+
   FixedBitSet[] split(LeafReaderContext readerContext) throws IOException {
     LeafReader reader = readerContext.reader();
     FixedBitSet[] docSets = new FixedBitSet[numPieces];

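In outline, the offline path added above works by hard-linking every parent segment file into the sub-core's "index.split" directory and then deleting the documents that fall outside that partition. A minimal standalone sketch of those mechanics in plain Lucene, assuming hypothetical directories; the Query argument stands in for the ShardSplittingQuery defined above:

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;
    import org.apache.lucene.store.IOContext;

    public class OfflineSplitSketch {
      /** parent = the parent shard's index; subDir = the sub-core's "index.split" directory. */
      static void splitInto(Directory parent, Directory subDir, Query dropOutsidePartition) throws IOException {
        Directory hardLinked = new HardlinkCopyDirectoryWrapper(subDir);
        for (String file : parent.listAll()) {
          // the wrapper hard-links instead of copying bytes where the filesystem allows it
          hardLinked.copyFrom(parent, file, file, IOContext.DEFAULT);
        }
        try (IndexWriter iw = new IndexWriter(subDir, new IndexWriterConfig())) {
          // drop every document that does not belong to this partition, then commit
          iw.deleteDocuments(dropOutsidePartition);
          iw.commit();
        }
      }
    }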
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
index 8783931..956bd66 100644
--- a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
+++ b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
@@ -36,10 +36,10 @@ public class SplitIndexCommand extends UpdateCommand {
   public DocRouter router;
   public String routeFieldName;
   public String splitKey;
-  public boolean hardLink;
+  public boolean offline;
 
   public SplitIndexCommand(SolrQueryRequest req, List<String> paths, List<SolrCore> cores, List<DocRouter.Range> ranges,
-                           DocRouter router, String routeFieldName, String splitKey, boolean hardLink) {
+                           DocRouter router, String routeFieldName, String splitKey, boolean offline) {
     super(req);
     this.paths = paths;
     this.cores = cores;
@@ -47,7 +47,7 @@ public class SplitIndexCommand extends UpdateCommand {
     this.router = router;
     this.routeFieldName = routeFieldName;
     this.splitKey = splitKey;
-    this.hardLink = hardLink;
+    this.offline = offline;
   }
 
   @Override
@@ -68,7 +68,7 @@ public class SplitIndexCommand extends UpdateCommand {
     if (splitKey != null) {
       sb.append(",split.key=" + splitKey);
     }
-    sb.append(",hardLink=" + hardLink);
+    sb.append(",offline=" + offline);
     sb.append('}');
     return sb.toString();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 22862b4..dd5f9f3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -134,7 +134,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
       killerThread.start();
       killCounter.incrementAndGet();
 
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, null, null);
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, null, null, false);
 
       log.info("Layout after split: \n");
       printLayout();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index ca33f37..4667c95 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -119,14 +119,24 @@ public class ShardSplitTest extends BasicDistributedZkTest {
   Creates a collection with replicationFactor=1, splits a shard. Restarts the sub-shard leader node.
   Add a replica. Ensure count matches in leader and replica.
    */
+  @Test
   public void testSplitStaticIndexReplication() throws Exception {
+    doSplitStaticIndexReplication(false);
+  }
+
+  @Test
+  public void testSplitStaticIndexReplicationOffline() throws Exception {
+    doSplitStaticIndexReplication(true);
+  }
+
+  private void doSplitStaticIndexReplication(boolean offline) throws Exception {
     waitForThingsToLevelOut(15);
 
     DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
     Replica replica = defCol.getReplicas().get(0);
     String nodeName = replica.getNodeName();
 
-    String collectionName = "testSplitStaticIndexReplication";
+    String collectionName = "testSplitStaticIndexReplication_" + (offline ? "offline" : "online");
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
     create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
     create.setCreateNodeSet(nodeName); // we want to create the leader on a fixed node so that we know which one to restart later
@@ -144,6 +154,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
         CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
         splitShard.setShardName(SHARD1);
+        splitShard.setOffline(offline);
         String asyncId = splitShard.processAsync(client);
         RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
         if (state == RequestStatusState.COMPLETED)  {
@@ -354,8 +365,17 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
   @Test
   public void testSplitMixedReplicaTypes() throws Exception {
+    doSplitMixedReplicaTypes(false);
+  }
+
+  @Test
+  public void testSplitMixedReplicaTypesOffline() throws Exception {
+    doSplitMixedReplicaTypes(true);
+  }
+
+  private void doSplitMixedReplicaTypes(boolean offline) throws Exception {
     waitForThingsToLevelOut(15);
-    String collectionName = "testSplitMixedReplicaTypes";
+    String collectionName = "testSplitMixedReplicaTypes" + (offline ? "offline" : "online");
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2, 2, 2);
     create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
     create.process(cloudClient);
@@ -368,6 +388,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
     CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
     splitShard.setShardName(SHARD1);
+    splitShard.setOffline(offline);
     splitShard.process(cloudClient);
     waitForThingsToLevelOut(15);
 
@@ -401,7 +422,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     assertEquals("actual PULL", numPull, actualPull.get());
   }
 
-    @Test
+  @Test
   @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
   public void testSplitWithChaosMonkey() throws Exception {
     waitForThingsToLevelOut(15);
@@ -608,6 +629,15 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
   @Test
   public void testSplitShardWithRule() throws Exception {
+    doSplitShardWithRule(false);
+  }
+
+  @Test
+  public void testSplitShardWithRuleOffline() throws Exception {
+    doSplitShardWithRule(true);
+  }
+
+  private void doSplitShardWithRule(boolean offline) throws Exception {
     waitForThingsToLevelOut(15);
 
     if (usually()) {
@@ -617,14 +647,14 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     }
 
     log.info("Starting testSplitShardWithRule");
-    String collectionName = "shardSplitWithRule";
+    String collectionName = "shardSplitWithRule_" + (offline ? "offline" : "online");
     CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
         .setRule("shard:*,replica:<2,node:*");
     CollectionAdminResponse response = createRequest.process(cloudClient);
     assertEquals(0, response.getStatus());
 
     CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
-        .setShardName("shard1");
+        .setShardName("shard1").setOffline(offline);
     response = splitShardRequest.process(cloudClient);
     assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
   }
@@ -641,7 +671,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     // test with only one range
     subRanges.add(ranges.get(0));
     try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null, false);
       fail("Shard splitting with just one custom hash range should not succeed");
     } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
@@ -652,7 +682,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     subRanges.add(ranges.get(3)); // order shouldn't matter
     subRanges.add(ranges.get(0));
     try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null, false);
       fail("Shard splitting with missing hashes in between given ranges should not succeed");
     } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
@@ -665,7 +695,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     subRanges.add(ranges.get(2));
     subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
     try {
-      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+      splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null, false);
       fail("Shard splitting with overlapping ranges should not succeed");
     } catch (HttpSolrClient.RemoteSolrException e) {
       log.info("Expected exception:", e);
@@ -691,6 +721,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     final int[] docCounts = new int[ranges.size()];
     int numReplicas = shard1.getReplicas().size();
 
+    cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
+    clusterState = cloudClient.getZkStateReader().getClusterState();
+    log.debug("-- COLLECTION: {}", clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
     del("*:*");
     for (int id = 0; id <= 100; id++) {
       String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
@@ -733,7 +766,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     try {
       for (int i = 0; i < 3; i++) {
         try {
-          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
+          splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null, false);
           log.info("Layout after split: \n");
           printLayout();
           break;
@@ -815,7 +848,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
       for (int i = 0; i < 3; i++) {
         try {
-          splitShard(collectionName, SHARD1, null, null);
+          splitShard(collectionName, SHARD1, null, null, false);
           break;
         } catch (HttpSolrClient.RemoteSolrException e) {
           if (e.code() != 500) {
@@ -897,7 +930,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 
       for (int i = 0; i < 3; i++) {
         try {
-          splitShard(collectionName, null, null, splitKey);
+          splitShard(collectionName, null, null, splitKey, false);
           break;
         } catch (HttpSolrClient.RemoteSolrException e) {
           if (e.code() != 500) {
@@ -1000,10 +1033,11 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     }
   }
 
-  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey) throws SolrServerException, IOException {
+  protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey, boolean offline) throws SolrServerException, IOException {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
     params.set("timing", "true");
+    params.set("offline", String.valueOf(offline));
     params.set("collection", collection);
     if (shardId != null)  {
       params.set("shard", shardId);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
index fb0a743..048db77 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
@@ -21,6 +21,7 @@ import java.io.UnsupportedEncodingException;
 import java.lang.invoke.MethodHandles;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -45,7 +46,7 @@ import org.slf4j.LoggerFactory;
 
 public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
+
   File indexDir1 = null, indexDir2 = null, indexDir3 = null;
 
   @BeforeClass
@@ -67,6 +68,15 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
 
   @Test
   public void testSplitByPaths() throws Exception {
+    doTestSplitByPaths(false);
+  }
+
+  @Test
+  public void testSplitByPathsOffline() throws Exception {
+    doTestSplitByPaths(true);
+  }
+
+  private void doTestSplitByPaths(boolean offline) throws Exception {
     LocalSolrQueryRequest request = null;
     try {
       // add two docs
@@ -83,8 +93,8 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter(), null, null, false);
-      new SolrIndexSplitter(command).split();
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter(), null, null, offline);
+      doSplit(command);
 
       Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
           DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
@@ -106,9 +116,33 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       if (request != null) request.close(); // decrefs the searcher
     }
   }
+
+  private void doSplit(SplitIndexCommand command) throws Exception {
+    if (command.offline) {
+      ReadWriteLock lock = command.req.getCore().getSolrCoreState().getIndexWriterLock();
+      lock.writeLock().lockInterruptibly();
+    }
+    try {
+      new SolrIndexSplitter(command).split();
+    } finally {
+      if (command.offline) {
+        ReadWriteLock lock = command.req.getCore().getSolrCoreState().getIndexWriterLock();
+        lock.writeLock().unlock();
+      }
+    }
+
+  }
   
   // SOLR-5144
   public void testSplitDeletes() throws Exception {
+    doTestSplitDeletes(false);
+  }
+
+  public void testSplitDeletesOffline() throws Exception {
+    doTestSplitDeletes(true);
+  }
+
+  private void doTestSplitDeletes(boolean offline) throws Exception {
     LocalSolrQueryRequest request = null;
     try {
       // add two docs
@@ -128,8 +162,8 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter(), null, null, false);
-      new SolrIndexSplitter(command).split();
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter(), null, null, offline);
+      doSplit(command);
 
       Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
           DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
@@ -152,11 +186,25 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
 
   @Test
   public void testSplitByCores() throws Exception {
-    // add two docs
+    doTestSplitByCores(false);
+  }
+
+  @Test
+  public void testSplitByCoresOffline() throws Exception {
+    doTestSplitByCores(true);
+  }
+
+  private void doTestSplitByCores(boolean offline) throws Exception {
+    // add three docs and 1 delete
     String id1 = "dorothy";
     assertU(adoc("id", id1));
     String id2 = "kansas";
     assertU(adoc("id", id2));
+    String id3 = "wizard";
+    assertU(adoc("id", id3));
+    assertU(commit());
+    assertJQ(req("q", "*:*"), "/response/numFound==3");
+    assertU(delI("wizard"));
     assertU(commit());
     assertJQ(req("q", "*:*"), "/response/numFound==2");
     List<DocRouter.Range> ranges = getRanges(id1, id2);
@@ -173,8 +221,8 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       try {
         request = lrf.makeRequest("q", "dummy");
 
-        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1, core2), ranges, new PlainIdRouter(), null, null, false);
-        new SolrIndexSplitter(command).split();
+        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1, core2), ranges, new PlainIdRouter(), null, null, offline);
+        doSplit(command);
       } finally {
         if (request != null) request.close();
       }
@@ -196,6 +244,15 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
 
   @Test
   public void testSplitAlternately() throws Exception {
+    doTestSplitAlternately(false);
+  }
+
+  @Test
+  public void testSplitAlternatelyOffline() throws Exception {
+    doTestSplitAlternately(true);
+  }
+
+  private void doTestSplitAlternately(boolean offline) throws Exception {
     LocalSolrQueryRequest request = null;
     Directory directory = null;
     try {
@@ -210,8 +267,8 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()), null, null, new PlainIdRouter(), null, null, false);
-      new SolrIndexSplitter(command).split();
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()), null, null, new PlainIdRouter(), null, null, offline);
+      doSplit(command);
 
       directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
           DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
@@ -242,7 +299,16 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
   }
 
   @Test
-  public void testSplitByRouteKey() throws Exception  {
+  public void testSplitByRouteKey() throws Exception {
+    doTestSplitByRouteKey(false);
+  }
+
+  @Test
+  public void testSplitByRouteKeyOffline() throws Exception  {
+    doTestSplitByRouteKey(true);
+  }
+
+  private void doTestSplitByRouteKey(boolean offline) throws Exception  {
     File indexDir = createTempDir().toFile();
 
     CompositeIdRouter r1 = new CompositeIdRouter();
@@ -275,8 +341,8 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
     try {
       request = lrf.makeRequest("q", "dummy");
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir.getAbsolutePath()), null, Lists.newArrayList(splitKeyRange), new CompositeIdRouter(), null, splitKey, false);
-      new SolrIndexSplitter(command).split();
+          Lists.newArrayList(indexDir.getAbsolutePath()), null, Lists.newArrayList(splitKeyRange), new CompositeIdRouter(), null, splitKey, offline);
+      doSplit(command);
       directory = h.getCore().getDirectoryFactory().get(indexDir.getAbsolutePath(),
           DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
       DirectoryReader reader = DirectoryReader.open(directory);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
index 1f5fdd2..d777c79 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionAdminRequest.java
@@ -1129,6 +1129,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     protected String ranges;
     protected String splitKey;
     protected String shard;
+    protected boolean offline;
 
     private Properties properties;
 
@@ -1140,6 +1141,15 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
     public SplitShard setRanges(String ranges) { this.ranges = ranges; return this; }
     public String getRanges() { return ranges; }
 
+    public SplitShard setOffline(boolean offline) {
+      this.offline = offline;
+      return this;
+    }
+
+    public boolean getOffline() {
+      return offline;
+    }
+
     public SplitShard setSplitKey(String splitKey) {
       this.splitKey = splitKey;
       return this;
@@ -1176,6 +1186,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
       params.set(CoreAdminParams.SHARD, shard);
       params.set("split.key", this.splitKey);
       params.set(CoreAdminParams.RANGES, ranges);
+      params.set(CommonAdminParams.OFFLINE, offline);
 
       if(properties != null) {
         addProperties(params, properties);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7b5e6b42/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
index 58c39a3..9103450 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CoreAdminParams.java
@@ -84,7 +84,7 @@ public abstract class CoreAdminParams
 
   /** The hash ranges to be used to split a shard or an index */
   public final static String RANGES = "ranges";
-  
+
   public static final String ROLES = "roles";
 
   public static final String REQUESTID = "requestid";

