lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From a.@apache.org
Subject lucene-solr:jira/solr-12509: SOLR-12509: Implement splitting that uses hard-link Directory to avoid copying.
Date Wed, 04 Jul 2018 09:29:29 GMT
Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12509 [created] 0b894b0cc


SOLR-12509: Implement splitting that uses hard-link Directory to avoid copying.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0b894b0c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0b894b0c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0b894b0c

Branch: refs/heads/jira/solr-12509
Commit: 0b894b0cce6d31e89cb6ede750fd16d24a0a66f2
Parents: e8d1057
Author: Andrzej Bialecki <ab@apache.org>
Authored: Wed Jul 4 11:29:06 2018 +0200
Committer: Andrzej Bialecki <ab@apache.org>
Committed: Wed Jul 4 11:29:06 2018 +0200

----------------------------------------------------------------------
 .../cloud/api/collections/SplitShardCmd.java    |  95 +++++++--
 .../solr/cloud/overseer/ReplicaMutator.java     |  28 ++-
 .../solr/handler/admin/CollectionsHandler.java  |   6 +-
 .../org/apache/solr/handler/admin/SplitOp.java  |   3 +-
 .../apache/solr/update/SolrIndexSplitter.java   | 211 ++++++++++++++++++-
 .../apache/solr/update/SplitIndexCommand.java   |   6 +-
 .../cloud/api/collections/ShardSplitTest.java   |  12 +-
 .../solr/update/SolrIndexSplitterTest.java      |  10 +-
 .../solr/common/params/CommonAdminParams.java   |   2 +
 9 files changed, 328 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index ccb111a..052c279 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -51,11 +52,13 @@ import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
 import org.apache.solr.common.params.CommonAdminParams;
+import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.handler.component.ShardHandler;
+import org.apache.solr.util.RTimerTree;
 import org.apache.solr.util.TestInjection;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
@@ -87,13 +90,18 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
   public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList results)
throws Exception {
     boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
+    boolean offline = message.getBool(CommonAdminParams.OFFLINE, false);
+    boolean withTiming = message.getBool(CommonParams.TIMING, false);
+
     String collectionName = message.getStr(CoreAdminParams.COLLECTION);
 
-    log.info("Split shard invoked");
+    log.debug("Split shard invoked: {}", message);
     ZkStateReader zkStateReader = ocmh.zkStateReader;
     zkStateReader.forceUpdateCollection(collectionName);
     AtomicReference<String> slice = new AtomicReference<>();
     slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
+    Set<String> offlineSlices = new HashSet<>();
+    RTimerTree timings = new RTimerTree();
 
     String splitKey = message.getStr("split.key");
     DocCollection collection = clusterState.getCollection(collectionName);
@@ -111,7 +119,9 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted.");
     }
 
+    RTimerTree t = timings.sub("checkDiskSpace");
     checkDiskSpace(collectionName, slice.get(), parentShardLeader);
+    t.stop();
 
     // let's record the ephemeralOwner of the parent leader node
     Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE
+ "/" + parentShardLeader.getNodeName(), null, true);
@@ -120,6 +130,22 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node:
" + parentShardLeader.getNodeName() + " is not live anymore!");
     }
 
+    if (offline) {
+      // deactivate all slices
+      DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
+      Map<String, Object> propMap = new HashMap<>();
+      propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
+      collection.getSlices().forEach(s -> {
+        if (s.getState() == Slice.State.ACTIVE) {
+          offlineSlices.add(s.getName());
+          propMap.put(s.getName(), Slice.State.INACTIVE.toString());
+        }
+      });
+      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
+      ZkNodeProps m = new ZkNodeProps(propMap);
+      inQueue.offer(Utils.toJSON(m));
+    }
+
     List<DocRouter.Range> subRanges = new ArrayList<>();
     List<String> subSlices = new ArrayList<>();
     List<String> subShardNames = new ArrayList<>();
@@ -153,7 +179,9 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
     List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1)
* 2);
 
+    t = timings.sub("fillRanges");
     String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges,
subSlices, subShardNames, firstNrtReplica);
+    t.stop();
 
     try {
 
@@ -196,12 +224,13 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
       Map<String, String> requestMap = new HashMap<>();
       String nodeName = parentShardLeader.getNodeName();
 
+      t = timings.sub("createSubSlicesAndLeadersInState");
       for (int i = 0; i < subRanges.size(); i++) {
         String subSlice = subSlices.get(i);
         String subShardName = subShardNames.get(i);
         DocRouter.Range subRange = subRanges.get(i);
 
-        log.info("Creating slice " + subSlice + " of collection " + collectionName + " on
" + nodeName);
+        log.debug("Creating slice " + subSlice + " of collection " + collectionName + " on
" + nodeName);
 
         Map<String, Object> propMap = new HashMap<>();
         propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
@@ -210,7 +239,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
         propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
         propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
-        propMap.put("shard_parent_node", parentShardLeader.getNodeName());
+        propMap.put("shard_parent_node", nodeName);
         propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
         DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
         inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
@@ -221,7 +250,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         // refresh cluster state
         clusterState = zkStateReader.getClusterState();
 
-        log.info("Adding replica " + subShardName + " as part of slice " + subSlice + " of
collection " + collectionName
+        log.debug("Adding first replica " + subShardName + " as part of slice " + subSlice
+ " of collection " + collectionName
             + " on " + nodeName);
         propMap = new HashMap<>();
         propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
@@ -248,9 +277,11 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
       ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard
leaders", asyncId, requestMap);
 
+      t.stop();
+      t = timings.sub("waitForSubSliceLeadersAlive");
       for (String subShardName : subShardNames) {
         // wait for parent leader to acknowledge the sub-shard core
-        log.info("Asking parent leader to wait for: " + subShardName + " to be alive on:
" + nodeName);
+        log.debug("Asking parent leader to wait for: " + subShardName + " to be alive on:
" + nodeName);
         String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
         CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
         cmd.setCoreName(subShardName);
@@ -266,8 +297,9 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
       ocmh.processResponses(results, shardHandler, true, "SPLITSHARD timed out waiting for
subshard leaders to come up",
           asyncId, requestMap);
+      t.stop();
 
-      log.info("Successfully created all sub-shards for collection " + collectionName + "
parent shard: " + slice
+      log.debug("Successfully created all sub-shards for collection " + collectionName +
" parent shard: " + slice
           + " on: " + parentShardLeader);
 
       log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice " +
slice + " of collection "
@@ -275,6 +307,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
       ModifiableSolrParams params = new ModifiableSolrParams();
       params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
+      params.set("hardLink", "true");
       params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
       for (int i = 0; i < subShardNames.size(); i++) {
         String subShardName = subShardNames.get(i);
@@ -282,18 +315,23 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
       }
       params.set(CoreAdminParams.RANGES, rangesStr);
 
+      t = timings.sub("splitParentCore");
+
       ocmh.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler, asyncId,
requestMap);
 
       ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to invoke SPLIT
core admin command", asyncId,
           requestMap);
 
-      log.info("Index on shard: " + nodeName + " split into two successfully");
+      t.stop();
+
+      log.debug("Index on shard: " + nodeName + " split into two successfully");
 
+      t = timings.sub("applyBufferedUpdates");
       // apply buffered updates on sub-shards
       for (int i = 0; i < subShardNames.size(); i++) {
         String subShardName = subShardNames.get(i);
 
-        log.info("Applying buffered updates on : " + subShardName);
+        log.debug("Applying buffered updates on : " + subShardName);
 
         params = new ModifiableSolrParams();
         params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
@@ -304,8 +342,9 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
       ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed while asking
sub shard leaders" +
           " to apply buffered updates", asyncId, requestMap);
+      t.stop();
 
-      log.info("Successfully applied buffered updates on : " + subShardNames);
+      log.debug("Successfully applied buffered updates on : " + subShardNames);
 
       // Replica creation for the new Slices
       // replica placement is controlled by the autoscaling policy framework
@@ -329,6 +368,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         numTlog.decrementAndGet();
       }
 
+      t = timings.sub("identifyNodesForReplicas");
       List<ReplicaPosition> replicaPositions = Assign.identifyNodes(ocmh.cloudManager,
           clusterState,
           new ArrayList<>(clusterState.getLiveNodes()),
@@ -336,13 +376,15 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
           new ZkNodeProps(collection.getProperties()),
           subSlices, numNrt.get(), numTlog.get(), numPull.get());
       sessionWrapper = PolicyHelper.getLastSessionWrapper(true);
+      t.stop();
 
+      t = timings.sub("createReplicaPlaceholders");
       for (ReplicaPosition replicaPosition : replicaPositions) {
         String sliceName = replicaPosition.shard;
         String subShardNodeName = replicaPosition.node;
         String solrCoreName = Assign.buildSolrCoreName(collectionName, sliceName, replicaPosition.type,
replicaPosition.index);
 
-        log.info("Creating replica shard " + solrCoreName + " as part of slice " + sliceName
+ " of collection "
+        log.debug("Creating replica shard " + solrCoreName + " as part of slice " + sliceName
+ " of collection "
             + collectionName + " on " + subShardNodeName);
 
         // we first create all replicas in DOWN state without actually creating their cores
in order to
@@ -384,7 +426,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
         replicas.add(propMap);
       }
-
+      t.stop();
       assert TestInjection.injectSplitFailureBeforeReplicaCreation();
 
       long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
@@ -419,7 +461,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
 
       if (repFactor == 1) {
         // switch sub shard states to 'active'
-        log.info("Replication factor is 1 so switching shard states");
+        log.debug("Replication factor is 1 so switching shard states");
         DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
         Map<String, Object> propMap = new HashMap<>();
         propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
@@ -431,7 +473,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         ZkNodeProps m = new ZkNodeProps(propMap);
         inQueue.offer(Utils.toJSON(m));
       } else {
-        log.info("Requesting shard state be set to 'recovery'");
+        log.debug("Requesting shard state be set to 'recovery'");
         DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
         Map<String, Object> propMap = new HashMap<>();
         propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
@@ -443,6 +485,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         inQueue.offer(Utils.toJSON(m));
       }
 
+      t = timings.sub("createCoresForReplicas");
       // now actually create replica cores on sub shard nodes
       for (Map<String, Object> replica : replicas) {
         ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
@@ -451,14 +494,19 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
       assert TestInjection.injectSplitFailureAfterReplicaCreation();
 
       ocmh.processResponses(results, shardHandler, true, "SPLITSHARD failed to create subshard
replicas", asyncId, requestMap);
+      t.stop();
 
       log.info("Successfully created all replica shards for all sub-slices " + subSlices);
 
+      t = timings.sub("finalCommit");
       ocmh.commit(results, slice.get(), parentShardLeader);
-
+      t.stop();
+      if (withTiming) {
+        results.add(CommonParams.TIMING, timings.asNamedList());
+      }
       return true;
     } catch (SolrException e) {
-      cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices);
+      cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices,
offlineSlices);
       throw e;
     } catch (Exception e) {
       log.error("Error executing split operation for collection: " + collectionName + " parent
shard: " + slice, e);
@@ -505,7 +553,8 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
     }
   }
 
-  private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionName, String
parentShard, List<String> subSlices) {
+  private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionName, String
parentShard,
+                                   List<String> subSlices, Set<String> offlineSlices)
{
     log.debug("- cleanup after failed split of " + collectionName + "/" + parentShard);
     // get the latest state
     try {
@@ -524,7 +573,7 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
     // set already created sub shards states to CONSTRUCTION - this prevents them
     // from entering into RECOVERY or ACTIVE (SOLR-9455)
     DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
-    Map<String, Object> propMap = new HashMap<>();
+    final Map<String, Object> propMap = new HashMap<>();
     propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
     propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
     for (Slice s : coll.getSlices()) {
@@ -539,6 +588,8 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
     if (parentSlice.getState() == Slice.State.INACTIVE) {
       propMap.put(parentShard, Slice.State.ACTIVE.toString());
     }
+    // plus any other previously deactivated slices
+    offlineSlices.forEach(s -> propMap.put(s, Slice.State.ACTIVE.toString()));
 
     try {
       ZkNodeProps m = new ZkNodeProps(propMap);
@@ -555,11 +606,11 @@ public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd
{
         continue;
       }
       log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
-      propMap = new HashMap<>();
-      propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
-      propMap.put(COLLECTION_PROP, collectionName);
-      propMap.put(SHARD_ID_PROP, subSlice);
-      ZkNodeProps m = new ZkNodeProps(propMap);
+      HashMap<String, Object> props = new HashMap<>();
+      props.put(Overseer.QUEUE_OPERATION, "deleteshard");
+      props.put(COLLECTION_PROP, collectionName);
+      props.put(SHARD_ID_PROP, subSlice);
+      ZkNodeProps m = new ZkNodeProps(props);
       try {
         ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
       } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index f897072..2c99af0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -25,6 +25,7 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.cloud.DistribStateManager;
@@ -52,12 +53,12 @@ import static org.apache.solr.common.params.CommonParams.NAME;
 public class ReplicaMutator {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  protected final SolrCloudManager dataProvider;
+  protected final SolrCloudManager cloudManager;
   protected final DistribStateManager stateManager;
 
-  public ReplicaMutator(SolrCloudManager dataProvider) {
-    this.dataProvider = dataProvider;
-    this.stateManager = dataProvider.getDistribStateManager();
+  public ReplicaMutator(SolrCloudManager cloudManager) {
+    this.cloudManager = cloudManager;
+    this.stateManager = cloudManager.getDistribStateManager();
   }
 
   protected Replica setProperty(Replica replica, String key, String value) {
@@ -200,7 +201,7 @@ public class ReplicaMutator {
   }
 
   public ZkWriteCommand setState(ClusterState clusterState, ZkNodeProps message) {
-    if (Overseer.isLegacy(dataProvider.getClusterStateProvider())) {
+    if (Overseer.isLegacy(cloudManager.getClusterStateProvider())) {
       return updateState(clusterState, message);
     } else {
       return updateStateNew(clusterState, message);
@@ -224,7 +225,7 @@ public class ReplicaMutator {
       ClusterStateMutator.getShardNames(numShards, shardNames);
       Map<String, Object> createMsg = Utils.makeMap(NAME, cName);
       createMsg.putAll(message.getProperties());
-      writeCommand = new ClusterStateMutator(dataProvider).createCollection(prevState, new
ZkNodeProps(createMsg));
+      writeCommand = new ClusterStateMutator(cloudManager).createCollection(prevState, new
ZkNodeProps(createMsg));
       DocCollection collection = writeCommand.collection;
       newState = ClusterStateMutator.newState(prevState, cName, collection);
     }
@@ -440,7 +441,7 @@ public class ReplicaMutator {
                   log.error("The shard leader node: {} is not live anymore!", shardParentNode);
                   isLeaderSame = false;
                 } else if (!shardParentZkSession.equals(leaderZnode.getOwner())) {
-                  log.error("The zk session id for shard leader node: {} has changed from
{} to {}",
+                  log.error("The zk session id for shard leader node: {} has changed
from {} to {}",
                       shardParentNode, shardParentZkSession, leaderZnode.getOwner());
                   isLeaderSame = false;
                 }
@@ -457,12 +458,21 @@ public class ReplicaMutator {
               propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
               propMap.put(parentSliceName, Slice.State.INACTIVE.toString());
               propMap.put(sliceName, Slice.State.ACTIVE.toString());
+              long now = cloudManager.getTimeSource().getEpochTimeNs();
               for (Slice subShardSlice : subShardSlices) {
                 propMap.put(subShardSlice.getName(), Slice.State.ACTIVE.toString());
+                String lastTimeStr = subShardSlice.getStr(ZkStateReader.STATE_TIMESTAMP_PROP);
+                if (lastTimeStr != null) {
+                  long start = Long.parseLong(lastTimeStr);
+                  log.info("TIMINGS: Sub-shard " + subShardSlice.getName() + " recovered
in " +
+                      TimeUnit.MILLISECONDS.convert(now - start, TimeUnit.NANOSECONDS) +
" ms");
+                } else {
+                  log.info("TIMINGS: Sub-shard " + subShardSlice.getName() + " not available:
" + subShardSlice);
+                }
               }
               propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
               ZkNodeProps m = new ZkNodeProps(propMap);
-              return new SliceMutator(dataProvider).updateShardState(prevState, m).collection;
+              return new SliceMutator(cloudManager).updateShardState(prevState, m).collection;
             } else  {
               // we must mark the shard split as failed by switching sub-shards to recovery_failed
state
               Map<String, Object> propMap = new HashMap<>();
@@ -473,7 +483,7 @@ public class ReplicaMutator {
               }
               propMap.put(ZkStateReader.COLLECTION_PROP, collection.getName());
               ZkNodeProps m = new ZkNodeProps(propMap);
-              return new SliceMutator(dataProvider).updateShardState(prevState, m).collection;
+              return new SliceMutator(cloudManager).updateShardState(prevState, m).collection;
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 1b5edd7..d954c40 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -143,8 +143,10 @@ import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_VALUE
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonAdminParams.IN_PLACE_MOVE;
+import static org.apache.solr.common.params.CommonAdminParams.OFFLINE;
 import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
 import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.common.params.CommonParams.TIMING;
 import static org.apache.solr.common.params.CommonParams.VALUE_LONG;
 import static org.apache.solr.common.params.CoreAdminParams.DATA_DIR;
 import static org.apache.solr.common.params.CoreAdminParams.DELETE_DATA_DIR;
@@ -662,7 +664,9 @@ public class CollectionsHandler extends RequestHandlerBase implements
Permission
           SHARD_ID_PROP,
           "split.key",
           CoreAdminParams.RANGES,
-          WAIT_FOR_FINAL_STATE);
+          WAIT_FOR_FINAL_STATE,
+          TIMING,
+          OFFLINE);
       return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
     }),
     DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java b/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
index 9dda6d4..66d3aae 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SplitOp.java
@@ -132,7 +132,8 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
       }
 
 
-      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router,
routeFieldName, splitKey);
+      boolean hardLink = req.getParams().getBool("hardLink", true);
+      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router,
routeFieldName, splitKey, hardLink);
       core.getUpdateHandler().split(cmd);
 
       if (it.handler.coreContainer.isZooKeeperAware()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index aadbe74..841e835 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.Future;
 
 import org.apache.lucene.index.CodecReader;
 import org.apache.lucene.index.FilterCodecReader;
@@ -31,15 +32,23 @@ import org.apache.lucene.index.SlowCodecReaderWrapper;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;
+import org.apache.lucene.store.LockFactory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.CompositeIdRouter;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.CachingDirectoryFactory;
+import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.handler.IndexFetcher;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.BitsFilteredPostingsEnum;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -62,6 +71,110 @@ public class SolrIndexSplitter {
   int currPartition = 0;
   String routeFieldName;
   String splitKey;
+  boolean hardLink;
+
+  private static class HardLinkDirectoryFactoryWrapper extends DirectoryFactory {
+
+    private final DirectoryFactory delegate;
+
+    HardLinkDirectoryFactoryWrapper(DirectoryFactory delegate) {
+      this.delegate = delegate;
+    }
+
+    public DirectoryFactory getDelegate() {
+      return delegate;
+    }
+
+    private Directory unwrap(Directory dir) {
+      while (dir instanceof HardlinkCopyDirectoryWrapper) {
+        dir = ((HardlinkCopyDirectoryWrapper)dir).getDelegate();
+      }
+      return dir;
+    }
+
+    @Override
+    public void doneWithDirectory(Directory directory) throws IOException {
+      delegate.doneWithDirectory(unwrap(directory));
+    }
+
+    @Override
+    public void addCloseListener(Directory dir, CachingDirectoryFactory.CloseListener closeListener)
{
+      delegate.addCloseListener(unwrap(dir), closeListener);
+    }
+
+    @Override
+    public void close() throws IOException {
+      delegate.close();
+    }
+
+    @Override
+    protected Directory create(String path, LockFactory lockFactory, DirContext dirContext)
throws IOException {
+      throw new UnsupportedOperationException("create");
+    }
+
+    @Override
+    protected LockFactory createLockFactory(String rawLockType) throws IOException {
+      throw new UnsupportedOperationException("createLockFactory");
+    }
+
+    @Override
+    public boolean exists(String path) throws IOException {
+      return delegate.exists(path);
+    }
+
+    @Override
+    public void remove(Directory dir) throws IOException {
+      delegate.remove(unwrap(dir));
+    }
+
+    @Override
+    public void remove(Directory dir, boolean afterCoreClose) throws IOException {
+      delegate.remove(unwrap(dir), afterCoreClose);
+    }
+
+    @Override
+    public void remove(String path, boolean afterCoreClose) throws IOException {
+      delegate.remove(path, afterCoreClose);
+    }
+
+    @Override
+    public void remove(String path) throws IOException {
+      delegate.remove(path);
+    }
+
+    private Directory wrap(Directory dir) {
+      if (dir instanceof HardlinkCopyDirectoryWrapper) {
+        return dir;
+      } else {
+        return new HardlinkCopyDirectoryWrapper(dir);
+      }
+    }
+    @Override
+    public Directory get(String path, DirContext dirContext, String rawLockType) throws IOException
{
+      Directory dir = delegate.get(path, dirContext, rawLockType);
+      return wrap(dir);
+    }
+
+    @Override
+    public void incRef(Directory directory) {
+      delegate.incRef(unwrap(directory));
+    }
+
+    @Override
+    public boolean isPersistent() {
+      return delegate.isPersistent();
+    }
+
+    @Override
+    public void release(Directory directory) throws IOException {
+      delegate.release(unwrap(directory));
+    }
+
+    @Override
+    public void init(NamedList args) {
+      delegate.init(args);
+    }
+  }
 
   public SolrIndexSplitter(SplitIndexCommand cmd) {
     searcher = cmd.getReq().getSearcher();
@@ -86,6 +199,7 @@ public class SolrIndexSplitter {
     if (cmd.splitKey != null) {
       splitKey = getRouteKey(cmd.splitKey);
     }
+    this.hardLink = cmd.hardLink;
   }
 
   public void split() throws IOException {
@@ -113,16 +227,24 @@ public class SolrIndexSplitter {
       boolean success = false;
 
       RefCounted<IndexWriter> iwRef = null;
-      IndexWriter iw = null;
-      if (cores != null) {
+      IndexWriter iw;
+      if (cores != null && !hardLink) {
         SolrCore subCore = cores.get(partitionNumber);
         iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
         iw = iwRef.get();
       } else {
         SolrCore core = searcher.getCore();
-        String path = paths.get(partitionNumber);
+        String path;
+        DirectoryFactory factory;
+        if (hardLink && cores != null) {
+          path =  cores.get(partitionNumber).getDataDir() + "index.split";
+          factory = new HardLinkDirectoryFactoryWrapper(core.getDirectoryFactory());
+        } else {
+          factory = core.getDirectoryFactory();
+          path = paths.get(partitionNumber);
+        }
         iw = SolrIndexWriter.create(core, "SplittingIndexWriter"+partitionNumber + (ranges
!= null ? " " + ranges.get(partitionNumber) : ""), path,
-                                    core.getDirectoryFactory(), true, core.getLatestSchema(),
+                                    factory, true, core.getLatestSchema(),
                                     core.getSolrConfig().indexConfig, core.getDeletionPolicy(),
core.getCodec());
       }
 
@@ -151,9 +273,88 @@ public class SolrIndexSplitter {
           }
         }
       }
-
     }
+    // All sub-indexes were created successfully.
+    // When using hard-linking, switch directories & refresh the cores.
+    if (hardLink && cores != null) {
+      boolean switchOk = true;
+      for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
+        SolrCore subCore = cores.get(partitionNumber);
+        String indexDirPath = subCore.getIndexDir();
+
+        log.debug("Switching directories");
+        String hardLinkPath = subCore.getDataDir() + "index.split";
+        subCore.modifyIndexProps("index.split");
+        try {
+          subCore.getUpdateHandler().newIndexWriter(false);
+          openNewSearcher(subCore);
+        } catch (Exception e) {
+          log.error("Failed to switch sub-core " + indexDirPath + " to " + hardLinkPath +
", split will fail.", e);
+          switchOk = false;
+          break;
+        }
+      }
+      if (!switchOk) {
+        // rollback the switch
+        for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++)
{
+          SolrCore subCore = cores.get(partitionNumber);
+          Directory dir = null;
+          try {
+            dir = subCore.getDirectoryFactory().get(subCore.getDataDir(), DirectoryFactory.DirContext.META_DATA,
+                subCore.getSolrConfig().indexConfig.lockType);
+            dir.deleteFile(IndexFetcher.INDEX_PROPERTIES);
+          } finally {
+            if (dir != null) {
+              subCore.getDirectoryFactory().release(dir);
+            }
+          }
+          // switch back if necessary and remove the hard-linked dir
+          String hardLinkPath = subCore.getDataDir() + "index.split";
+          try {
+            dir = subCore.getDirectoryFactory().get(hardLinkPath, DirectoryFactory.DirContext.DEFAULT,
+                subCore.getSolrConfig().indexConfig.lockType);
+            subCore.getDirectoryFactory().doneWithDirectory(dir);
+            subCore.getDirectoryFactory().remove(dir);
+          } finally {
+            if (dir != null) {
+              subCore.getDirectoryFactory().release(dir);
+            }
+          }
+          subCore.getUpdateHandler().newIndexWriter(false);
+          try {
+            openNewSearcher(subCore);
+          } catch (Exception e) {
+            log.warn("Error rolling back failed split of " + hardLinkPath, e);
+          }
+        }
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "There were errors
during index split");
+      } else {
+        // complete the switch, then remove the original index
+        for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++)
{
+          SolrCore subCore = cores.get(partitionNumber);
+          String oldIndexPath = subCore.getDataDir() + "index";
+          Directory indexDir = null;
+          try {
+            indexDir = subCore.getDirectoryFactory().get(oldIndexPath,
+                DirectoryFactory.DirContext.DEFAULT, subCore.getSolrConfig().indexConfig.lockType);
+            subCore.getDirectoryFactory().doneWithDirectory(indexDir);
+            subCore.getDirectoryFactory().remove(indexDir);
+          } finally {
+            if (indexDir != null) {
+              subCore.getDirectoryFactory().release(indexDir);
+            }
+          }
+        }
+      }
+    }
+  }
 
+  private void openNewSearcher(SolrCore core) throws Exception {
+    Future[] waitSearcher = new Future[1];
+    core.getSearcher(true, false, waitSearcher, true);
+    if (waitSearcher[0] != null) {
+      waitSearcher[0].get();
+    }
   }
 
   FixedBitSet[] split(LeafReaderContext readerContext) throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
index eaa1e59..8783931 100644
--- a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
+++ b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
@@ -36,8 +36,10 @@ public class SplitIndexCommand extends UpdateCommand {
   public DocRouter router;
   public String routeFieldName;
   public String splitKey;
+  public boolean hardLink;
 
-  public SplitIndexCommand(SolrQueryRequest req, List<String> paths, List<SolrCore>
cores, List<DocRouter.Range> ranges, DocRouter router, String routeFieldName, String
splitKey) {
+  public SplitIndexCommand(SolrQueryRequest req, List<String> paths, List<SolrCore>
cores, List<DocRouter.Range> ranges,
+                           DocRouter router, String routeFieldName, String splitKey, boolean
hardLink) {
     super(req);
     this.paths = paths;
     this.cores = cores;
@@ -45,6 +47,7 @@ public class SplitIndexCommand extends UpdateCommand {
     this.router = router;
     this.routeFieldName = routeFieldName;
     this.splitKey = splitKey;
+    this.hardLink = hardLink;
   }
 
   @Override
@@ -65,6 +68,7 @@ public class SplitIndexCommand extends UpdateCommand {
     if (splitKey != null) {
       sb.append(",split.key=" + splitKey);
     }
+    sb.append(",hardLink=" + hardLink);
     sb.append('}');
     return sb.toString();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
index 4411bc4..ca33f37 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/ShardSplitTest.java
@@ -62,7 +62,9 @@ import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.util.LogLevel;
 import org.apache.solr.util.TestInjection;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -72,6 +74,7 @@ import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
 import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
 import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 
+@LogLevel("org.apache.solr.cloud.api.collections=DEBUG")
 @Slow
 public class ShardSplitTest extends BasicDistributedZkTest {
 
@@ -358,6 +361,11 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     create.process(cloudClient);
     waitForRecoveriesToFinish(collectionName, false);
 
+    for (int i = 0; i < 100; i++) {
+      cloudClient.add(collectionName, getDoc("id", "id-" + i, "foo_s", "bar " + i));
+    }
+    cloudClient.commit(collectionName);
+
     CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
     splitShard.setShardName(SHARD1);
     splitShard.process(cloudClient);
@@ -995,6 +1003,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
   protected void splitShard(String collection, String shardId, List<DocRouter.Range>
subRanges, String splitKey) throws SolrServerException, IOException {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
+    params.set("timing", "true");
     params.set("collection", collection);
     if (shardId != null)  {
       params.set("shard", shardId);
@@ -1019,7 +1028,8 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
 
     try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
-      baseServer.request(request);
+      NamedList<Object> rsp = baseServer.request(request);
+      log.info("Shard split response: " + Utils.toJSONString(rsp));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
index ae743da..fb0a743 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
@@ -83,7 +83,7 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null,
ranges, new PlainIdRouter(), null, null);
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null,
ranges, new PlainIdRouter(), null, null, false);
       new SolrIndexSplitter(command).split();
 
       Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
@@ -128,7 +128,7 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null,
ranges, new PlainIdRouter(), null, null);
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null,
ranges, new PlainIdRouter(), null, null, false);
       new SolrIndexSplitter(command).split();
 
       Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
@@ -173,7 +173,7 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       try {
         request = lrf.makeRequest("q", "dummy");
 
-        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1,
core2), ranges, new PlainIdRouter(), null, null);
+        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1,
core2), ranges, new PlainIdRouter(), null, null, false);
         new SolrIndexSplitter(command).split();
       } finally {
         if (request != null) request.close();
@@ -210,7 +210,7 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()),
null, null, new PlainIdRouter(), null, null);
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()),
null, null, new PlainIdRouter(), null, null, false);
       new SolrIndexSplitter(command).split();
 
       directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
@@ -275,7 +275,7 @@ public class SolrIndexSplitterTest extends SolrTestCaseJ4 {
     try {
       request = lrf.makeRequest("q", "dummy");
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir.getAbsolutePath()), null, Lists.newArrayList(splitKeyRange),
new CompositeIdRouter(), null, splitKey);
+          Lists.newArrayList(indexDir.getAbsolutePath()), null, Lists.newArrayList(splitKeyRange),
new CompositeIdRouter(), null, splitKey, false);
       new SolrIndexSplitter(command).split();
       directory = h.getCore().getDirectoryFactory().get(indexDir.getAbsolutePath(),
           DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0b894b0c/solr/solrj/src/java/org/apache/solr/common/params/CommonAdminParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CommonAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CommonAdminParams.java
index c39b4a8..94bf215 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CommonAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CommonAdminParams.java
@@ -25,6 +25,8 @@ public interface CommonAdminParams
   String WAIT_FOR_FINAL_STATE = "waitForFinalState";
   /** Allow in-place move of replicas that use shared filesystems. */
   String IN_PLACE_MOVE = "inPlaceMove";
+  /** Put the collection off-line (with all shards in INACTIVE state). */
+  String OFFLINE = "offline";
   /** Timeout for replicas to become active. */
   String TIMEOUT = "timeout";
 }


Mime
View raw message