lucene-commits mailing list archives

From: a.@apache.org
Subject: [lucene-solr] 03/03: SOLR-15055: Initial implementation of `withCollection`.
Date: Tue, 05 Jan 2021 19:23:50 GMT
This is an automated email from the ASF dual-hosted git repository.

ab pushed a commit to branch jira/solr-15055
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 8bda9aec15831bc0878a9977eedb3de411014ed3
Author: Andrzej Bialecki <ab@apache.org>
AuthorDate: Tue Jan 5 20:23:01 2021 +0100

    SOLR-15055: Initial implementation of `withCollection`.
---
 .../apache/solr/cloud/api/collections/Assign.java  |  8 ++-
 .../cloud/api/collections/CreateCollectionCmd.java | 71 ++++++++++++++++++++--
 .../placement/impl/ReplicaPlacementImpl.java       |  5 +-
 .../plugins/AffinityPlacementFactory.java          | 49 +++++++++++++++
 .../apache/solr/cluster/placement/Builders.java    |  1 +
 .../plugins/AffinityPlacementFactoryTest.java      | 62 +++++++++++++++++++
 .../apache/solr/common/cloud/ReplicaPosition.java  |  7 ++-
 7 files changed, 192 insertions(+), 11 deletions(-)
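For context, `withCollection` is a create-time Collections API parameter: the new collection names an existing single-shard collection whose replicas should be colocated with it, and the placement plugin then places a replica of that secondary collection on every node that receives a replica of the new collection. A minimal usage sketch follows, assuming a cluster configured to use the affinity placement plugin; the collection names, config set, and ZooKeeper address are illustrative, and the CREATE call is issued with raw parameters so no assumptions are made about dedicated SolrJ setters.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class WithCollectionExample {
  public static void main(String[] args) throws Exception {
    // hypothetical ZooKeeper address
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {

      // the colocated ("secondary") collection must already exist and have exactly one shard,
      // as enforced in CreateCollectionCmd below
      CollectionAdminRequest.createCollection("secondary", "_default", 1, 1).process(client);

      // create the primary collection, passing withCollection=secondary as a raw parameter
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", "CREATE");
      params.set("name", "primary");
      params.set("numShards", "2");
      params.set("nrtReplicas", "2");
      params.set("collection.configName", "_default");
      params.set("withCollection", "secondary");
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/collections", params));
    }
  }
}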

diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 786bfa9..430bb02 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -339,7 +339,7 @@ public class Assign {
     return nodeNameVsShardCount;
   }
 
-  // throw an exception if any node int the supplied list is not live.
+  // throw an exception if any node in the supplied list is not live.
   // Empty or null list always succeeds and returns the input.
   private static List<String> checkLiveNodes(List<String> createNodeList, ClusterState clusterState) {
     Set<String> liveNodes = clusterState.getLiveNodes();
@@ -392,7 +392,8 @@ public class Assign {
     public final int numTlogReplicas;
     public final int numPullReplicas;
 
-    public AssignRequest(String collectionName, List<String> shardNames, List<String> nodes, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
+    public AssignRequest(String collectionName, List<String> shardNames, List<String> nodes,
+                         int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
       this.collectionName = collectionName;
       this.shardNames = shardNames;
       this.nodes = nodes;
@@ -453,6 +454,7 @@ public class Assign {
      public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest) throws Assign.AssignmentException, IOException, InterruptedException {
       ClusterState clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
       List<String> nodeList = assignRequest.nodes; // can this be empty list?
+      String collectionName = assignRequest.collectionName;
 
       if (nodeList == null || nodeList.isEmpty()) {
         HashMap<String, Assign.ReplicaCount> nodeNameVsShardCount =
@@ -471,7 +473,7 @@ public class Assign {
       for (String aShard : assignRequest.shardNames) {
        for (Map.Entry<Replica.Type, Integer> e : countsPerReplicaType(assignRequest).entrySet()) {
           for (int j = 0; j < e.getValue(); j++) {
-            result.add(new ReplicaPosition(aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
+            result.add(new ReplicaPosition(collectionName, aShard, j, e.getKey(), nodeList.get(i % nodeList.size())));
             i++;
           }
         }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 2e2a06c..45d1abc 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -30,6 +30,7 @@ import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.solr.client.solrj.cloud.AlreadyExistsException;
 import org.apache.solr.client.solrj.cloud.BadVersionException;
@@ -42,6 +43,7 @@ import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.ReplicaPlacement;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.Aliases;
@@ -78,7 +80,10 @@ import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
 import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
 import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
 import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
+import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
+import static org.apache.solr.common.params.CollectionAdminParams.WITH_COLLECTION;
 import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 import static org.apache.solr.common.params.CommonAdminParams.WAIT_FOR_FINAL_STATE;
 import static org.apache.solr.common.params.CommonParams.NAME;
@@ -116,6 +121,25 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "collection alias already exists: " + collectionName);
     }
 
+    String withCollection = message.getStr(WITH_COLLECTION);
+    String withCollectionShard = null;
+    if (withCollection != null) {
+      String realWithCollection = aliases.resolveSimpleAlias(withCollection);
+      if (!clusterState.hasCollection(realWithCollection)) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "The 'withCollection' does not exist:
" + realWithCollection);
+      } else  {
+        DocCollection collection = clusterState.getCollection(realWithCollection);
+        if (collection.getActiveSlices().size() > 1)  {
+          throw new SolrException(ErrorCode.BAD_REQUEST, "The `withCollection` must have only one shard, found: " + collection.getActiveSlices().size());
+        }
+        withCollectionShard = collection.getActiveSlices().iterator().next().getName();
+      }
+      if (!realWithCollection.equals(withCollection)) {
+        message = message.plus(WITH_COLLECTION, realWithCollection);
+        withCollection = realWithCollection;
+      }
+    }
+
     String configName = getConfigName(collectionName, message);
     if (configName == null) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No config set found to
associate with the collection.");
@@ -191,14 +215,17 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
       Map<String,ShardRequest> coresToCreate = new LinkedHashMap<>();
       ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
       for (ReplicaPosition replicaPosition : replicaPositions) {
+        if (replicaPosition.collection.equals(withCollection)) {
+          continue;
+        }
         String nodeName = replicaPosition.node;
 
         String coreName = Assign.buildSolrCoreName(ocmh.cloudManager.getDistribStateManager(),
-            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(collectionName),
+            ocmh.cloudManager.getClusterStateProvider().getClusterState().getCollection(replicaPosition.collection),
             replicaPosition.shard, replicaPosition.type, true);
         if (log.isDebugEnabled()) {
          log.debug(formatString("Creating core {0} as part of shard {1} of collection {2} on {3}"
-              , coreName, replicaPosition.shard, collectionName, nodeName));
+              , coreName, replicaPosition.shard, replicaPosition.collection, nodeName));
         }
 
         String baseUrl = zkStateReader.getBaseUrlForNodeName(nodeName);
@@ -206,7 +233,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         // Otherwise the core creation fails
         ZkNodeProps props = new ZkNodeProps(
             Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
-            ZkStateReader.COLLECTION_PROP, collectionName,
+            ZkStateReader.COLLECTION_PROP, replicaPosition.collection,
             ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
             ZkStateReader.CORE_NAME_PROP, coreName,
             ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
@@ -221,7 +248,7 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
 
         params.set(CoreAdminParams.NAME, coreName);
         params.set(COLL_CONF, configName);
-        params.set(CoreAdminParams.COLLECTION, collectionName);
+        params.set(CoreAdminParams.COLLECTION, replicaPosition.collection);
         params.set(CoreAdminParams.SHARD, replicaPosition.shard);
         params.set(ZkStateReader.NUM_SHARDS_PROP, shardNames.size());
         params.set(CoreAdminParams.NEW_COLLECTION, "true");
@@ -241,7 +268,6 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         sreq.shards = new String[]{baseUrl};
         sreq.actualShards = sreq.shards;
         sreq.params = params;
-
         coresToCreate.put(coreName, sreq);
       }
 
@@ -254,8 +280,27 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
       }
 
       shardRequestTracker.processResponses(results, shardHandler, false, null, Collections.emptySet());
+
+      if (withCollection != null) {
+        // process replica placements for the secondary collection, if any are needed
+        for (ReplicaPosition replicaPosition : replicaPositions) {
+          if (!replicaPosition.collection.equals(withCollection)) {
+            continue;
+          }
+          ZkNodeProps props = new ZkNodeProps(
+              Overseer.QUEUE_OPERATION, ADDREPLICA.toString(),
+              ZkStateReader.COLLECTION_PROP, replicaPosition.collection,
+              ZkStateReader.SHARD_ID_PROP, replicaPosition.shard,
+              "node", replicaPosition.node,
+              CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.TRUE.toString()); // set to true because we want `withCollection` to be ready after this collection is created
+          new AddReplicaCmd(ocmh).call(clusterState, props, results);
+          clusterState = zkStateReader.getClusterState(); // refresh
+        }
+      }
+
       @SuppressWarnings({"rawtypes"})
      boolean failure = results.get("failure") != null && ((SimpleOrderedMap)results.get("failure")).size() > 0;
+      // NOTE: failure doesn't clean up replicas of `withCollection` added above
       if (failure) {
         // Let's cleanup as we hit an exception
        // We shouldn't be passing 'results' here for the cleanup as the response would then contain 'success'
@@ -276,6 +321,22 @@ public class CreateCollectionCmd implements OverseerCollectionMessageHandler.Cmd
         }
       }
 
+      // modify the `withCollection` and store this new collection's name with it
+      if (withCollection != null) {
+        ZkNodeProps props = new ZkNodeProps(
+            Overseer.QUEUE_OPERATION, MODIFYCOLLECTION.toString(),
+            ZkStateReader.COLLECTION_PROP, withCollection,
+            CollectionAdminParams.COLOCATED_WITH, collectionName);
+        ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
+        try {
+          zkStateReader.waitForState(withCollection, 5, TimeUnit.SECONDS, (collectionState) -> collectionName.equals(collectionState.getStr(COLOCATED_WITH)));
+        } catch (TimeoutException e) {
+          log.warn("Timed out waiting to see the {} property set on collection: {}", COLOCATED_WITH,
withCollection);
+          // maybe the overseer queue is backed up, we don't want to fail the create request
+          // because of this time out, continue
+        }
+      }
+
       // create an alias pointing to the new collection, if different from the collectionName
       if (!alias.equals(collectionName)) {
        ocmh.zkStateReader.aliasesManager.applyModificationAndExportToZk(a -> a.cloneWithCollectionAlias(alias, collectionName));
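After a successful create, the colocation is recorded in cluster state: the secondary collection carries a COLOCATED_WITH property naming the new collection (written via the MODIFYCOLLECTION state update above). A small read-back sketch, assuming an already-initialized ZkStateReader and the illustrative collection names from earlier:

import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;

public class ColocationCheck {
  // returns true once the secondary collection carries the marker naming the primary collection
  static boolean isColocatedWith(ZkStateReader zkStateReader, String secondary, String primary) {
    DocCollection coll = zkStateReader.getClusterState().getCollection(secondary);
    // DocCollection extends ZkNodeProps, so the property is readable with getStr()
    return primary.equals(coll.getStr(CollectionAdminParams.COLOCATED_WITH));
  }
}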
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
index 69d9718..e8045d8 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
@@ -80,7 +80,10 @@ class ReplicaPlacementImpl implements ReplicaPlacement {
     List<ReplicaPosition> replicaPositions = new ArrayList<>(replicaPlacementSet.size());
    int index = 0; // This really an arbitrary value when adding replicas and a possible source of core name collisions
     for (ReplicaPlacement placement : replicaPlacementSet) {
-      replicaPositions.add(new ReplicaPosition(placement.getShardName(), index++, SimpleClusterAbstractionsImpl.ReplicaImpl.toCloudReplicaType(placement.getReplicaType()), placement.getNode().getName()));
+      replicaPositions.add(new ReplicaPosition(placement.getCollection().getName(),
+          placement.getShardName(), index++,
+          SimpleClusterAbstractionsImpl.ReplicaImpl.toCloudReplicaType(placement.getReplicaType()),
+          placement.getNode().getName()));
     }
 
     return replicaPositions;
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
index 9c50289..2166a0f 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
@@ -22,11 +22,13 @@ import com.google.common.collect.TreeMultimap;
 import org.apache.solr.cluster.*;
 import org.apache.solr.cluster.placement.*;
 import org.apache.solr.cluster.placement.impl.NodeMetricImpl;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.SuppressForbidden;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.*;
 import java.util.stream.Collectors;
@@ -193,6 +195,22 @@ public class AffinityPlacementFactory implements PlacementPluginFactory<Affinity
       Set<Node> nodes = request.getTargetNodes();
       SolrCollection solrCollection = request.getCollection();
 
+      final SolrCollection secondaryCollection;
+      String withCollection = solrCollection.getCustomProperty(CollectionAdminParams.WITH_COLLECTION);
+      if (withCollection != null) {
+        try {
+          secondaryCollection = cluster.getCollection(withCollection);
+          int numShards = secondaryCollection.getShardNames().size();
+          if (numShards != 1) {
+            throw new PlacementException("Secondary collection '" + withCollection + "' has
" + numShards + " but must have exactly 1.");
+          }
+        } catch (IOException e) {
+          throw new PlacementException("Error retrieving secondary collection '" + withCollection
+ "' information", e);
+        }
+      } else {
+        secondaryCollection = null;
+      }
+
       // Request all needed attributes
       attributeFetcher.requestNodeSystemProperty(AVAILABILITY_ZONE_SYSPROP).requestNodeSystemProperty(REPLICA_TYPE_SYSPROP);
       attributeFetcher
@@ -242,6 +260,37 @@ public class AffinityPlacementFactory implements PlacementPluginFactory<Affinity
         }
       }
 
+      if (secondaryCollection != null) {
+        // 2nd phase to allocate required secondary collection replicas
+        Set<Node> secondaryNodes = new HashSet<>();
+        Shard shard1 = secondaryCollection.iterator().next();
+        shard1.replicas().forEach(r -> {
+          secondaryNodes.add(r.getNode());
+        });
+        Set<ReplicaPlacement> secondaryPlacements = new HashSet<>();
+        Set<Node> alreadyAdded = new HashSet<>();
+        replicaPlacements.forEach(primaryPlacement -> {
+          if (!secondaryNodes.contains(primaryPlacement.getNode())) {
+            if (!alreadyAdded.contains(primaryPlacement.getNode())) {
+              // missing secondary replica on the node - add it
+              secondaryPlacements.add(placementPlanFactory
+                  .createReplicaPlacement(
+                      secondaryCollection,
+                      shard1.getShardName(),
+                      primaryPlacement.getNode(),
+                      // TODO: make this configurable
+                      // we default to PULL because if additional indexing
+                      // capacity is required the admin can manually
+                      // add NRT/TLOG replicas as needed
+                      Replica.ReplicaType.PULL));
+              // avoid adding multiple replicas for multiple shards
+              alreadyAdded.add(primaryPlacement.getNode());
+            }
+          }
+        });
+        replicaPlacements.addAll(secondaryPlacements);
+      }
+
       return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
     }
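The net effect of this second phase is an invariant: every node chosen for a replica of the primary collection also ends up hosting a replica of the secondary collection, either a pre-existing one or a newly placed PULL replica. A sketch of that invariant as a check over a computed PlacementPlan, using the placement API types referenced in this diff (the helper itself is illustrative):

import java.util.HashSet;
import java.util.Set;

import org.apache.solr.cluster.Node;
import org.apache.solr.cluster.placement.PlacementPlan;
import org.apache.solr.cluster.placement.ReplicaPlacement;

public class ColocationInvariant {
  // true if every node receiving a primary replica in the plan also hosts a secondary replica,
  // counting both pre-existing secondary replicas and ones added by the plan
  static boolean holds(PlacementPlan plan, String primaryName, String secondaryName,
                       Set<Node> nodesAlreadyHostingSecondary) {
    Set<Node> secondaryNodes = new HashSet<>(nodesAlreadyHostingSecondary);
    Set<Node> primaryNodes = new HashSet<>();
    for (ReplicaPlacement p : plan.getReplicaPlacements()) {
      String coll = p.getCollection().getName();
      if (primaryName.equals(coll)) {
        primaryNodes.add(p.getNode());
      } else if (secondaryName.equals(coll)) {
        secondaryNodes.add(p.getNode());
      }
    }
    return secondaryNodes.containsAll(primaryNodes);
  }
}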
 
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java b/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
index 21b8369..d0a26ba 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
@@ -54,6 +54,7 @@ public class Builders {
        NodeBuilder nodeBuilder = new NodeBuilder().setNodeName("node_" + n); // Default name, can be changed
         nodeBuilder.setTotalDiskGB(10000.0);
         nodeBuilder.setFreeDiskGB(5000.0);
+        nodeBuilder.setCoreCount(0);
         nodeBuilders.add(nodeBuilder);
       }
       return this;
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
index 81dda9d..78cca03 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
@@ -27,6 +27,7 @@ import org.apache.solr.cluster.placement.*;
 import org.apache.solr.cluster.placement.Builders;
 import org.apache.solr.cluster.placement.impl.PlacementPlanFactoryImpl;
 import org.apache.solr.cluster.placement.impl.PlacementRequestImpl;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.Pair;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -650,6 +651,67 @@ public class AffinityPlacementFactoryTest extends SolrTestCaseJ4 {
     }
   }
 
+  @Test
+  public void testWithCollection() throws Exception {
+    String primaryCollectionName = "testWithCollection_primary";
+    String secondaryCollectionName = "testWithCollection_secondary";
+    int NUM_NODES = 5;
+    Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(NUM_NODES);
+
+    // create the 1-shard secondary collection first
+    Builders.CollectionBuilder secondaryCollectionBuilder = Builders.newCollectionBuilder(secondaryCollectionName);
+    secondaryCollectionBuilder.initializeShardsReplicas(1, 1, 0, 0, clusterBuilder.getLiveNodeBuilders());
+    clusterBuilder.addCollection(secondaryCollectionBuilder);
+    // create an empty primary collection
+    Builders.CollectionBuilder primaryCollectionBuilder = Builders.newCollectionBuilder(primaryCollectionName);
+    primaryCollectionBuilder.addCustomProperty(CollectionAdminParams.WITH_COLLECTION, secondaryCollectionName);
+    clusterBuilder.addCollection(primaryCollectionBuilder);
+
+    Cluster cluster = clusterBuilder.build();
+    SolrCollection primaryCollection = cluster.getCollection(primaryCollectionName);
+    SolrCollection secondaryCollection = cluster.getCollection(secondaryCollectionName);
+
+    // use one node where secondary replica exists and one empty node
+    Set<Node> nodes = new HashSet<>();
+    Node secondaryNode = secondaryCollection.iterator().next().iterator().next().getNode();
+    nodes.add(secondaryNode);
+    for (Node n : cluster.getLiveNodes()) {
+      nodes.add(n);
+      if (nodes.size() > 1) {
+        break;
+      }
+    }
+    PlacementRequestImpl placementRequest = new PlacementRequestImpl(primaryCollection,
+        Set.of("shard1", "shard2"), nodes, 2, 0, 0);
+    PlacementPlanFactory placementPlanFactory = new PlacementPlanFactoryImpl();
+    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
+    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, placementPlanFactory);
+    assertEquals(5, pp.getReplicaPlacements().size());
+    Map<String, Map<String, AtomicInteger>> collShardReplicas = new HashMap<>();
+    Map<Node, Map<String, Map<String, AtomicInteger>>> nodeCollShardReplicas = new HashMap<>();
+    pp.getReplicaPlacements().forEach(p -> {
+      collShardReplicas
+          .computeIfAbsent(p.getCollection().getName(), c -> new HashMap<>())
+          .computeIfAbsent(p.getShardName(), s -> new AtomicInteger())
+          .incrementAndGet();
+      nodeCollShardReplicas
+          .computeIfAbsent(p.getNode(), n -> new HashMap<>())
+          .computeIfAbsent(p.getCollection().getName(), c -> new HashMap<>())
+          .computeIfAbsent(p.getShardName(), s -> new AtomicInteger())
+          .incrementAndGet();
+    });
+    assertEquals("2 collections", 2, collShardReplicas.size());
+    assertEquals("2 nodes", 2, nodeCollShardReplicas.size());
+    nodeCollShardReplicas.forEach((node, colls) -> {
+      assertTrue("should have primary placement on node " + node, colls.containsKey(primaryCollectionName));
+      if (node.equals(secondaryNode)) {
+        assertFalse("should not have the secondary placement on node " + node, colls.containsKey(secondaryCollectionName));
+      } else {
+        assertTrue("should have the secondary placement on node " + node, colls.containsKey(secondaryCollectionName));
+      }
+    });
+  }
+
   @Test @Slow
   public void testScalability() throws Exception {
     log.info("==== numNodes ====");
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
index 62d8761..bb854d6 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ReplicaPosition.java
@@ -19,17 +19,20 @@ package org.apache.solr.common.cloud;
 
 
 public class ReplicaPosition implements Comparable<ReplicaPosition> {
+  public final String collection;
   public final String shard;
   public final int index;
   public final Replica.Type type;
   public String node;
 
-  public ReplicaPosition(String shard, int replicaIdx, Replica.Type type) {
+  public ReplicaPosition(String collection, String shard, int replicaIdx, Replica.Type type) {
+    this.collection = collection;
     this.shard = shard;
     this.index = replicaIdx;
     this.type = type;
   }
-  public ReplicaPosition(String shard, int replicaIdx, Replica.Type type, String node) {
+  public ReplicaPosition(String collection, String shard, int replicaIdx, Replica.Type type, String node) {
+    this.collection = collection;
     this.shard = shard;
     this.index = replicaIdx;
     this.type = type;

