lucene-commits mailing list archives

From: a.@apache.org
Subject: lucene-solr:master: SOLR-12208: Add simulator support for properly tracking INDEX.sizeInBytes and INDEX.sizeInGB.
Date: Tue, 12 Jun 2018 14:22:07 GMT
Repository: lucene-solr
Updated Branches:
  refs/heads/master 90e4eca9d -> 7c4bad0e8


SOLR-12208: Add simulator support for properly tracking INDEX.sizeInBytes and INDEX.sizeInGB.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7c4bad0e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7c4bad0e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7c4bad0e

Branch: refs/heads/master
Commit: 7c4bad0e8f6c6befa1aeb79b9a047cae40134ba8
Parents: 90e4eca
Author: Andrzej Bialecki <ab@apache.org>
Authored: Tue Jun 12 16:19:51 2018 +0200
Committer: Andrzej Bialecki <ab@apache.org>
Committed: Tue Jun 12 16:22:00 2018 +0200

----------------------------------------------------------------------
 .../cloud/autoscaling/IndexSizeTriggerTest.java |  12 +-
 .../cloud/autoscaling/sim/SimCloudManager.java  |  52 ++++++---
 .../sim/SimClusterStateProvider.java            | 110 ++++++++++++-------
 .../cloud/autoscaling/sim/TestLargeCluster.java |   8 +-
 .../solrj/cloud/autoscaling/ReplicaInfo.java    |   2 +
 5 files changed, 121 insertions(+), 63 deletions(-)
----------------------------------------------------------------------
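
In short, the patch gives every simulated replica an explicit INDEX.sizeInBytes value: a new core starts at SimCloudManager.DEFAULT_IDX_SIZE_BYTES (10 KiB), each added document grows it by SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES (500 bytes), deletes shrink it, and shard splits divide the documents among the sub-shards before recomputing their sizes. The GB-valued Suggestion.coreidxsize property is derived from the byte value via Suggestion.ConditionType.CORE_IDX.convertVal. A minimal standalone sketch of that arithmetic follows; it is not part of the commit, and the bytes-to-GB divisor is an assumption here, not something the diff below spells out.

// Illustrative sketch only -- not part of the commit. Mirrors the sizing model
// the diff below implements in SimClusterStateProvider.
public class SimIndexSizeModel {
  // SimCloudManager.DEFAULT_IDX_SIZE_BYTES: size of an empty simulated core
  static final long DEFAULT_IDX_SIZE_BYTES = 10240;   // 10 KiB
  // SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES: growth per indexed document
  static final long DEFAULT_DOC_SIZE_BYTES = 500;

  /** INDEX.sizeInBytes for a simulated core holding numDocs documents. */
  static long sizeInBytes(long numDocs) {
    return DEFAULT_IDX_SIZE_BYTES + numDocs * DEFAULT_DOC_SIZE_BYTES;
  }

  /** INDEX.sizeInGB; assumes CORE_IDX.convertVal divides by 1024^3. */
  static double sizeInGB(long sizeInBytes) {
    return (double) sizeInBytes / (1024L * 1024L * 1024L);
  }

  public static void main(String[] args) {
    // Example: splitting a 1000-doc shard into two sub-shards, as in
    // simSplitShard -- each sub-shard gets half the docs plus the base size.
    long numDocs = 1000;
    long perSubShardDocs = numDocs / 2;
    long perSubShardBytes = sizeInBytes(perSubShardDocs);
    System.out.printf("sub-shard: %d docs, %d bytes, %.9f GB%n",
        perSubShardDocs, perSubShardBytes, sizeInGB(perSubShardBytes));
  }
}
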


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7c4bad0e/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index a62048a..41be72c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -56,7 +56,6 @@ import org.apache.solr.util.LogLevel;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -137,7 +136,6 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
   }
 
   @Test
-  @Ignore("https://issues.apache.org/jira/browse/SOLR-12208")
   public void testTrigger() throws Exception {
     String collectionName = "testTrigger_collection";
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
@@ -235,7 +233,6 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
   }
 
   @Test
-  //@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12392")
   public void testSplitIntegration() throws Exception {
     String collectionName = "testSplitIntegration_collection";
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
@@ -348,7 +345,6 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
   }
 
   @Test
-  //@LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12392")
   public void testMergeIntegration() throws Exception {
     String collectionName = "testMergeIntegration_collection";
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
@@ -459,10 +455,10 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
 
   @Test
   public void testMixedBounds() throws Exception {
-    if (cloudManager instanceof SimCloudManager) {
-      log.warn("Requires SOLR-12208");
-      return;
-    }
+//    if (cloudManager instanceof SimCloudManager) {
+//      log.warn("Requires SOLR-12208");
+//      return;
+//    }
 
     String collectionName = "testMixedBounds_collection";
     CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7c4bad0e/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
index 900fc76..63dd5bf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimCloudManager.java
@@ -126,12 +126,13 @@ public class SimCloudManager implements SolrCloudManager {
   private Overseer.OverseerThread triggerThread;
   private ThreadGroup triggerThreadGroup;
   private SolrResourceLoader loader;
-  private MetricsHistoryHandler historyHandler;
+  private MetricsHandler metricsHandler;
+  private MetricsHistoryHandler metricsHistoryHandler;
   private TimeSource timeSource;
 
   private static int nodeIdPort = 10000;
-  public static int DEFAULT_DISK = 1000; // 1000 GB
-  public static int DEFAULT_IDX_SIZE_BYTES = 1000000000; // 1 GB
+  public static int DEFAULT_DISK = 1024; // 1000 GiB
+  public static long DEFAULT_IDX_SIZE_BYTES = 10240; // 10 kiB
 
   /**
    * Create a simulated cluster. This cluster uses the following components:
@@ -396,10 +397,10 @@ public class SimCloudManager implements SolrCloudManager {
     clusterStateProvider.simAddNode(nodeId);
     LOG.trace("-- added node " + nodeId);
     // initialize history handler if this is the first node
-    if (historyHandler == null && liveNodesSet.size() == 1) {
-      MetricsHandler metricsHandler = new MetricsHandler(metricManager);
-      historyHandler = new MetricsHistoryHandler(nodeId, metricsHandler, solrClient, this, Collections.emptyMap());
-      historyHandler.initializeMetrics(metricManager, SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag, CommonParams.METRICS_HISTORY_PATH);
+    if (metricsHistoryHandler == null && liveNodesSet.size() == 1) {
+      metricsHandler = new MetricsHandler(metricManager);
+      metricsHistoryHandler = new MetricsHistoryHandler(nodeId, metricsHandler, solrClient, this, Collections.emptyMap());
+      metricsHistoryHandler.initializeMetrics(metricManager, SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag, CommonParams.METRICS_HISTORY_PATH);
     }
     return nodeId;
   }
@@ -417,6 +418,16 @@ public class SimCloudManager implements SolrCloudManager {
     if (withValues) {
       nodeStateProvider.simRemoveNodeValues(nodeId);
     }
+    if (liveNodesSet.isEmpty()) {
+      // remove handlers
+      if (metricsHistoryHandler != null) {
+        IOUtils.closeQuietly(metricsHistoryHandler);
+        metricsHistoryHandler = null;
+      }
+      if (metricsHandler != null) {
+        metricsHandler = null;
+      }
+    }
     LOG.trace("-- removed node " + nodeId);
   }
 
@@ -642,15 +653,18 @@ public class SimCloudManager implements SolrCloudManager {
     if (req.getPath() != null) {
       if (req.getPath().startsWith("/admin/autoscaling") ||
           req.getPath().startsWith("/cluster/autoscaling") ||
-          req.getPath().startsWith("/admin/metrics/history") ||
-          req.getPath().startsWith("/cluster/metrics/history")
+          req.getPath().startsWith("/admin/metrics") ||
+          req.getPath().startsWith("/cluster/metrics")
           ) {
         metricManager.registry("solr.node").counter("ADMIN." + req.getPath() + ".requests").inc();
         boolean autoscaling = req.getPath().contains("autoscaling");
+        boolean history = req.getPath().contains("history");
         if (autoscaling) {
           incrementCount("autoscaling");
-        } else {
+        } else if (history) {
           incrementCount("metricsHistory");
+        } else {
+          incrementCount("metrics");
         }
         ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
         params.set(CommonParams.PATH, req.getPath());
@@ -669,10 +683,18 @@ public class SimCloudManager implements SolrCloudManager {
         if (autoscaling) {
           autoScalingHandler.handleRequest(queryRequest, queryResponse);
         } else {
-          if (historyHandler != null) {
-            historyHandler.handleRequest(queryRequest, queryResponse);
+          if (history) {
+            if (metricsHistoryHandler != null) {
+              metricsHistoryHandler.handleRequest(queryRequest, queryResponse);
+            } else {
+              throw new UnsupportedOperationException("must add at least 1 node first");
+            }
           } else {
-            throw new UnsupportedOperationException("must add at least 1 node first");
+            if (metricsHandler != null) {
+              metricsHandler.handleRequest(queryRequest, queryResponse);
+            } else {
+              throw new UnsupportedOperationException("must add at least 1 node first");
+            }
           }
         }
         if (queryResponse.getException() != null) {
@@ -824,8 +846,8 @@ public class SimCloudManager implements SolrCloudManager {
 
   @Override
   public void close() throws IOException {
-    if (historyHandler != null) {
-      IOUtils.closeQuietly(historyHandler);
+    if (metricsHistoryHandler != null) {
+      IOUtils.closeQuietly(metricsHistoryHandler);
     }
     IOUtils.closeQuietly(clusterStateProvider);
     IOUtils.closeQuietly(nodeStateProvider);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7c4bad0e/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
index 20ffca9..4943874 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java
@@ -111,6 +111,8 @@ import static org.apache.solr.common.params.CommonParams.NAME;
 public class SimClusterStateProvider implements ClusterStateProvider {
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  public static final long DEFAULT_DOC_SIZE_BYTES = 500;
+
   private final LiveNodesSet liveNodes;
   private final SimDistribStateManager stateManager;
   private final SimCloudManager cloudManager;
@@ -232,6 +234,19 @@ public class SimClusterStateProvider implements ClusterStateProvider {
 
   // todo: maybe hook up DistribStateManager /clusterstate.json watchers?
 
+  private ReplicaInfo getReplicaInfo(Replica r) {
+    List<ReplicaInfo> list = nodeReplicaMap.get(r.getNodeName());
+    if (list == null) {
+      return null;
+    }
+    for (ReplicaInfo ri : list) {
+      if (r.getName().equals(ri.getName())) {
+        return ri;
+      }
+    }
+    return null;
+  }
+
   /**
    * Add a new node to the cluster.
    * @param nodeId unique node id
@@ -455,10 +470,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       List<ReplicaInfo> replicas = nodeReplicaMap.computeIfAbsent(nodeId, n -> new ArrayList<>());
       // mark replica as active
       replicaInfo.getVariables().put(ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
-      // add a property expected in Policy calculations and in tests
-      // NOTE: this confusingly reuses INDEX.sizeInBytes name but
-      // the actual value is expressed in GB units!!!
-      replicaInfo.getVariables().put(Suggestion.coreidxsize, 1);
+      // add a property expected in Policy calculations, if missing
+      if (replicaInfo.getVariable(Suggestion.ConditionType.CORE_IDX.metricsAttribute) == null) {
+        replicaInfo.getVariables().put(Suggestion.ConditionType.CORE_IDX.metricsAttribute, SimCloudManager.DEFAULT_IDX_SIZE_BYTES);
+        replicaInfo.getVariables().put(Suggestion.coreidxsize,
+            Suggestion.ConditionType.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES));
+      }
 
       replicas.add(replicaInfo);
       LOG.trace("-- simAddReplica {}", replicaInfo);
@@ -482,7 +499,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           Utils.parseMetricsReplicaName(replicaInfo.getCollection(), replicaInfo.getCore()));
       cloudManager.getMetricManager().registry(registry).counter("UPDATE./update.requests");
       cloudManager.getMetricManager().registry(registry).counter("QUERY./select.requests");
-      cloudManager.getMetricManager().registerGauge(null, registry, () -> 1000, "", true, "INDEX.sizeInBytes");
+      cloudManager.getMetricManager().registerGauge(null, registry,
+          () -> replicaInfo.getVariable(Suggestion.ConditionType.CORE_IDX.metricsAttribute),
+          "", true, "INDEX.sizeInBytes");
       if (runLeaderElection) {
         simRunLeaderElection(Collections.singleton(replicaInfo.getCollection()), true);
       }
@@ -601,14 +620,8 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       // mark all replicas as non-leader (probably not necessary) and collect all active and live
       List<ReplicaInfo> active = new ArrayList<>();
       s.getReplicas().forEach(r -> {
-        AtomicReference<ReplicaInfo> riRef = new AtomicReference<>();
         // find our ReplicaInfo for this replica
-        nodeReplicaMap.get(r.getNodeName()).forEach(info -> {
-          if (info.getName().equals(r.getName())) {
-            riRef.set(info);
-          }
-        });
-        ReplicaInfo ri = riRef.get();
+        ReplicaInfo ri = getReplicaInfo(r);
         if (ri == null) {
           throw new IllegalStateException("-- could not find ReplicaInfo for replica " + r);
         }
@@ -978,7 +991,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
     String numDocsStr = leader.getStr("SEARCHER.searcher.numDocs", "0");
     long numDocs = Long.parseLong(numDocsStr);
     long newNumDocs = numDocs / subSlices.size();
-    long remainder = numDocs % subSlices.size();
+    long remainderDocs = numDocs % subSlices.size();
+    long newIndexSize = SimCloudManager.DEFAULT_IDX_SIZE_BYTES + newNumDocs * DEFAULT_DOC_SIZE_BYTES;
+    long remainderIndexSize = SimCloudManager.DEFAULT_IDX_SIZE_BYTES + remainderDocs * DEFAULT_DOC_SIZE_BYTES;
     String remainderSlice = null;
 
     for (ReplicaPosition replicaPosition : replicaPositions) {
@@ -992,15 +1007,19 @@ public class SimClusterStateProvider implements ClusterStateProvider {
       replicaProps.put(ZkStateReader.BASE_URL_PROP, Utils.getBaseUrlForNodeName(subShardNodeName, "http"));
 
       long replicasNumDocs = newNumDocs;
+      long replicasIndexSize = newIndexSize;
       if (remainderSlice == null) {
         remainderSlice = subSliceName;
       }
       if (remainderSlice.equals(subSliceName)) { // only add to one sub slice
-        replicasNumDocs += remainder;
+        replicasNumDocs += remainderDocs;
+        replicasIndexSize += remainderIndexSize;
       }
       replicaProps.put("SEARCHER.searcher.numDocs", replicasNumDocs);
       replicaProps.put("SEARCHER.searcher.maxDoc", replicasNumDocs);
       replicaProps.put("SEARCHER.searcher.deletedDocs", 0);
+      replicaProps.put(Suggestion.ConditionType.CORE_IDX.metricsAttribute, replicasIndexSize);
+      replicaProps.put(Suggestion.coreidxsize, Suggestion.ConditionType.CORE_IDX.convertVal(replicasIndexSize));
 
       ReplicaInfo ri = new ReplicaInfo("core_node" + Assign.incAndGetId(stateManager, collectionName, 0),
           solrCoreName, collectionName, replicaPosition.shard, replicaPosition.type, subShardNodeName, replicaProps);
@@ -1150,24 +1169,28 @@ public class SimClusterStateProvider implements ClusterStateProvider {
             continue;
           }
           cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
-          String numDocsStr = leader.getStr("SEARCHER.searcher.numDocs");
-          if (numDocsStr == null) {
-            LOG.debug("-- no docs in " + leader);
-            continue;
-          }
-          long numDocs = Long.parseLong(numDocsStr);
-          if (numDocs == 0) {
+          ReplicaInfo ri = getReplicaInfo(leader);
+          Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
+          if (numDocs == null || numDocs.intValue() <= 0) {
             LOG.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
             continue;
           }
-          if (numDocsStr != null) {
-            modified = true;
-            try {
-              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
-              simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
-            } catch (Exception e) {
-              throw new IOException(e);
+          modified = true;
+          try {
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", 1, true, false);
+            simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", -1, true, false);
+            Number indexSize = (Number)ri.getVariable(Suggestion.ConditionType.CORE_IDX.metricsAttribute);
+            if (indexSize != null && indexSize.longValue() > SimCloudManager.DEFAULT_IDX_SIZE_BYTES) {
+              indexSize = indexSize.longValue() - DEFAULT_DOC_SIZE_BYTES;
+              simSetShardValue(collection, s.getName(), Suggestion.ConditionType.CORE_IDX.metricsAttribute,
+                  indexSize.intValue(), false, false);
+              simSetShardValue(collection, s.getName(), Suggestion.coreidxsize,
+                  Suggestion.ConditionType.CORE_IDX.convertVal(indexSize), false, false);
+            } else {
+              throw new Exception("unexpected indexSize ri=" + ri);
             }
+          } catch (Exception e) {
+            throw new IOException(e);
           }
         }
       }
@@ -1185,18 +1208,19 @@ public class SimClusterStateProvider implements ClusterStateProvider {
             }
 
             cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
-            String numDocsStr = leader.getStr("SEARCHER.searcher.numDocs");
-            if (numDocsStr == null) {
-              continue;
-            }
-            long numDocs = Long.parseLong(numDocsStr);
-            if (numDocs == 0) {
+            ReplicaInfo ri = getReplicaInfo(leader);
+            Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
+            if (numDocs == null || numDocs.intValue() == 0) {
               continue;
             }
             modified = true;
             try {
               simSetShardValue(collection, s.getName(), "SEARCHER.searcher.deletedDocs", numDocs, false, false);
               simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 0, false, false);
+              simSetShardValue(collection, s.getName(), Suggestion.ConditionType.CORE_IDX.metricsAttribute,
+                  SimCloudManager.DEFAULT_IDX_SIZE_BYTES, false, false);
+              simSetShardValue(collection, s.getName(), Suggestion.coreidxsize,
+                  Suggestion.ConditionType.CORE_IDX.convertVal(SimCloudManager.DEFAULT_IDX_SIZE_BYTES), false, false);
             } catch (Exception e) {
               throw new IOException(e);
             }
@@ -1212,16 +1236,24 @@ public class SimClusterStateProvider implements ClusterStateProvider {
           }
           Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
           Replica leader = s.getLeader();
-          if (leader != null) {
-            cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
+          if (leader == null) {
+            LOG.debug("-- no leader in " + s);
+            continue;
           }
+          cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
           modified = true;
           try {
             simSetShardValue(collection, s.getName(), "SEARCHER.searcher.numDocs", 1, true, false);
             simSetShardValue(collection, s.getName(), "SEARCHER.searcher.maxDoc", 1, true, false);
-            // Policy reuses this value and expects it to be in GB units!!!
-            // the idea here is to increase the index size by 500 bytes with each doc
-            // simSetShardValue(collection, s.getName(), "INDEX.sizeInBytes", 500, true, false);
+
+            ReplicaInfo ri = getReplicaInfo(leader);
+            Number indexSize = (Number)ri.getVariable(Suggestion.ConditionType.CORE_IDX.metricsAttribute);
+            // for each new document increase the size by DEFAULT_DOC_SIZE_BYTES
+            indexSize = indexSize.longValue() + DEFAULT_DOC_SIZE_BYTES;
+            simSetShardValue(collection, s.getName(), Suggestion.ConditionType.CORE_IDX.metricsAttribute,
+                indexSize.longValue(), false, false);
+            simSetShardValue(collection, s.getName(), Suggestion.coreidxsize,
+                Suggestion.ConditionType.CORE_IDX.convertVal(indexSize), false, false);
           } catch (Exception e) {
             throw new IOException(e);
           }
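
Note how the delete paths above adjust the tracked size: delete-by-id subtracts DEFAULT_DOC_SIZE_BYTES only while the size is still above SimCloudManager.DEFAULT_IDX_SIZE_BYTES (anything else is treated as an unexpected state), and the delete-by-query path resets the shard straight back to that base size. A hedged standalone sketch of that rule, with illustrative names not taken from the patch:

// Illustrative sketch only -- not part of the commit.
public class SimDeleteSizeModel {
  static final long BASE_BYTES = 10240; // SimCloudManager.DEFAULT_IDX_SIZE_BYTES
  static final long DOC_BYTES = 500;    // SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES

  /** Size after deleting one document by id; sizes at or below the base are unexpected. */
  static long afterDeleteById(long currentBytes) {
    if (currentBytes <= BASE_BYTES) {
      throw new IllegalStateException("unexpected indexSize=" + currentBytes);
    }
    return currentBytes - DOC_BYTES;
  }

  /** Size after a delete-by-query that empties the shard: back to the base size. */
  static long afterDeleteByQuery() {
    return BASE_BYTES;
  }

  public static void main(String[] args) {
    long size = BASE_BYTES + 3 * DOC_BYTES;       // three docs indexed: 11740 bytes
    System.out.println(afterDeleteById(size));    // 11240
    System.out.println(afterDeleteByQuery());     // 10240
  }
}
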

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7c4bad0e/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
index b30e633..8cb589b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/sim/TestLargeCluster.java
@@ -102,7 +102,7 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
     triggerFinishedCount.set(0);
     triggerFinishedLatch = new CountDownLatch(1);
     listenerEvents.clear();
-    // disable .scheduled_maintenance
+    // disable .scheduled_maintenance and .auto_add_replicas
     String suspendTriggerCommand = "{" +
         "'suspend-trigger' : {'name' : '.scheduled_maintenance'}" +
         "}";
@@ -110,6 +110,12 @@ public class TestLargeCluster extends SimSolrCloudTestCase {
     SolrClient solrClient = cluster.simGetSolrClient();
     NamedList<Object> response = solrClient.request(req);
     assertEquals(response.get("result").toString(), "success");
+    suspendTriggerCommand = "{" +
+        "'suspend-trigger' : {'name' : '.auto_add_replicas'}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, suspendTriggerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
 
     // do this in advance if missing
     if (!cluster.getSimClusterStateProvider().simListCollections().contains(CollectionAdminParams.SYSTEM_COLL)) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7c4bad0e/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
index 50e77f8..edc7269 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/autoscaling/ReplicaInfo.java
@@ -92,10 +92,12 @@ public class ReplicaInfo implements MapWriter {
     });
   }
 
+  /** Replica "coreNode" name. */
   public String getName() {
     return name;
   }
 
+  /** SolrCore name. */
   public String getCore() {
     return core;
   }

