lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From er...@apache.org
Subject [lucene-solr] branch branch_8x updated: SOLR-11998:RebalanceLeaders API broken response format with wt=JSON
Date Mon, 21 Jan 2019 05:24:05 GMT
This is an automated email from the ASF dual-hosted git repository.

erick pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/branch_8x by this push:
     new 83f9835  SOLR-11998:RebalanceLeaders API broken response format with wt=JSON
83f9835 is described below

commit 83f9835a47a00a2ec58a4cf5fc0d492497cf7898
Author: Erick Erickson <Erick.Erickson@gmail.com>
AuthorDate: Sun Jan 20 23:13:19 2019 -0600

    SOLR-11998:RebalanceLeaders API broken response format with wt=JSON
    
    (cherry picked from commit 60aef389cfbe708701c50fc3a68a4082b69ec4d0)
---
 solr/CHANGES.txt                                   |   2 +
 .../solr/handler/admin/RebalanceLeaders.java       |  28 +++---
 solr/solr-ref-guide/src/collections-api.adoc       | 100 ++++++++-------------
 3 files changed, 51 insertions(+), 79 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3c6294f..03786ee 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -270,6 +270,8 @@ Bug Fixes
 
 * SOLR-13091: REBALANCELEADERS is broken (Erick Erickson)
 
+* SOLR-11998: RebalanceLeaders API broken response format with wt=JSON (Erick Erickson)
+
 Improvements
 ----------------------
 
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
index 522a432..12f8f3b 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java
@@ -37,7 +37,7 @@ import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -110,7 +110,7 @@ class RebalanceLeaders {
   final static String INACTIVE_PREFERREDS = "inactivePreferreds";
   final static String ALREADY_LEADERS = "alreadyLeaders";
   final static String SUMMARY = "Summary";
-  final NamedList<Object> results = new NamedList<>();
+  final SimpleOrderedMap results = new SimpleOrderedMap();
   final Map<String, String> pendingOps = new HashMap<>();
   private String collectionName;
 
@@ -154,7 +154,7 @@ class RebalanceLeaders {
     }
 
     checkLeaderStatus();
-    NamedList<Object> summary = new NamedList<>();
+    SimpleOrderedMap summary = new SimpleOrderedMap();
     if (pendingOps.size() == 0) {
       summary.add("Success", "All active replicas with the preferredLeader property set are
leaders");
     } else {
@@ -287,12 +287,12 @@ class RebalanceLeaders {
   // Provide some feedback to the user about what actually happened, or in this case where
no action was
   // possible
   private void addInactiveToResults(Slice slice, Replica replica) {
-    NamedList<Object> inactives = (NamedList<Object>) results.get(INACTIVE_PREFERREDS);
+    SimpleOrderedMap inactives = (SimpleOrderedMap) results.get(INACTIVE_PREFERREDS);
     if (inactives == null) {
-      inactives = new NamedList<>();
+      inactives = new SimpleOrderedMap();
       results.add(INACTIVE_PREFERREDS, inactives);
     }
-    NamedList<Object> res = new NamedList<>();
+    SimpleOrderedMap res = new SimpleOrderedMap();
     res.add("status", "skipped");
     res.add("msg", "Replica " + replica.getName() + " is a preferredLeader for shard " + slice.getName()
+ ", but is inactive. No change necessary");
     inactives.add(replica.getName(), res);
@@ -301,12 +301,12 @@ class RebalanceLeaders {
   // Provide some feedback to the user about what actually happened, or in this case where
no action was
   // necesary since this preferred replica was already the leader
   private void addAlreadyLeaderToResults(Slice slice, Replica replica) {
-    NamedList<Object> alreadyLeaders = (NamedList<Object>) results.get(ALREADY_LEADERS);
+    SimpleOrderedMap alreadyLeaders = (SimpleOrderedMap) results.get(ALREADY_LEADERS);
     if (alreadyLeaders == null) {
-      alreadyLeaders = new NamedList<>();
+      alreadyLeaders = new SimpleOrderedMap();
       results.add(ALREADY_LEADERS, alreadyLeaders);
     }
-    NamedList<Object> res = new NamedList<>();
+    SimpleOrderedMap res = new SimpleOrderedMap();
     res.add("status", "skipped");
     res.add("msg", "Replica " + replica.getName() + " is already the leader for shard " +
slice.getName() + ". No change necessary");
     alreadyLeaders.add(replica.getName(), res);
@@ -459,13 +459,13 @@ class RebalanceLeaders {
 
   // If we actually changed the leader, we should send that fact back in the response.
   private void addToSuccesses(Slice slice, Replica replica) {
-    NamedList<Object> successes = (NamedList<Object>) results.get("successes");
+    SimpleOrderedMap successes = (SimpleOrderedMap) results.get("successes");
     if (successes == null) {
-      successes = new NamedList<>();
+      successes = new SimpleOrderedMap();
       results.add("successes", successes);
     }
     log.info("Successfully changed leader of shard {} to replica {}", slice.getName(), replica.getName());
-    NamedList<Object> res = new NamedList<>();
+    SimpleOrderedMap res = new SimpleOrderedMap();
     res.add("status", "success");
     res.add("msg", "Successfully changed leader of slice " + slice.getName() + " to " + replica.getName());
     successes.add(slice.getName(), res);
@@ -478,12 +478,12 @@ class RebalanceLeaders {
     if (pendingOps.size() == 0) {
       return;
     }
-    NamedList<Object> fails = (NamedList<Object>) new NamedList<>();
+    SimpleOrderedMap fails = new SimpleOrderedMap();
     results.add("failures", fails);
 
     for (Map.Entry<String, String> ent : pendingOps.entrySet()) {
       log.info("Failed to change leader of shard {} to replica {}", ent.getKey(), ent.getValue());
-      NamedList<Object> res = new NamedList<>();
+      SimpleOrderedMap res = new SimpleOrderedMap();
       res.add("status", "failed");
      res.add("msg", String.format(Locale.ROOT, "Could not change leader for slice %s to %s",
ent.getKey(), ent.getValue()));
       fails.add(ent.getKey(), res);
diff --git a/solr/solr-ref-guide/src/collections-api.adoc b/solr/solr-ref-guide/src/collections-api.adoc
index ddaaeb8..640e8f0 100644
--- a/solr/solr-ref-guide/src/collections-api.adoc
+++ b/solr/solr-ref-guide/src/collections-api.adoc
@@ -2145,7 +2145,7 @@ For example, if 10 reassignments are to take place and `maxAtOnce` is
`1` and `m
 
 === REBALANCELEADERS Response
 
-The response will include the status of the request. If the status is anything other than
"0", an error message will explain why the request failed.
+The response will include the status of the request. A status of "0" indicates the request
was _processed_, not that all assignments were successful. Examine the "Summary" section for
that information.
 
 === Examples using REBALANCELEADERS
 
@@ -2155,78 +2155,48 @@ Either of these commands would cause all the active replicas that
had the `prefe
 
 [source,text]
 ----
-http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=collection1&wt=xml
-http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=collection1&maxAtOnce=5&maxWaitSeconds=30&wt=xml
+http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=collection1&wt=json
+
+http://localhost:8983/solr/admin/collections?action=REBALANCELEADERS&collection=collection1&maxAtOnce=5&maxWaitSeconds=30&wt=json
 ----
 
 *Output*
 
-In this example, two replicas in the "alreadyLeaders" section already had the leader assigned
to the same node as the `preferredLeader` property so no action was taken.
+In this example:
 
-The "Success" tag indicates that the command rebalanced all leaders. If, for any reason some
replicas with preferredLeader=true are not leaders, this will be "Failure" rather than "Success".
If a replica cannot be made leader due to not being "Active", it's also considered a failure.
+* In the "alreadyLeaders" section, core_node5 was already the leader, so there were no changes
in leadership for shard1.
+* In the "inactivePreferreds" section, core_node57 had the preferredLeader property set,
but the node was not active, the leader for shard7 was not changed. This is considered successful.
+* In the "successes" section, core_node23 was _not_ the leader for shard3, so leadership
was assigned to that replica.
 
-The replica in the "inactivePreferreds" section had the `preferredLeader` property set but
the node was down and no action was taken. The three nodes in the "successes" section were
made leaders because they had the `preferredLeader` property set but were not leaders and
they were active.
+The "Summary" section with the "Success" tag indicates that the command rebalanced all _active_
replicas with the preferredLeader property set as required. If a replica cannot be made leader
due to not being healthy (for example, it is on a Solr instance that is not running), it's
also considered success.
 
-[source,xml]
-----
-<response>
-  <lst name="responseHeader">
-    <int name="status">0</int>
-    <int name="QTime">123</int>
-  </lst>
-  <lst>
-    <str name="Success">All replicas with the preferredLeader property set are leaders</str>
-  </lst>
-  <lst name="alreadyLeaders">
-    <lst name="core_node1">
-      <str name="status">success</str>
-      <str name="msg">Already leader</str>
-      <str name="nodeName">192.168.1.167:7400_solr</str>
-    </lst>
-    <lst name="core_node17">
-      <str name="status">success</str>
-      <str name="msg">Already leader</str>
-      <str name="nodeName">192.168.1.167:7600_solr</str>
-    </lst>
-  </lst>
-  <lst name="inactivePreferreds">
-    <lst name="core_node4">
-      <str name="status">skipped</str>
-      <str name="msg">Node is a referredLeader, but it's inactive. Skipping</str>
-      <str name="nodeName">192.168.1.167:7500_solr</str>
-    </lst>
-  </lst>
-  <lst name="successes">
-    <lst name="_collection1_shard3_replica1">
-      <str name="status">success</str>
-      <str name="msg">
-        Assigned 'Collection: 'collection1', Shard: 'shard3', Core: 'collection1_shard3_replica1',
BaseUrl:
-        'http://192.168.1.167:8983/solr'' to be leader
-      </str>
-    </lst>
-    <lst name="_collection1_shard5_replica3">
-      <str name="status">success</str>
-      <str name="msg">
-        Assigned 'Collection: 'collection1', Shard: 'shard5', Core: 'collection1_shard5_replica3',
BaseUrl:
-        'http://192.168.1.167:7200/solr'' to be leader
-      </str>
-    </lst>
-    <lst name="_collection1_shard4_replica2">
-      <str name="status">success</str>
-      <str name="msg">
-        Assigned 'Collection: 'collection1', Shard: 'shard4', Core: 'collection1_shard4_replica2',
BaseUrl:
-        'http://192.168.1.167:7300/solr'' to be leader
-      </str>
-    </lst>
-  </lst>
-</response>
+[source,json]
 ----
-
-Examining the clusterstate after issuing this call should show that every live node that
has the `preferredLeader` property should also have the "leader" property set to _true_.
-
-NOTE: The added work done by an NRT leader during indexing is quite small. The primary use-case
is to redistribute the leader role if there are a large number of leaders concentrated on
a small number of nodes. Rebalancing will likely not improve performance unless the imbalance
of leadership roles is measured in multiples of 10.
-
-NOTE: The BALANCESHARDUNIQUE command that distributes the preferredLeader property does not
guarantee perfect distribution and in some collection topoligies it is impossible to make
that guarantee.
+{
+  "responseHeader":{
+    "status":0,
+    "QTime":3054},
+  "Summary":{
+    "Success":"All active replicas with the preferredLeader property set are leaders"},
+  "alreadyLeaders":{
+    "core_node5":{
+      "status":"skipped",
+      "msg":"Replica core_node5 is already the leader for shard shard1. No change necessary"}},
+  "inactivePreferreds":{
+    "core_node57":{
+      "status":"skipped",
+      "msg":"Replica core_node57 is a preferredLeader for shard shard7, but is inactive. No
change necessary"}},
+  "successes":{
+    "shard3":{
+      "status":"success",
+      "msg":"Successfully changed leader of slice shard3 to core_node23"}}}
+----
+
+Examining the clusterstate after issuing this call should show that every active replica
that has the `preferredLeader` property should also have the "leader" property set to _true_.
+
+NOTE: The added work done by an NRT leader is quite small and only present when indexing.
The primary use-case is to redistribute the leader role if there are a large number of leaders
concentrated on a small number of nodes. Rebalancing will likely not improve performance unless
the imbalance of leadership roles is measured in multiples of 10.
+
+NOTE: The BALANCESHARDUNIQUE command that distributes the preferredLeader property does not
guarantee perfect distribution and in some collection topologies it is impossible to make
that guarantee.
 
 [[forceleader]]
 == FORCELEADER: Force Shard Leader


Mime
View raw message