ignite-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From agoncha...@apache.org
Subject [18/18] ignite git commit: IGNITE-5068 - Fixed lost partitions handling
Date Thu, 08 Jun 2017 13:13:52 GMT
IGNITE-5068 - Fixed lost partitions handling


Project: http://git-wip-us.apache.org/repos/asf/ignite/repo
Commit: http://git-wip-us.apache.org/repos/asf/ignite/commit/930e2e01
Tree: http://git-wip-us.apache.org/repos/asf/ignite/tree/930e2e01
Diff: http://git-wip-us.apache.org/repos/asf/ignite/diff/930e2e01

Branch: refs/heads/ignite-5267-merge-ea
Commit: 930e2e0114a72452ca3760aac7d640145a412909
Parents: 1a19b90 3cf4c7b
Author: Alexey Goncharuk <alexey.goncharuk@gmail.com>
Authored: Thu Jun 8 16:12:51 2017 +0300
Committer: Alexey Goncharuk <alexey.goncharuk@gmail.com>
Committed: Thu Jun 8 16:12:51 2017 +0300

----------------------------------------------------------------------
 .../dht/GridDhtPartitionTopologyImpl.java       | 111 +++++++------------
 1 file changed, 42 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ignite/blob/930e2e01/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
----------------------------------------------------------------------
diff --cc modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
index fa5a092,58fb14c..626ae0a
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
@@@ -68,11 -71,7 +68,9 @@@ import static org.apache.ignite.interna
  /**
   * Partition topology.
   */
 -@GridToStringExclude class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
 +@GridToStringExclude
 +public class GridDhtPartitionTopologyImpl implements GridDhtPartitionTopology {
-     /** */
 +    private static final GridDhtPartitionState[] MOVING_STATES = new GridDhtPartitionState[] {MOVING};
- 
      /** If true, then check consistency. */
      private static final boolean CONSISTENCY_CHECK = false;
  
@@@ -897,10 -969,10 +895,10 @@@
                 for (Map.Entry<UUID, GridDhtPartitionMap> entry : node2part.entrySet()) {
                      GridDhtPartitionState state = entry.getValue().get(p);
  
 -                    ClusterNode n = cctx.discovery().node(entry.getKey());
 +                    ClusterNode n = ctx.discovery().node(entry.getKey());
  
-                     if (n != null && state != null && (state == MOVING || state == OWNING) && !nodes.contains(n)
-                         && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
+                     if (n != null && state != null && (state == MOVING || state == OWNING || state == RENTING)
+                         && !nodes.contains(n) && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                          nodes.add(n);
                      }
  
@@@ -917,8 -989,8 +915,8 @@@
                  for (UUID nodeId : diffIds) {
                      assert !affIds.contains(nodeId);
  
-                     if (hasState(p, nodeId, OWNING, MOVING)) {
+                     if (hasState(p, nodeId, OWNING, MOVING, RENTING)) {
 -                        ClusterNode n = cctx.discovery().node(nodeId);
 +                        ClusterNode n = ctx.discovery().node(nodeId);
  
                          if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                              if (nodes == null) {
@@@ -1547,26 -1553,18 +1545,22 @@@
                      }
                      // Update map for remote node.
                      else if (plc != PartitionLossPolicy.IGNORE) {
-                         // TODO
- //                        Set<UUID> nodeIds = part2node.get(part);
- //
- //                        if (nodeIds != null) {
- //                            for (UUID nodeId : nodeIds) {
- //                                GridDhtPartitionMap nodeMap = node2part.get(nodeId);
- //
- //                                if (nodeMap.get(part) != EVICTED)
- //                                    nodeMap.put(part, LOST);
- //                            }
- //                        }
+                         for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
 -                            if (e.getKey().equals(cctx.localNodeId()))
++                            if (e.getKey().equals(ctx.localNodeId()))
+                                 continue;
+ 
+                             if (e.getValue().get(part) != EVICTED)
+                                 e.getValue().put(part, LOST);
+                         }
                      }
  
 -                    if (cctx.events().isRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST))
 -                        cctx.events().addPreloadEvent(part, EVT_CACHE_REBALANCE_PART_DATA_LOST,
 -                            discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
 +                    if (grp.eventRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
 +                        grp.addRebalanceEvent(part,
 +                            EVT_CACHE_REBALANCE_PART_DATA_LOST,
 +                            discoEvt.eventNode(),
 +                            discoEvt.type(),
 +                            discoEvt.timestamp());
 +                    }
                  }
  
                  if (plc != PartitionLossPolicy.IGNORE)
@@@ -1871,9 -1824,9 +1844,9 @@@
          }
  
          if (node2part != null) {
 -            UUID locNodeId = cctx.localNodeId();
 +            UUID locNodeId = ctx.localNodeId();
  
-         GridDhtPartitionMap map = node2part.get(locNodeId);
+             GridDhtPartitionMap map = node2part.get(locNodeId);
  
              if (map == null) {
                  map = new GridDhtPartitionMap(locNodeId,
@@@ -1889,10 -1842,10 +1862,10 @@@
  
              map.put(p, state);
  
-             if (state == MOVING || state == OWNING) {
+             if (state == MOVING || state == OWNING || state == RENTING) {
 -                AffinityAssignment assignment = cctx.affinity().assignment(diffFromAffinityVer);
 +                AffinityAssignment assignment = grp.affinity().cachedAffinity(diffFromAffinityVer);
  
 -                if (!assignment.getIds(p).contains(cctx.localNodeId())) {
 +                if (!assignment.getIds(p).contains(ctx.localNodeId())) {
                      Set<UUID> diffIds = diffFromAffinity.get(p);
  
                      if (diffIds == null)


Mime
View raw message