helix-commits mailing list archives

From l...@apache.org
Subject [4/5] helix git commit: Add deprecated clusterStateVerifiers classes back to their original packages for backward compatibility; mark them all as deprecated.
Date Mon, 20 Mar 2017 16:16:48 GMT
http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/StrictMatchExternalViewVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/StrictMatchExternalViewVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/StrictMatchExternalViewVerifier.java
deleted file mode 100644
index 70bd275..0000000
--- a/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/StrictMatchExternalViewVerifier.java
+++ /dev/null
@@ -1,331 +0,0 @@
-package org.apache.helix.tools.ClusterStateVerifier;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.helix.PropertyKey;
-import org.apache.helix.controller.rebalancer.util.ConstraintBasedAssignment;
-import org.apache.helix.controller.stages.ClusterDataCache;
-import org.apache.helix.manager.zk.ZkClient;
-import org.apache.helix.model.ExternalView;
-import org.apache.helix.model.IdealState;
-import org.apache.helix.model.Partition;
-import org.apache.helix.model.StateModelDefinition;
-import org.apache.helix.task.TaskConstants;
-import org.apache.log4j.Logger;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Verifier that checks whether the ExternalViews of the given resources (or all resources in the
- * cluster) exactly match their ideal mapping (in the IdealState).
- */
-public class StrictMatchExternalViewVerifier extends ZkHelixClusterVerifier {
-  private static Logger LOG = Logger.getLogger(StrictMatchExternalViewVerifier.class);
-
-  private final Set<String> _resources;
-  private final Set<String> _expectLiveInstances;
-
-  public StrictMatchExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
-      Set<String> expectLiveInstances) {
-    super(zkAddr, clusterName);
-    _resources = resources;
-    _expectLiveInstances = expectLiveInstances;
-  }
-
-  public StrictMatchExternalViewVerifier(ZkClient zkClient, String clusterName,
-      Set<String> resources, Set<String> expectLiveInstances) {
-    super(zkClient, clusterName);
-    _resources = resources;
-    _expectLiveInstances = expectLiveInstances;
-  }
-
-  public static class Builder {
-    private String _clusterName;
-    private Set<String> _resources;
-    private Set<String> _expectLiveInstances;
-    private String _zkAddr;
-    private ZkClient _zkClient;
-
-    public StrictMatchExternalViewVerifier build() {
-      if (_clusterName == null || (_zkAddr == null && _zkClient == null)) {
-        throw new IllegalArgumentException("Cluster name or zookeeper info is missing!");
-      }
-
-      if (_zkClient != null) {
-        return new StrictMatchExternalViewVerifier(_zkClient, _clusterName, _resources,
-            _expectLiveInstances);
-      }
-      return new StrictMatchExternalViewVerifier(_zkAddr, _clusterName, _resources,
-          _expectLiveInstances);
-    }
-
-    public Builder(String clusterName) {
-      _clusterName = clusterName;
-    }
-
-    public String getClusterName() {
-      return _clusterName;
-    }
-
-    public Set<String> getResources() {
-      return _resources;
-    }
-
-    public Builder setResources(Set<String> resources) {
-      _resources = resources;
-      return this;
-    }
-
-    public Set<String> getExpectLiveInstances() {
-      return _expectLiveInstances;
-    }
-
-    public Builder setExpectLiveInstances(Set<String> expectLiveInstances) {
-      _expectLiveInstances = expectLiveInstances;
-      return this;
-    }
-
-    public String getZkAddr() {
-      return _zkAddr;
-    }
-
-    public Builder setZkAddr(String zkAddr) {
-      _zkAddr = zkAddr;
-      return this;
-    }
-
-    public ZkClient getZkClient() {
-      return _zkClient;
-    }
-
-    public Builder setZkClient(ZkClient zkClient) {
-      _zkClient = zkClient;
-      return this;
-    }
-  }
-
-  @Override
-  public boolean verify(long timeout) {
-    return verifyByZkCallback(timeout);
-  }
-
-  @Override
-  public boolean verifyByZkCallback(long timeout) {
-    List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
-
-    // setup triggers
-    if (_resources != null && !_resources.isEmpty()) {
-      for (String resource : _resources) {
-        triggers
-            .add(new ClusterVerifyTrigger(_keyBuilder.idealStates(resource), true, false, false));
-        triggers
-            .add(new ClusterVerifyTrigger(_keyBuilder.externalView(resource), true, false, false));
-      }
-
-    } else {
-      triggers.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(), false, true, true));
-      triggers.add(new ClusterVerifyTrigger(_keyBuilder.externalViews(), false, true, true));
-    }
-
-    return verifyByCallback(timeout, triggers);
-  }
-
-  @Override
-  protected boolean verifyState() {
-    try {
-      PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
-      // read cluster once and do verification
-      ClusterDataCache cache = new ClusterDataCache();
-      cache.refresh(_accessor);
-
-      Map<String, IdealState> idealStates = cache.getIdealStates();
-      if (idealStates == null) {
-        // ideal state is null because ideal state is dropped
-        idealStates = Collections.emptyMap();
-      }
-
-      // filter out all resources that use Task state model
-      Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
-      while (it.hasNext()) {
-        Map.Entry<String, IdealState> pair = it.next();
-        if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
-          it.remove();
-        }
-      }
-
-      // verify live instances.
-      if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
-        Set<String> actualLiveNodes = cache.getLiveInstances().keySet();
-        if (!_expectLiveInstances.equals(actualLiveNodes)) {
-          return false;
-        }
-      }
-
-      Map<String, ExternalView> extViews = _accessor.getChildValuesMap(keyBuilder.externalViews());
-      if (extViews == null) {
-        extViews = Collections.emptyMap();
-      }
-
-      // Filter resources if requested
-      if (_resources != null && !_resources.isEmpty()) {
-        idealStates.keySet().retainAll(_resources);
-        extViews.keySet().retainAll(_resources);
-      }
-
-      // if externalView is not empty and idealState doesn't exist
-      // add empty idealState for the resource
-      for (String resource : extViews.keySet()) {
-        if (!idealStates.containsKey(resource)) {
-          idealStates.put(resource, new IdealState(resource));
-        }
-      }
-
-      for (String resourceName : idealStates.keySet()) {
-        ExternalView extView = extViews.get(resourceName);
-        IdealState idealState = idealStates.get(resourceName);
-        if (extView == null) {
-          if (idealState.isExternalViewDisabled()) {
-            continue;
-          } else {
-            LOG.debug("externalView for " + resourceName + " is not available");
-            return false;
-          }
-        }
-
-        boolean result = verifyExternalView(cache, extView, idealState);
-        if (!result) {
-          return false;
-        }
-      }
-      return true;
-    } catch (Exception e) {
-      LOG.error("exception in verification", e);
-      return false;
-    }
-  }
-
-  private boolean verifyExternalView(ClusterDataCache dataCache, ExternalView externalView,
-      IdealState idealState) {
-    Map<String, Map<String, String>> mappingInExtview = externalView.getRecord().getMapFields();
-    Map<String, Map<String, String>> idealPartitionState;
-
-    switch (idealState.getRebalanceMode()) {
-    case FULL_AUTO:
-    case SEMI_AUTO:
-    case USER_DEFINED:
-      idealPartitionState = computeIdealPartitionState(dataCache, idealState);
-      break;
-    case CUSTOMIZED:
-      idealPartitionState = idealState.getRecord().getMapFields();
-      break;
-    case TASK:
-      // ignore jobs
-    default:
-      return true;
-    }
-
-    return mappingInExtview.equals(idealPartitionState);
-  }
-
-  private Map<String, Map<String, String>> computeIdealPartitionState(ClusterDataCache cache,
-      IdealState idealState) {
-    String stateModelDefName = idealState.getStateModelDefRef();
-    StateModelDefinition stateModelDef = cache.getStateModelDef(stateModelDefName);
-
-    Map<String, Map<String, String>> idealPartitionState =
-        new HashMap<String, Map<String, String>>();
-
-    Set<String> liveEnabledInstances = new HashSet<String>(cache.getLiveInstances().keySet());
-    liveEnabledInstances.removeAll(cache.getDisabledInstances());
-
-    for (String partition : idealState.getPartitionSet()) {
-      List<String> preferenceList = ConstraintBasedAssignment
-          .getPreferenceList(new Partition(partition), idealState, liveEnabledInstances);
-      Map<String, String> idealMapping =
-          computeIdealMapping(preferenceList, stateModelDef, liveEnabledInstances);
-      idealPartitionState.put(partition, idealMapping);
-    }
-
-    return idealPartitionState;
-  }
-
-  /**
-   * compute the ideal mapping for resource in SEMI-AUTO based on its preference list
-   */
-  private Map<String, String> computeIdealMapping(List<String> instancePreferenceList,
-      StateModelDefinition stateModelDef, Set<String> liveEnabledInstances) {
-    Map<String, String> instanceStateMap = new HashMap<String, String>();
-
-    if (instancePreferenceList == null) {
-      return instanceStateMap;
-    }
-
-    List<String> statesPriorityList = stateModelDef.getStatesPriorityList();
-    boolean assigned[] = new boolean[instancePreferenceList.size()];
-
-    for (String state : statesPriorityList) {
-      String num = stateModelDef.getNumInstancesPerState(state);
-      int stateCount = -1;
-      if ("N".equals(num)) {
-        stateCount = liveEnabledInstances.size();
-      } else if ("R".equals(num)) {
-        stateCount = instancePreferenceList.size();
-      } else {
-        try {
-          stateCount = Integer.parseInt(num);
-        } catch (Exception e) {
-          LOG.error("Invalid count for state:" + state + " ,count=" + num);
-        }
-      }
-      if (stateCount > 0) {
-        int count = 0;
-        for (int i = 0; i < instancePreferenceList.size(); i++) {
-          String instanceName = instancePreferenceList.get(i);
-
-          if (!assigned[i]) {
-            instanceStateMap.put(instanceName, state);
-            count = count + 1;
-            assigned[i] = true;
-            if (count == stateCount) {
-              break;
-            }
-          }
-        }
-      }
-    }
-
-    return instanceStateMap;
-  }
-
-  @Override
-  public String toString() {
-    String verifierName = getClass().getSimpleName();
-    return verifierName + "(" + _clusterName + "@" + _zkClient.getServers() + "@resources["
-        + _resources != null ? Arrays.toString(_resources.toArray()) : "" + "])";
-  }
-}
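
For context, a minimal usage sketch of the Builder API shown above; the same verifier lives on under org.apache.helix.tools.ClusterVerifiers (as the deprecation notes later in this commit indicate), and the ZooKeeper address, cluster, and resource names here are hypothetical:

import java.util.Collections;

import org.apache.helix.tools.ClusterVerifiers.StrictMatchExternalViewVerifier;

public class StrictMatchVerifierExample {
  public static void main(String[] args) {
    // Hypothetical ZK address, cluster, and resource names.
    StrictMatchExternalViewVerifier verifier =
        new StrictMatchExternalViewVerifier.Builder("MyCluster")
            .setZkAddr("localhost:2181")
            .setResources(Collections.singleton("MyResource"))
            .build();

    // Blocks for up to 30s; true once every ExternalView exactly matches its IdealState.
    boolean converged = verifier.verify(30 * 1000L);
    System.out.println("converged = " + converged);
  }
}

verify(timeout) delegates to verifyByZkCallback(timeout), so the call blocks until every ExternalView exactly matches its IdealState or the timeout expires.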

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/ZkHelixClusterVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/ZkHelixClusterVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/ZkHelixClusterVerifier.java
deleted file mode 100644
index 094deb8..0000000
--- a/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier/ZkHelixClusterVerifier.java
+++ /dev/null
@@ -1,269 +0,0 @@
-package org.apache.helix.tools.ClusterStateVerifier;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.I0Itec.zkclient.IZkChildListener;
-import org.I0Itec.zkclient.IZkDataListener;
-import org.apache.helix.HelixDataAccessor;
-import org.apache.helix.PropertyKey;
-import org.apache.helix.ZNRecord;
-import org.apache.helix.manager.zk.ZKHelixDataAccessor;
-import org.apache.helix.manager.zk.ZkBaseDataAccessor;
-import org.apache.helix.manager.zk.ZkClient;
-import org.apache.helix.util.ZKClientPool;
-import org.apache.log4j.Logger;
-
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-public abstract class ZkHelixClusterVerifier
-    implements IZkChildListener, IZkDataListener, HelixClusterVerifier {
-  private static Logger LOG = Logger.getLogger(ZkHelixClusterVerifier.class);
-  protected static int DEFAULT_TIMEOUT = 30 * 1000;
-  protected static int DEFAULT_PERIOD = 1000;
-
-
-  protected final ZkClient _zkClient;
-  protected final String _clusterName;
-  protected final HelixDataAccessor _accessor;
-  protected final PropertyKey.Builder _keyBuilder;
-  private CountDownLatch _countdown;
-
-  protected static class ClusterVerifyTrigger {
-    final PropertyKey _triggerKey;
-    final boolean _triggerOnDataChange;
-    final boolean _triggerOnChildChange;
-    final boolean _triggerOnChildDataChange;
-
-    public ClusterVerifyTrigger(PropertyKey triggerKey, boolean triggerOnDataChange,
-        boolean triggerOnChildChange, boolean triggerOnChildDataChange) {
-      _triggerKey = triggerKey;
-      _triggerOnDataChange = triggerOnDataChange;
-      _triggerOnChildChange = triggerOnChildChange;
-      _triggerOnChildDataChange = triggerOnChildDataChange;
-    }
-
-    public boolean isTriggerOnDataChange() {
-      return _triggerOnDataChange;
-    }
-
-    public PropertyKey getTriggerKey() {
-      return _triggerKey;
-    }
-
-    public boolean isTriggerOnChildChange() {
-      return _triggerOnChildChange;
-    }
-
-    public boolean isTriggerOnChildDataChange() {
-      return _triggerOnChildDataChange;
-    }
-  }
-
-  public ZkHelixClusterVerifier(ZkClient zkClient, String clusterName) {
-    if (zkClient == null || clusterName == null) {
-      throw new IllegalArgumentException("requires zkClient|clusterName");
-    }
-    _zkClient = zkClient;
-    _clusterName = clusterName;
-    _accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
-    _keyBuilder = _accessor.keyBuilder();
-  }
-
-  public ZkHelixClusterVerifier(String zkAddr, String clusterName) {
-    if (zkAddr == null || clusterName == null) {
-      throw new IllegalArgumentException("requires zkAddr|clusterName");
-    }
-    _zkClient = ZKClientPool.getZkClient(zkAddr);
-    _clusterName = clusterName;
-    _accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
-    _keyBuilder = _accessor.keyBuilder();
-  }
-
-  /**
-   *  Verify the cluster.
- *  The method blocks for at most {@code timeout} milliseconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @param timeout in milliseconds
- * @return true if the verification succeeds, false otherwise.
-   */
-  public boolean verify(long timeout) {
-    return verifyByZkCallback(timeout);
-  }
-
-  /**
-   *  Verify the cluster.
- *  The method blocks for at most 30 seconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @return true if the verification succeeds, false otherwise.
-   */
-  public boolean verify() {
-    return verify(DEFAULT_TIMEOUT);
-  }
-
-  /**
- *  Verify the cluster by relying on ZooKeeper callbacks.
- *  The method blocks for at most {@code timeout} milliseconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @param timeout in milliseconds
- * @return true if the verification succeeds, false otherwise.
-   */
-  public abstract boolean verifyByZkCallback(long timeout);
-
-  /**
- *  Verify the cluster by relying on ZooKeeper callbacks.
- *  The method blocks for at most 30 seconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @return true if the verification succeeds, false otherwise.
-   */
-  public boolean verifyByZkCallback() {
-    return verifyByZkCallback(DEFAULT_TIMEOUT);
-  }
-
-  /**
- *  Verify the cluster by periodically polling the cluster status.
- *  The method blocks for at most {@code timeout} milliseconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @param timeout in milliseconds
- * @param period polling interval in milliseconds
- * @return true if the verification succeeds, false otherwise.
-   */
-  public boolean verifyByPolling(long timeout, long period) {
-    try {
-      long start = System.currentTimeMillis();
-      boolean success;
-      do {
-        success = verifyState();
-        if (success) {
-          return true;
-        }
-        TimeUnit.MILLISECONDS.sleep(period);
-      } while ((System.currentTimeMillis() - start) <= timeout);
-    } catch (Exception e) {
-      LOG.error("Exception in verifier", e);
-    }
-    return false;
-  }
-
-  /**
- *  Verify the cluster by periodically polling the cluster status.
- *  The method blocks for at most 30 seconds.
- *  Returns true if the verification succeeds, otherwise false.
- *
- * @return true if the verification succeeds, false otherwise.
-   */
-  public boolean verifyByPolling() {
-    return verifyByPolling(DEFAULT_TIMEOUT, DEFAULT_PERIOD);
-  }
-
-  protected boolean verifyByCallback(long timeout, List<ClusterVerifyTrigger> triggers) {
-    _countdown = new CountDownLatch(1);
-
-    for (ClusterVerifyTrigger trigger : triggers) {
-      subscribeTrigger(trigger);
-    }
-
-    boolean success = false;
-    try {
-      success = verifyState();
-      if (!success) {
-
-        success = _countdown.await(timeout, TimeUnit.MILLISECONDS);
-        if (!success) {
-          // make a final try if timeout
-          success = verifyState();
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Exception in verifier", e);
-    }
-
-    // clean up
-    _zkClient.unsubscribeAll();
-
-    return success;
-  }
-
-  private void subscribeTrigger(ClusterVerifyTrigger trigger) {
-    String path = trigger.getTriggerKey().getPath();
-    if (trigger.isTriggerOnDataChange()) {
-      _zkClient.subscribeDataChanges(path, this);
-    }
-
-    if (trigger.isTriggerOnChildChange()) {
-      _zkClient.subscribeChildChanges(path, this);
-    }
-
-    if (trigger.isTriggerOnChildDataChange()) {
-      List<String> childs = _zkClient.getChildren(path);
-      for (String child : childs) {
-        String childPath = String.format("%s/%s", path, child);
-        _zkClient.subscribeDataChanges(childPath, this);
-      }
-    }
-  }
-
-  /**
-   * The method actually performs the required verifications.
- * @return true if the verification passes, false otherwise
-   * @throws Exception
-   */
-  protected abstract boolean verifyState() throws Exception;
-
-  @Override
-  public void handleDataChange(String dataPath, Object data) throws Exception {
-    boolean success = verifyState();
-    if (success) {
-      _countdown.countDown();
-    }
-  }
-
-  @Override
-  public void handleDataDeleted(String dataPath) throws Exception {
-    _zkClient.unsubscribeDataChanges(dataPath, this);
-  }
-
-  @Override
-  public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
-    for (String child : currentChilds) {
-      String childPath = String.format("%s/%s", parentPath, child);
-      _zkClient.subscribeDataChanges(childPath, this);
-    }
-
-    boolean success = verifyState();
-    if (success) {
-      _countdown.countDown();
-    }
-  }
-
-  public ZkClient getZkClient() {
-    return _zkClient;
-  }
-
-  public String getClusterName() {
-    return _clusterName;
-  }
-}
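
To illustrate the abstract contract above: a subclass only has to supply verifyByZkCallback(long) and verifyState(); verify(), verifyByPolling(), and verifyByCallback() are inherited. A hypothetical sketch, closely mirroring the ClusterLiveNodesVerifier added later in this commit and assuming the class is re-added under org.apache.helix.tools.ClusterVerifiers:

import java.util.ArrayList;
import java.util.List;

import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;

// Hypothetical verifier: passes once the cluster has at least one live instance.
public class HasLiveInstanceVerifier extends ZkHelixClusterVerifier {

  public HasLiveInstanceVerifier(String zkAddr, String clusterName) {
    super(zkAddr, clusterName);
  }

  @Override
  public boolean verifyByZkCallback(long timeout) {
    // Re-run verifyState() whenever the LIVEINSTANCES children change.
    List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
    triggers.add(new ClusterVerifyTrigger(_keyBuilder.liveInstances(), false, true, true));
    return verifyByCallback(timeout, triggers);
  }

  @Override
  protected boolean verifyState() throws Exception {
    return !_accessor.getChildNames(_keyBuilder.liveInstances()).isEmpty();
  }
}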

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifier.java
new file mode 100644
index 0000000..9341c8f
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifier.java
@@ -0,0 +1,33 @@
+package org.apache.helix.tools;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.manager.zk.ZkClient;
+
+
+/**
+ * Please use implementations of HelixClusterVerifier (BestPossibleExternalViewVerifier, StrictMatchExternalViewVerifier, etc.) in tools.ClusterVerifiers.
+ */
+@Deprecated
+public abstract class ClusterVerifier extends org.apache.helix.tools.ClusterVerifiers.ClusterVerifier {
+  public ClusterVerifier(ZkClient zkclient, String clusterName) {
+    super(zkclient, clusterName);
+  }
+}
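
The shim keeps existing callers compiling: code that imports the old package continues to build (now with a deprecation warning), while new code should target the relocated class. A sketch of the two import forms (they cannot, of course, coexist in one file):

// Old import: still compiles via the deprecated shim added above.
import org.apache.helix.tools.ClusterVerifier;

// New import: the relocated class that the shim extends.
import org.apache.helix.tools.ClusterVerifiers.ClusterVerifier;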

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/BestPossibleExternalViewVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/BestPossibleExternalViewVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/BestPossibleExternalViewVerifier.java
new file mode 100644
index 0000000..6c79bed
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/BestPossibleExternalViewVerifier.java
@@ -0,0 +1,376 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.HelixDefinedState;
+import org.apache.helix.HelixException;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.controller.pipeline.Stage;
+import org.apache.helix.controller.pipeline.StageContext;
+import org.apache.helix.controller.stages.AttributeName;
+import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
+import org.apache.helix.controller.stages.BestPossibleStateOutput;
+import org.apache.helix.controller.stages.ClusterDataCache;
+import org.apache.helix.controller.stages.ClusterEvent;
+import org.apache.helix.controller.stages.CurrentStateComputationStage;
+import org.apache.helix.controller.stages.ResourceComputationStage;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.model.ExternalView;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.model.Partition;
+import org.apache.helix.model.StateModelDefinition;
+import org.apache.helix.task.TaskConstants;
+import org.apache.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Verifier that checks whether the ExternalViews of the given resources (or all resources in the
+ * cluster) match their best possible mapping states.
+ */
+public class BestPossibleExternalViewVerifier extends ZkHelixClusterVerifier {
+  private static Logger LOG = Logger.getLogger(BestPossibleExternalViewVerifier.class);
+
+  private final Map<String, Map<String, String>> _errStates;
+  private final Set<String> _resources;
+  private final Set<String> _expectLiveInstances;
+
+  public BestPossibleExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
+      Map<String, Map<String, String>> errStates, Set<String> expectLiveInstances) {
+    super(zkAddr, clusterName);
+    _errStates = errStates;
+    _resources = resources;
+    _expectLiveInstances = expectLiveInstances;
+  }
+
+  public BestPossibleExternalViewVerifier(ZkClient zkClient, String clusterName,
+      Set<String> resources, Map<String, Map<String, String>> errStates,
+      Set<String> expectLiveInstances) {
+    super(zkClient, clusterName);
+    _errStates = errStates;
+    _resources = resources;
+    _expectLiveInstances = expectLiveInstances;
+  }
+
+  public static class Builder {
+    private String _clusterName;
+    private Map<String, Map<String, String>> _errStates;
+    private Set<String> _resources;
+    private Set<String> _expectLiveInstances;
+    private String _zkAddr;
+    private ZkClient _zkClient;
+
+    public Builder(String clusterName) {
+      _clusterName = clusterName;
+    }
+
+    public BestPossibleExternalViewVerifier build() {
+      if (_clusterName == null || (_zkAddr == null && _zkClient == null)) {
+        throw new IllegalArgumentException("Cluster name or zookeeper info is missing!");
+      }
+
+      if (_zkClient != null) {
+        return new BestPossibleExternalViewVerifier(_zkClient, _clusterName, _resources, _errStates,
+            _expectLiveInstances);
+      }
+      return new BestPossibleExternalViewVerifier(_zkAddr, _clusterName, _resources, _errStates,
+          _expectLiveInstances);
+    }
+
+    public String getClusterName() {
+      return _clusterName;
+    }
+
+    public Map<String, Map<String, String>> getErrStates() {
+      return _errStates;
+    }
+
+    public Builder setErrStates(Map<String, Map<String, String>> errStates) {
+      _errStates = errStates;
+      return this;
+    }
+
+    public Set<String> getResources() {
+      return _resources;
+    }
+
+    public Builder setResources(Set<String> resources) {
+      _resources = resources;
+      return this;
+    }
+
+    public Set<String> getExpectLiveInstances() {
+      return _expectLiveInstances;
+    }
+
+    public Builder setExpectLiveInstances(Set<String> expectLiveInstances) {
+      _expectLiveInstances = expectLiveInstances;
+      return this;
+    }
+
+    public String getZkAddr() {
+      return _zkAddr;
+    }
+
+    public Builder setZkAddr(String zkAddr) {
+      _zkAddr = zkAddr;
+      return this;
+    }
+
+    public ZkClient getZkClient() {
+      return _zkClient;
+    }
+
+    public Builder setZkClient(ZkClient zkClient) {
+      _zkClient = zkClient;
+      return this;
+    }
+  }
+
+  @Override
+  public boolean verify(long timeout) {
+    return verifyByZkCallback(timeout);
+  }
+
+  @Override
+  public boolean verifyByZkCallback(long timeout) {
+    List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
+
+    // setup triggers
+    if (_resources != null && !_resources.isEmpty()) {
+      for (String resource : _resources) {
+        triggers
+            .add(new ClusterVerifyTrigger(_keyBuilder.idealStates(resource), true, false, false));
+        triggers
+            .add(new ClusterVerifyTrigger(_keyBuilder.externalView(resource), true, false, false));
+      }
+
+    } else {
+      triggers.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(), false, true, true));
+      triggers.add(new ClusterVerifyTrigger(_keyBuilder.externalViews(), false, true, true));
+    }
+
+    return verifyByCallback(timeout, triggers);
+  }
+
+  @Override
+  protected boolean verifyState() {
+    try {
+      PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
+      // read cluster once and do verification
+      ClusterDataCache cache = new ClusterDataCache();
+      cache.refresh(_accessor);
+
+      Map<String, IdealState> idealStates = cache.getIdealStates();
+      if (idealStates == null) {
+        // ideal state is null because ideal state is dropped
+        idealStates = Collections.emptyMap();
+      }
+
+      // filter out all resources that use Task state model
+      Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
+      while (it.hasNext()) {
+        Map.Entry<String, IdealState> pair = it.next();
+        if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
+          it.remove();
+        }
+      }
+
+      // verify live instances.
+      if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
+        Set<String> actualLiveNodes = cache.getLiveInstances().keySet();
+        if (!_expectLiveInstances.equals(actualLiveNodes)) {
+          return false;
+        }
+      }
+
+      Map<String, ExternalView> extViews = _accessor.getChildValuesMap(keyBuilder.externalViews());
+      if (extViews == null) {
+        extViews = Collections.emptyMap();
+      }
+
+      // Filter resources if requested
+      if (_resources != null && !_resources.isEmpty()) {
+        idealStates.keySet().retainAll(_resources);
+        extViews.keySet().retainAll(_resources);
+      }
+
+      // if externalView is not empty and idealState doesn't exist
+      // add empty idealState for the resource
+      for (String resource : extViews.keySet()) {
+        if (!idealStates.containsKey(resource)) {
+          idealStates.put(resource, new IdealState(resource));
+        }
+      }
+
+      // calculate best possible state
+      BestPossibleStateOutput bestPossOutput = calcBestPossState(cache);
+      Map<String, Map<Partition, Map<String, String>>> bestPossStateMap =
+          bestPossOutput.getStateMap();
+
+      // set error states
+      if (_errStates != null) {
+        for (String resourceName : _errStates.keySet()) {
+          Map<String, String> partErrStates = _errStates.get(resourceName);
+          for (String partitionName : partErrStates.keySet()) {
+            String instanceName = partErrStates.get(partitionName);
+
+            if (!bestPossStateMap.containsKey(resourceName)) {
+              bestPossStateMap.put(resourceName, new HashMap<Partition, Map<String, String>>());
+            }
+            Partition partition = new Partition(partitionName);
+            if (!bestPossStateMap.get(resourceName).containsKey(partition)) {
+              bestPossStateMap.get(resourceName).put(partition, new HashMap<String, String>());
+            }
+            bestPossStateMap.get(resourceName).get(partition)
+                .put(instanceName, HelixDefinedState.ERROR.toString());
+          }
+        }
+      }
+
+      for (String resourceName : idealStates.keySet()) {
+        ExternalView extView = extViews.get(resourceName);
+        IdealState is = idealStates.get(resourceName);
+        if (extView == null) {
+          if (is.isExternalViewDisabled()) {
+            continue;
+          } else {
+            LOG.debug("externalView for " + resourceName + " is not available");
+            return false;
+          }
+        }
+
+        // step 0: remove empty map and DROPPED state from best possible state
+        Map<Partition, Map<String, String>> bpStateMap =
+            bestPossOutput.getResourceMap(resourceName);
+
+        StateModelDefinition stateModelDef = cache.getStateModelDef(is.getStateModelDefRef());
+        if (stateModelDef == null) {
+          throw new HelixException(
+              "State model definition " + is.getStateModelDefRef() + " for resource not found!" + is
+                  .getResourceName());
+        }
+
+        boolean result = verifyExternalView(is, extView, bpStateMap, stateModelDef);
+        if (!result) {
+          LOG.debug("verifyExternalView fails! ExternalView: " + extView + " BestPossibleState: "
+              + bpStateMap);
+          return false;
+        }
+      }
+      return true;
+    } catch (Exception e) {
+      LOG.error("exception in verification", e);
+      return false;
+    }
+  }
+
+  private boolean verifyExternalView(IdealState idealState, ExternalView externalView,
+      Map<Partition, Map<String, String>> bestPossibleState, StateModelDefinition stateModelDef) {
+    Set<String> ignoredStates = new HashSet<String>(
+        Arrays.asList(stateModelDef.getInitialState(), HelixDefinedState.DROPPED.toString()));
+
+    Map<String, Map<String, String>> bestPossibleStateMap =
+        convertBestPossibleState(bestPossibleState);
+    removeEntryWithIgnoredStates(bestPossibleStateMap.entrySet().iterator(), ignoredStates);
+
+    Map<String, Map<String, String>> externalViewMap = externalView.getRecord().getMapFields();
+    removeEntryWithIgnoredStates(externalViewMap.entrySet().iterator(), ignoredStates);
+
+    return externalViewMap.equals(bestPossibleStateMap);
+  }
+
+  private void removeEntryWithIgnoredStates(
+      Iterator<Map.Entry<String, Map<String, String>>> partitionInstanceStateMapIter,
+      Set<String> ignoredStates) {
+    while (partitionInstanceStateMapIter.hasNext()) {
+      Map.Entry<String, Map<String, String>> entry = partitionInstanceStateMapIter.next();
+      Map<String, String> instanceStateMap = entry.getValue();
+      if (instanceStateMap.isEmpty()) {
+        partitionInstanceStateMapIter.remove();
+      } else {
+        // remove instances whose state is ignored (the initial state, e.g. OFFLINE, or DROPPED)
+        Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
+        while (insIter.hasNext()) {
+          String state = insIter.next().getValue();
+          if (ignoredStates.contains(state)) {
+            insIter.remove();
+          }
+        }
+      }
+    }
+  }
+
+  private Map<String, Map<String, String>> convertBestPossibleState(
+      Map<Partition, Map<String, String>> bestPossibleState) {
+    Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
+    for (Partition partition : bestPossibleState.keySet()) {
+      result.put(partition.getPartitionName(), bestPossibleState.get(partition));
+    }
+    return result;
+  }
+
+  /**
+   * Calculate the best possible state. Note that DROPPED states are not checked, since we provide
+   * an empty current state map when kicking off the BestPossibleStateCalcStage.
+   *
+   * @param cache
+   * @return
+   * @throws Exception
+   */
+  private BestPossibleStateOutput calcBestPossState(ClusterDataCache cache) throws Exception {
+    ClusterEvent event = new ClusterEvent("sampleEvent");
+    event.addAttribute("ClusterDataCache", cache);
+
+    runStage(event, new ResourceComputationStage());
+    runStage(event, new CurrentStateComputationStage());
+
+    // TODO: be cautious here; this should be handled statelessly.
+    runStage(event, new BestPossibleStateCalcStage());
+
+    BestPossibleStateOutput output =
+        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString());
+
+    return output;
+  }
+
+  private void runStage(ClusterEvent event, Stage stage) throws Exception {
+    StageContext context = new StageContext();
+    stage.init(context);
+    stage.preProcess();
+    stage.process(event);
+    stage.postProcess();
+  }
+
+  @Override
+  public String toString() {
+    String verifierName = getClass().getSimpleName();
+    return verifierName + "(" + _clusterName + "@" + _zkClient + "@resources["
+       + (_resources != null ? Arrays.toString(_resources.toArray()) : "") + "])";
+  }
+}
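
A hedged usage sketch of the Builder above; the ZooKeeper address, cluster, resource, partition, and instance names are hypothetical. errStates maps resource -> (partition -> instance) for partitions that are expected to land in ERROR:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;

public class BestPossibleVerifyExample {
  public static void main(String[] args) {
    // Hypothetical names; partition MyResource_0 is expected to be in ERROR on localhost_12918.
    Map<String, Map<String, String>> errStates = new HashMap<String, Map<String, String>>();
    errStates.put("MyResource",
        Collections.singletonMap("MyResource_0", "localhost_12918"));

    BestPossibleExternalViewVerifier verifier =
        new BestPossibleExternalViewVerifier.Builder("MyCluster")
            .setZkAddr("localhost:2181")
            .setResources(Collections.singleton("MyResource"))
            .setErrStates(errStates)
            .build();

    // Re-reads the cluster every second, for at most 30 seconds, until the
    // ExternalViews converge to the controller's best possible mapping.
    boolean converged = verifier.verifyByPolling(30 * 1000L, 1000L);
    System.out.println("converged = " + converged);
  }
}

Here verifyByPolling re-reads the cluster state every second until the ExternalViews converge (with the expected ERROR partitions overlaid) or the timeout expires; verifyByZkCallback(timeout) would wait on ZooKeeper watches instead.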

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterExternalViewVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterExternalViewVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterExternalViewVerifier.java
new file mode 100644
index 0000000..fa697c4
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterExternalViewVerifier.java
@@ -0,0 +1,175 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.controller.pipeline.Stage;
+import org.apache.helix.controller.pipeline.StageContext;
+import org.apache.helix.controller.stages.AttributeName;
+import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
+import org.apache.helix.controller.stages.BestPossibleStateOutput;
+import org.apache.helix.controller.stages.ClusterDataCache;
+import org.apache.helix.controller.stages.ClusterEvent;
+import org.apache.helix.controller.stages.CurrentStateComputationStage;
+import org.apache.helix.controller.stages.ResourceComputationStage;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.model.ExternalView;
+import org.apache.helix.model.Partition;
+import org.apache.log4j.Logger;
+
+/**
+ * Given zk, cluster, and a list of expected live-instances,
+ * check whether the cluster's external-view reaches the best-possible state.
+ *
+ * @deprecated This class is deprecated; please use BestPossibleExternalViewVerifier instead.
+ */
+@Deprecated
+public class ClusterExternalViewVerifier extends ClusterVerifier {
+  private static Logger LOG = Logger.getLogger(ClusterExternalViewVerifier.class);
+
+  final List<String> _expectSortedLiveNodes; // always sorted
+
+  public ClusterExternalViewVerifier(ZkClient zkclient, String clusterName,
+      List<String> expectLiveNodes) {
+    super(zkclient, clusterName);
+    _expectSortedLiveNodes = expectLiveNodes;
+    Collections.sort(_expectSortedLiveNodes);
+  }
+
+  boolean verifyLiveNodes(List<String> actualLiveNodes) {
+    Collections.sort(actualLiveNodes);
+    return _expectSortedLiveNodes.equals(actualLiveNodes);
+  }
+
+  /**
+   * @param externalView the ExternalView read from ZooKeeper
+   * @param bestPossibleState map of partition to map of instance to state
+   * @return true if the external view matches the best possible state, false otherwise
+   */
+  boolean verifyExternalView(ExternalView externalView,
+      Map<Partition, Map<String, String>> bestPossibleState) {
+    Map<String, Map<String, String>> bestPossibleStateMap =
+        convertBestPossibleState(bestPossibleState);
+    // trimBestPossibleState(bestPossibleStateMap);
+
+    Map<String, Map<String, String>> externalViewMap = externalView.getRecord().getMapFields();
+    return externalViewMap.equals(bestPossibleStateMap);
+  }
+
+  static void runStage(ClusterEvent event, Stage stage) throws Exception {
+    StageContext context = new StageContext();
+    stage.init(context);
+    stage.preProcess();
+    stage.process(event);
+    stage.postProcess();
+  }
+
+  BestPossibleStateOutput calculateBestPossibleState(ClusterDataCache cache) throws Exception {
+    ClusterEvent event = new ClusterEvent("event");
+    event.addAttribute("ClusterDataCache", cache);
+
+    List<Stage> stages = new ArrayList<Stage>();
+    stages.add(new ResourceComputationStage());
+    stages.add(new CurrentStateComputationStage());
+    stages.add(new BestPossibleStateCalcStage());
+
+    for (Stage stage : stages) {
+      runStage(event, stage);
+    }
+
+    return event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString());
+  }
+
+  /**
+   * remove empty map and DROPPED state from best possible state
+   * @param bestPossibleState
+   */
+  // static void trimBestPossibleState(Map<String, Map<String, String>> bestPossibleState) {
+  // Iterator<Entry<String, Map<String, String>>> iter = bestPossibleState.entrySet().iterator();
+  // while (iter.hasNext()) {
+  // Map.Entry<String, Map<String, String>> entry = iter.next();
+  // Map<String, String> instanceStateMap = entry.getValue();
+  // if (instanceStateMap.isEmpty()) {
+  // iter.remove();
+  // } else {
+  // // remove instances with DROPPED state
+  // Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
+  // while (insIter.hasNext()) {
+  // Map.Entry<String, String> insEntry = insIter.next();
+  // String state = insEntry.getValue();
+  // if (state.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
+  // insIter.remove();
+  // }
+  // }
+  // }
+  // }
+  // }
+
+  static Map<String, Map<String, String>> convertBestPossibleState(
+      Map<Partition, Map<String, String>> bestPossibleState) {
+    Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
+    for (Partition partition : bestPossibleState.keySet()) {
+      result.put(partition.getPartitionName(), bestPossibleState.get(partition));
+    }
+    return result;
+  }
+
+  @Override
+  public boolean verify() throws Exception {
+    ClusterDataCache cache = new ClusterDataCache();
+    cache.refresh(_accessor);
+
+    List<String> liveInstances = new ArrayList<String>();
+    liveInstances.addAll(cache.getLiveInstances().keySet());
+    boolean success = verifyLiveNodes(liveInstances);
+    if (!success) {
+      LOG.info("liveNodes not match, expect: " + _expectSortedLiveNodes + ", actual: "
+          + liveInstances);
+      return false;
+    }
+
+    BestPossibleStateOutput bestPossibleStates = calculateBestPossibleState(cache);
+    Map<String, ExternalView> externalViews =
+        _accessor.getChildValuesMap(_keyBuilder.externalViews());
+
+    // TODO all ideal-states should be included in external-views
+
+    for (String resourceName : externalViews.keySet()) {
+      ExternalView externalView = externalViews.get(resourceName);
+      Map<Partition, Map<String, String>> bestPossibleState =
+          bestPossibleStates.getResourceMap(resourceName);
+      success = verifyExternalView(externalView, bestPossibleState);
+      if (!success) {
+        LOG.info("external-view for resource: " + resourceName + " not match");
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+}
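
Since this class is kept only for compatibility, a sketch of the equivalent check against the recommended BestPossibleExternalViewVerifier; the instance names are hypothetical and the ZkClient is assumed to be connected and configured for ZNRecord data:

import java.util.Arrays;
import java.util.HashSet;

import org.apache.helix.manager.zk.ZkClient;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ClusterExternalViewVerifier;

public class VerifierMigrationExample {
  static boolean verifyBothWays(ZkClient zkClient) throws Exception {
    // Deprecated verifier from the diff above: a List of expected live instances.
    ClusterExternalViewVerifier oldVerifier = new ClusterExternalViewVerifier(
        zkClient, "MyCluster", Arrays.asList("localhost_12918", "localhost_12919"));

    // Recommended replacement: Builder-based, with a Set of expected live instances.
    BestPossibleExternalViewVerifier newVerifier =
        new BestPossibleExternalViewVerifier.Builder("MyCluster")
            .setZkClient(zkClient)
            .setExpectLiveInstances(
                new HashSet<String>(Arrays.asList("localhost_12918", "localhost_12919")))
            .build();

    return oldVerifier.verify() && newVerifier.verify(30 * 1000L);
  }
}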

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterLiveNodesVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterLiveNodesVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterLiveNodesVerifier.java
new file mode 100644
index 0000000..2a71566
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterLiveNodesVerifier.java
@@ -0,0 +1,54 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.helix.manager.zk.ZkClient;
+
+public class ClusterLiveNodesVerifier extends ZkHelixClusterVerifier {
+
+  final Set<String> _expectLiveNodes;
+
+  public ClusterLiveNodesVerifier(ZkClient zkclient, String clusterName,
+      List<String> expectLiveNodes) {
+    super(zkclient, clusterName);
+    _expectLiveNodes = new HashSet<String>(expectLiveNodes);
+  }
+
+  @Override
+  public boolean verifyByZkCallback(long timeout) {
+    List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
+    triggers.add(new ClusterVerifyTrigger(_keyBuilder.liveInstances(), false, true, true));
+
+    return verifyByCallback(timeout, triggers);
+  }
+
+  @Override
+  protected boolean verifyState() throws Exception {
+    Set<String> actualLiveNodes =
+        new HashSet<String>(_accessor.getChildNames(_keyBuilder.liveInstances()));
+    return _expectLiveNodes.equals(actualLiveNodes);
+  }
+
+}
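
A short usage sketch of the verifier above; the ZooKeeper address, cluster, and instance names are hypothetical, and ZKClientPool is used to obtain a ZkClient the same way ZkHelixClusterVerifier does for its zkAddr constructor:

import java.util.Arrays;

import org.apache.helix.manager.zk.ZkClient;
import org.apache.helix.tools.ClusterVerifiers.ClusterLiveNodesVerifier;
import org.apache.helix.util.ZKClientPool;

public class LiveNodesVerifyExample {
  public static void main(String[] args) {
    // Hypothetical ZK address, cluster name, and instance names.
    ZkClient zkClient = ZKClientPool.getZkClient("localhost:2181");
    ClusterLiveNodesVerifier verifier = new ClusterLiveNodesVerifier(
        zkClient, "MyCluster", Arrays.asList("localhost_12918", "localhost_12919"));

    // True once the live instances registered under the cluster equal exactly the expected set.
    boolean allUp = verifier.verify(30 * 1000L);
    System.out.println("allUp = " + allUp);
  }
}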

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterStateVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterStateVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterStateVerifier.java
new file mode 100644
index 0000000..d2a2d09
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterStateVerifier.java
@@ -0,0 +1,739 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.I0Itec.zkclient.IZkChildListener;
+import org.I0Itec.zkclient.IZkDataListener;
+import org.I0Itec.zkclient.exception.ZkNodeExistsException;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixDefinedState;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.PropertyKey.Builder;
+import org.apache.helix.PropertyPathBuilder;
+import org.apache.helix.PropertyType;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.controller.pipeline.Stage;
+import org.apache.helix.controller.pipeline.StageContext;
+import org.apache.helix.controller.stages.AttributeName;
+import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
+import org.apache.helix.controller.stages.BestPossibleStateOutput;
+import org.apache.helix.controller.stages.ClusterDataCache;
+import org.apache.helix.controller.stages.ClusterEvent;
+import org.apache.helix.controller.stages.CurrentStateComputationStage;
+import org.apache.helix.controller.stages.ResourceComputationStage;
+import org.apache.helix.manager.zk.ZKHelixDataAccessor;
+import org.apache.helix.manager.zk.ZkBaseDataAccessor;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.helix.model.ExternalView;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.model.Partition;
+import org.apache.helix.model.Resource;
+import org.apache.helix.task.TaskConstants;
+import org.apache.helix.tools.ClusterSetup;
+import org.apache.helix.util.ZKClientPool;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.Sets;
+
+/**
+ * This class is deprecated, please use dedicated verifier classes, such as BestPossibleExternalViewVerifier, etc.
+ */
+@Deprecated
+public class ClusterStateVerifier {
+  public static String cluster = "cluster";
+  public static String zkServerAddress = "zkSvr";
+  public static String help = "help";
+  public static String timeout = "timeout";
+  public static String period = "period";
+  public static String resources = "resources";
+
+  private static Logger LOG = Logger.getLogger(ClusterStateVerifier.class);
+
+  public interface Verifier {
+    boolean verify();
+  }
+
+  public interface ZkVerifier extends Verifier {
+    ZkClient getZkClient();
+
+    String getClusterName();
+  }
+
+  /** Use BestPossibleExternalViewVerifier instead */
+  @Deprecated
+  static class ExtViewVeriferZkListener implements IZkChildListener, IZkDataListener {
+    final CountDownLatch _countDown;
+    final ZkClient _zkClient;
+    final Verifier _verifier;
+
+    public ExtViewVeriferZkListener(CountDownLatch countDown, ZkClient zkClient, ZkVerifier verifier) {
+      _countDown = countDown;
+      _zkClient = zkClient;
+      _verifier = verifier;
+    }
+
+    @Override
+    public void handleDataChange(String dataPath, Object data) throws Exception {
+      boolean result = _verifier.verify();
+      if (result == true) {
+        _countDown.countDown();
+      }
+    }
+
+    @Override
+    public void handleDataDeleted(String dataPath) throws Exception {
+      // TODO Auto-generated method stub
+
+    }
+
+    @Override
+    public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
+      for (String child : currentChilds) {
+        String childPath = parentPath.equals("/") ? parentPath + child : parentPath + "/" + child;
+        _zkClient.subscribeDataChanges(childPath, this);
+      }
+
+      boolean result = _verifier.verify();
+      if (result == true) {
+        _countDown.countDown();
+      }
+    }
+  }
+
+  private static ZkClient validateAndGetClient(String zkAddr, String clusterName) {
+    if (zkAddr == null || clusterName == null) {
+      throw new IllegalArgumentException("requires zkAddr|clusterName");
+    }
+    return ZKClientPool.getZkClient(zkAddr);
+  }
+
+  public static class BestPossAndExtViewZkVerifier implements ZkVerifier {
+    private final String clusterName;
+    private final Map<String, Map<String, String>> errStates;
+    private final ZkClient zkClient;
+    private final Set<String> resources;
+
+    public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName) {
+      this(zkAddr, clusterName, null);
+    }
+
+    public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName,
+        Map<String, Map<String, String>> errStates) {
+      this(zkAddr, clusterName, errStates, null);
+    }
+
+    public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName,
+        Map<String, Map<String, String>> errStates, Set<String> resources) {
+      this(validateAndGetClient(zkAddr, clusterName), clusterName, errStates, resources);
+    }
+
+    public BestPossAndExtViewZkVerifier(ZkClient zkClient, String clusterName,
+        Map<String, Map<String, String>> errStates, Set<String> resources) {
+      if (zkClient == null || clusterName == null) {
+        throw new IllegalArgumentException("requires zkClient|clusterName");
+      }
+      this.clusterName = clusterName;
+      this.errStates = errStates;
+      this.zkClient = zkClient;
+      this.resources = resources;
+    }
+
+    @Override
+    public boolean verify() {
+      try {
+        HelixDataAccessor accessor =
+            new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
+
+        return verifyBestPossAndExtView(accessor, errStates, clusterName, resources);
+      } catch (Exception e) {
+        LOG.error("exception in verification", e);
+      }
+      return false;
+    }
+
+    private boolean verifyBestPossAndExtView(HelixDataAccessor accessor,
+        Map<String, Map<String, String>> errStates, String clusterName, Set<String> resources) {
+      try {
+        PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+        // read cluster once and do verification
+        ClusterDataCache cache = new ClusterDataCache();
+        cache.refresh(accessor);
+
+        Map<String, IdealState> idealStates = cache.getIdealStates();
+        if (idealStates == null) {
+          // ideal state is null because ideal state is dropped
+          idealStates = Collections.emptyMap();
+        }
+
+        // filter out all resources that use Task state model
+        Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
+        while (it.hasNext()) {
+          Map.Entry<String, IdealState> pair = it.next();
+          if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
+            it.remove();
+          }
+        }
+
+        Map<String, ExternalView> extViews = accessor.getChildValuesMap(keyBuilder.externalViews());
+        if (extViews == null) {
+          extViews = Collections.emptyMap();
+        }
+
+        // Filter resources if requested
+        if (resources != null && !resources.isEmpty()) {
+          idealStates.keySet().retainAll(resources);
+          extViews.keySet().retainAll(resources);
+        }
+
+        // if externalView is not empty and idealState doesn't exist
+        // add empty idealState for the resource
+        for (String resource : extViews.keySet()) {
+          if (!idealStates.containsKey(resource)) {
+            idealStates.put(resource, new IdealState(resource));
+          }
+        }
+
+        // calculate best possible state
+        BestPossibleStateOutput bestPossOutput = calcBestPossState(cache, resources);
+        Map<String, Map<Partition, Map<String, String>>> bestPossStateMap =
+            bestPossOutput.getStateMap();
+
+        // set error states
+        if (errStates != null) {
+          for (String resourceName : errStates.keySet()) {
+            Map<String, String> partErrStates = errStates.get(resourceName);
+            for (String partitionName : partErrStates.keySet()) {
+              String instanceName = partErrStates.get(partitionName);
+
+              if (!bestPossStateMap.containsKey(resourceName)) {
+                bestPossStateMap.put(resourceName, new HashMap<Partition, Map<String, String>>());
+              }
+              Partition partition = new Partition(partitionName);
+              if (!bestPossStateMap.get(resourceName).containsKey(partition)) {
+                bestPossStateMap.get(resourceName).put(partition, new HashMap<String, String>());
+              }
+              bestPossStateMap.get(resourceName).get(partition)
+                  .put(instanceName, HelixDefinedState.ERROR.toString());
+            }
+          }
+        }
+
+        // System.out.println("stateMap: " + bestPossStateMap);
+
+        for (String resourceName : idealStates.keySet()) {
+          ExternalView extView = extViews.get(resourceName);
+          if (extView == null) {
+            IdealState is = idealStates.get(resourceName);
+            if (is.isExternalViewDisabled()) {
+              continue;
+            } else {
+              LOG.info("externalView for " + resourceName + " is not available");
+              return false;
+            }
+          }
+
+          // step 0: remove empty map and DROPPED state from best possible state
+          Map<Partition, Map<String, String>> bpStateMap =
+              bestPossOutput.getResourceMap(resourceName);
+          Iterator<Map.Entry<Partition, Map<String, String>>> iter = bpStateMap.entrySet().iterator();
+          while (iter.hasNext()) {
+            Map.Entry<Partition, Map<String, String>> entry = iter.next();
+            Map<String, String> instanceStateMap = entry.getValue();
+            if (instanceStateMap.isEmpty()) {
+              iter.remove();
+            } else {
+              // remove instances with DROPPED state
+              Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
+              while (insIter.hasNext()) {
+                Map.Entry<String, String> insEntry = insIter.next();
+                String state = insEntry.getValue();
+                if (state.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
+                  insIter.remove();
+                }
+              }
+            }
+          }
+
+          // System.err.println("resource: " + resourceName + ", bpStateMap: " + bpStateMap);
+
+          // step 1: externalView and bestPossibleState have the same number of partitions
+          int extViewSize = extView.getRecord().getMapFields().size();
+          int bestPossStateSize = bestPossOutput.getResourceMap(resourceName).size();
+          if (extViewSize != bestPossStateSize) {
+            LOG.info("externalView size (" + extViewSize + ") is different from bestPossState size ("
+                + bestPossStateSize + ") for resource: " + resourceName);
+
+            // System.err.println("exterView size (" + extViewSize
+            // + ") is different from bestPossState size (" + bestPossStateSize
+            // + ") for resource: " + resourceName);
+            // System.out.println("extView: " + extView.getRecord().getMapFields());
+            // System.out.println("bestPossState: " +
+            // bestPossOutput.getResourceMap(resourceName));
+            return false;
+          }
+
+          // step 2: every entry in external view is contained in best possible state
+          for (String partition : extView.getRecord().getMapFields().keySet()) {
+            Map<String, String> evInstanceStateMap = extView.getRecord().getMapField(partition);
+            Map<String, String> bpInstanceStateMap =
+                bestPossOutput.getInstanceStateMap(resourceName, new Partition(partition));
+
+            if (!compareMap(evInstanceStateMap, bpInstanceStateMap)) {
+              LOG.info("externalView is different from bestPossibleState for partition: " + partition);
+
+              // System.err.println("externalView is different from bestPossibleState for partition: "
+              // + partition + ", actual: " + evInstanceStateMap + ", bestPoss: " +
+              // bpInstanceStateMap);
+              return false;
+            }
+          }
+        }
+        return true;
+      } catch (Exception e) {
+        LOG.error("exception in verification", e);
+        return false;
+      }
+    }
+
+    /**
+     * Calculate the best possible state. Note that DROPPED states are not checked, since an
+     * empty current state map is provided when the BestPossibleStateCalcStage is kicked off.
+     *
+     * @param cache cluster data cache to compute against
+     * @param resources optional set of resource names to restrict the computation to; null means all
+     * @return the computed best possible state output
+     * @throws Exception if any pipeline stage fails
+     */
+    private BestPossibleStateOutput calcBestPossState(ClusterDataCache cache, Set<String> resources)
+        throws Exception {
+      ClusterEvent event = new ClusterEvent("sampleEvent");
+      event.addAttribute("ClusterDataCache", cache);
+
+      ResourceComputationStage rcState = new ResourceComputationStage();
+      CurrentStateComputationStage csStage = new CurrentStateComputationStage();
+      BestPossibleStateCalcStage bpStage = new BestPossibleStateCalcStage();
+
+      runStage(event, rcState);
+
+      // Filter resources if specified
+      if (resources != null) {
+        Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.toString());
+        resourceMap.keySet().retainAll(resources);
+      }
+
+      runStage(event, csStage);
+      runStage(event, bpStage);
+
+      BestPossibleStateOutput output =
+          event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString());
+
+      // System.out.println("output:" + output);
+      return output;
+    }
+
+    private void runStage(ClusterEvent event, Stage stage) throws Exception {
+      StageContext context = new StageContext();
+      stage.init(context);
+      stage.preProcess();
+      stage.process(event);
+      stage.postProcess();
+    }
+
+    private <K, V> boolean compareMap(Map<K, V> map1, Map<K, V> map2) {
+      boolean isEqual = true;
+      if (map1 == null && map2 == null) {
+        // OK
+      } else if (map1 == null && map2 != null) {
+        if (!map2.isEmpty()) {
+          isEqual = false;
+        }
+      } else if (map1 != null && map2 == null) {
+        if (!map1.isEmpty()) {
+          isEqual = false;
+        }
+      } else {
+        // verify size
+        if (map1.size() != map2.size()) {
+          isEqual = false;
+        }
+        // verify each <key, value> in map1 is contained in map2
+        for (K key : map1.keySet()) {
+          if (!map1.get(key).equals(map2.get(key))) {
+            LOG.debug(
+                "different value for key: " + key + "(map1: " + map1.get(key) + ", map2: " + map2
+                    .get(key) + ")");
+            isEqual = false;
+            break;
+          }
+        }
+      }
+      return isEqual;
+    }
+
+    @Override
+    public ZkClient getZkClient() {
+      return zkClient;
+    }
+
+    @Override
+    public String getClusterName() {
+      return clusterName;
+    }
+
+    @Override
+    public String toString() {
+      String verifierName = getClass().getName();
+      verifierName = verifierName.substring(verifierName.lastIndexOf('.') + 1);
+      return verifierName + "(" + clusterName + "@" + zkClient.getServers() + ")";
+    }
+  }
+
+
+  public static class MasterNbInExtViewVerifier implements ZkVerifier {
+    private final String clusterName;
+    private final ZkClient zkClient;
+
+    public MasterNbInExtViewVerifier(String zkAddr, String clusterName) {
+      this(validateAndGetClient(zkAddr, clusterName), clusterName);
+    }
+
+    public MasterNbInExtViewVerifier(ZkClient zkClient, String clusterName) {
+      if (zkClient == null || clusterName == null) {
+        throw new IllegalArgumentException("requires zkClient|clusterName");
+      }
+      this.clusterName = clusterName;
+      this.zkClient = zkClient;
+    }
+
+    @Override
+    public boolean verify() {
+      try {
+        ZKHelixDataAccessor accessor =
+            new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
+
+        return verifyMasterNbInExtView(accessor);
+      } catch (Exception e) {
+        LOG.error("exception in verification", e);
+      }
+      return false;
+    }
+
+    @Override
+    public ZkClient getZkClient() {
+      return zkClient;
+    }
+
+    @Override
+    public String getClusterName() {
+      return clusterName;
+    }
+
+    private boolean verifyMasterNbInExtView(HelixDataAccessor accessor) {
+      Builder keyBuilder = accessor.keyBuilder();
+
+      Map<String, IdealState> idealStates = accessor.getChildValuesMap(keyBuilder.idealStates());
+      if (idealStates == null || idealStates.isEmpty()) {
+        LOG.info("No resource idealState");
+        return true;
+      }
+
+      Map<String, ExternalView> extViews = accessor.getChildValuesMap(keyBuilder.externalViews());
+      if (extViews == null || extViews.size() < idealStates.size()) {
+        LOG.info("No externalViews | externalView.size() < idealState.size()");
+        return false;
+      }
+
+      for (String resource : extViews.keySet()) {
+        int partitions = idealStates.get(resource).getNumPartitions();
+        Map<String, Map<String, String>> instanceStateMap =
+            extViews.get(resource).getRecord().getMapFields();
+        if (instanceStateMap.size() < partitions) {
+          LOG.info("Number of externalViews (" + instanceStateMap.size() + ") < partitions ("
+              + partitions + ")");
+          return false;
+        }
+
+        for (String partition : instanceStateMap.keySet()) {
+          boolean foundMaster = false;
+          for (String instance : instanceStateMap.get(partition).keySet()) {
+            if (instanceStateMap.get(partition).get(instance).equalsIgnoreCase("MASTER")) {
+              foundMaster = true;
+              break;
+            }
+          }
+          if (!foundMaster) {
+            LOG.info("No MASTER for partition: " + partition);
+            return false;
+          }
+        }
+      }
+      return true;
+    }
+  }
+
+  public static boolean verifyByPolling(Verifier verifier) {
+    return verifyByPolling(verifier, 30 * 1000);
+  }
+
+  public static boolean verifyByPolling(Verifier verifier, long timeout) {
+    return verifyByPolling(verifier, timeout, 1000);
+  }
+
+  public static boolean verifyByPolling(Verifier verifier, long timeout, long period) {
+    long startTime = System.currentTimeMillis();
+    boolean result = false;
+    try {
+      long curTime;
+      do {
+        Thread.sleep(period);
+        result = verifier.verify();
+        if (result) {
+          break;
+        }
+        curTime = System.currentTimeMillis();
+      } while (curTime <= startTime + timeout);
+      return result;
+    } catch (Exception e) {
+      LOG.error("Exception in verification", e);
+    } finally {
+      long endTime = System.currentTimeMillis();
+
+      // debug
+      System.err.println(result + ": " + verifier + ": wait " + (endTime - startTime)
+          + "ms to verify");
+
+    }
+    return false;
+  }
+
+  public static boolean verifyByZkCallback(ZkVerifier verifier) {
+    return verifyByZkCallback(verifier, 30000);
+  }
+
+  /**
+   * Verify by subscribing to ZK callbacks on the cluster's external views. This method must
+   * always be invoked from a single thread.
+   *
+   * @param verifier the ZkVerifier to run
+   * @param timeout maximum time to wait, in milliseconds
+   * @return true if verification succeeds within the timeout, false otherwise
+   */
+  public static boolean verifyByZkCallback(ZkVerifier verifier, long timeout) {
+    long startTime = System.currentTimeMillis();
+    CountDownLatch countDown = new CountDownLatch(1);
+    ZkClient zkClient = verifier.getZkClient();
+    String clusterName = verifier.getClusterName();
+
+    // add an ephemeral node at /{clusterName}/CONFIGS/CLUSTER/verify so that, when analyzing
+    // the zk log, we can tell when a test ends
+    try {
+      zkClient.createEphemeral("/" + clusterName + "/CONFIGS/CLUSTER/verify");
+    } catch (ZkNodeExistsException ex) {
+      LOG.error("There is already a verification in progress", ex);
+      throw ex;
+    }
+
+    ExtViewVeriferZkListener listener = new ExtViewVeriferZkListener(countDown, zkClient, verifier);
+
+    String extViewPath = PropertyPathBuilder.getPath(PropertyType.EXTERNALVIEW, clusterName);
+    zkClient.subscribeChildChanges(extViewPath, listener);
+    for (String child : zkClient.getChildren(extViewPath)) {
+      String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
+      zkClient.subscribeDataChanges(childPath, listener);
+    }
+
+    // do initial verify
+    boolean result = verifier.verify();
+    if (!result) {
+      try {
+        result = countDown.await(timeout, TimeUnit.MILLISECONDS);
+        if (!result) {
+          // make a final attempt if the wait timed out
+          result = verifier.verify();
+        }
+      } catch (Exception e) {
+        LOG.error("Exception in verification", e);
+      }
+    }
+
+    // clean up
+    zkClient.unsubscribeChildChanges(extViewPath, listener);
+    for (String child : zkClient.getChildren(extViewPath)) {
+      String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
+      zkClient.unsubscribeDataChanges(childPath, listener);
+    }
+
+    long endTime = System.currentTimeMillis();
+
+    zkClient.delete("/" + clusterName + "/CONFIGS/CLUSTER/verify");
+    // debug
+    System.err.println(result + ": wait " + (endTime - startTime) + "ms, " + verifier);
+
+    return result;
+  }
+
+  @SuppressWarnings("static-access")
+  private static Options constructCommandLineOptions() {
+    Option helpOption =
+        OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
+            .create();
+
+    Option zkServerOption =
+        OptionBuilder.withLongOpt(zkServerAddress).withDescription("Provide zookeeper address")
+            .create();
+    zkServerOption.setArgs(1);
+    zkServerOption.setRequired(true);
+    zkServerOption.setArgName("ZookeeperServerAddress(Required)");
+
+    Option clusterOption =
+        OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create();
+    clusterOption.setArgs(1);
+    clusterOption.setRequired(true);
+    clusterOption.setArgName("Cluster name (Required)");
+
+    Option timeoutOption =
+        OptionBuilder.withLongOpt(timeout).withDescription("Timeout value for verification")
+            .create();
+    timeoutOption.setArgs(1);
+    timeoutOption.setArgName("Timeout value (Optional), default=30s");
+
+    Option sleepIntervalOption =
+        OptionBuilder.withLongOpt(period).withDescription("Polling period for verification")
+            .create();
+    sleepIntervalOption.setArgs(1);
+    sleepIntervalOption.setArgName("Polling period value (Optional), default=1s");
+
+    Option resourcesOption =
+        OptionBuilder.withLongOpt(resources).withDescription("Specific set of resources to verify")
+            .create();
+    resourcesOption.setArgs(1);
+    resourcesOption.setArgName("Comma-separated resource names, default is all resources");
+
+    Options options = new Options();
+    options.addOption(helpOption);
+    options.addOption(zkServerOption);
+    options.addOption(clusterOption);
+    options.addOption(timeoutOption);
+    options.addOption(sleepIntervalOption);
+    options.addOption(resourcesOption);
+
+    return options;
+  }
+
+  public static void printUsage(Options cliOptions) {
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.setWidth(1000);
+    helpFormatter.printHelp("java " + ClusterSetup.class.getName(), cliOptions);
+  }
+
+  public static CommandLine processCommandLineArgs(String[] cliArgs) {
+    CommandLineParser cliParser = new GnuParser();
+    Options cliOptions = constructCommandLineOptions();
+
+    try {
+      return cliParser.parse(cliOptions, cliArgs);
+    } catch (ParseException pe) {
+      System.err.println("CommandLineClient: failed to parse command-line options: "
+          + pe.toString());
+      printUsage(cliOptions);
+      System.exit(1);
+    }
+    return null;
+  }
+
+  public static boolean verifyState(String[] args) {
+    // defaults used when no command-line arguments are supplied
+    String clusterName = "storage-cluster";
+    String zkServer = "localhost:2181";
+    long timeoutValue = 0;
+    long periodValue = 1000;
+
+    Set<String> resourceSet = null;
+    if (args.length > 0) {
+      CommandLine cmd = processCommandLineArgs(args);
+      zkServer = cmd.getOptionValue(zkServerAddress);
+      clusterName = cmd.getOptionValue(cluster);
+      String timeoutStr = cmd.getOptionValue(timeout);
+      String periodStr = cmd.getOptionValue(period);
+      String resourceStr = cmd.getOptionValue(resources);
+
+      if (timeoutStr != null) {
+        try {
+          timeoutValue = Long.parseLong(timeoutStr);
+        } catch (Exception e) {
+          System.err.println("Exception in converting " + timeoutStr + " to long. Use default (0)");
+        }
+      }
+
+      if (periodStr != null) {
+        try {
+          periodValue = Long.parseLong(periodStr);
+        } catch (Exception e) {
+          System.err.println("Exception in converting " + periodStr
+              + " to long. Use default (1000)");
+        }
+      }
+
+      // Allow specifying resources explicitly
+      if (resourceStr != null) {
+        String[] resources = resourceStr.split("[\\s,]");
+        resourceSet = Sets.newHashSet(resources);
+      }
+
+    }
+    // return verifyByPolling(new BestPossAndExtViewZkVerifier(zkServer, clusterName),
+    // timeoutValue,
+    // periodValue);
+
+    ZkVerifier verifier;
+    if (resourceSet == null) {
+      verifier = new BestPossAndExtViewZkVerifier(zkServer, clusterName);
+    } else {
+      verifier = new BestPossAndExtViewZkVerifier(zkServer, clusterName, null, resourceSet);
+    }
+    return verifyByZkCallback(verifier, timeoutValue);
+  }
+
+  public static void main(String[] args) {
+    boolean result = verifyState(args);
+    System.out.println(result ? "Successful" : "Failed");
+    System.exit(result ? 0 : 1);
+  }
+
+}
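
For reference, the deprecated verifier classes restored above are normally driven through the static helpers ClusterStateVerifier.verifyByPolling(...) or verifyByZkCallback(...). A minimal usage sketch follows (the ZooKeeper address, cluster name, and timeout are placeholders, and the import paths assume the class is restored to its original org.apache.helix.tools package as this commit intends):

  import org.apache.helix.tools.ClusterStateVerifier;
  import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier;

  public class VerifyClusterExample {
    public static void main(String[] args) {
      // Placeholder connection details; adjust for the target cluster.
      String zkAddr = "localhost:2181";
      String clusterName = "test-cluster";

      // Wait up to 30 seconds for every external view to match the best possible state.
      boolean converged = ClusterStateVerifier.verifyByZkCallback(
          new BestPossAndExtViewZkVerifier(zkAddr, clusterName), 30 * 1000);
      System.out.println("Cluster converged: " + converged);
    }
  }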

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterVerifier.java
new file mode 100644
index 0000000..13d98f4
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterVerifier.java
@@ -0,0 +1,148 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.I0Itec.zkclient.IZkChildListener;
+import org.I0Itec.zkclient.IZkDataListener;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.manager.zk.ZKHelixDataAccessor;
+import org.apache.helix.manager.zk.ZkBaseDataAccessor;
+import org.apache.helix.manager.zk.ZkClient;
+import org.apache.log4j.Logger;
+
+@Deprecated
+public abstract class ClusterVerifier implements IZkChildListener, IZkDataListener {
+  private static Logger LOG = Logger.getLogger(ClusterVerifier.class);
+
+  protected final ZkClient _zkclient;
+  protected final String _clusterName;
+  protected final HelixDataAccessor _accessor;
+  protected final PropertyKey.Builder _keyBuilder;
+  private CountDownLatch _countdown;
+
+  static class ClusterVerifyTrigger {
+    final PropertyKey _triggerKey;
+    final boolean _triggerOnChildDataChange;
+
+    public ClusterVerifyTrigger(PropertyKey triggerKey, boolean triggerOnChildDataChange) {
+      _triggerKey = triggerKey;
+      _triggerOnChildDataChange = triggerOnChildDataChange;
+    }
+  }
+
+  public ClusterVerifier(ZkClient zkclient, String clusterName) {
+    _zkclient = zkclient;
+    _clusterName = clusterName;
+    _accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkclient));
+    _keyBuilder = _accessor.keyBuilder();
+  }
+
+  public boolean verifyByCallback(long timeout, List<ClusterVerifyTrigger> triggers) {
+    _countdown = new CountDownLatch(1);
+
+    for (ClusterVerifyTrigger trigger : triggers) {
+      String path = trigger._triggerKey.getPath();
+      _zkclient.subscribeChildChanges(path, this);
+      if (trigger._triggerOnChildDataChange) {
+        List<String> childs = _zkclient.getChildren(path);
+        for (String child : childs) {
+          String childPath = String.format("%s/%s", path, child);
+          _zkclient.subscribeDataChanges(childPath, this);
+        }
+      }
+    }
+
+    boolean success = false;
+    try {
+      success = verify();
+      if (!success) {
+
+        success = _countdown.await(timeout, TimeUnit.MILLISECONDS);
+        if (!success) {
+          // make a final try if timeout
+          success = verify();
+        }
+      }
+    } catch (Exception e) {
+      LOG.error("Exception in verifier", e);
+    }
+
+    // clean up
+    _zkclient.unsubscribeAll();
+
+    return success;
+  }
+
+  @Override
+  public void handleDataChange(String dataPath, Object data) throws Exception {
+    boolean success = verify();
+    if (success) {
+      _countdown.countDown();
+    }
+  }
+
+  @Override
+  public void handleDataDeleted(String dataPath) throws Exception {
+    _zkclient.unsubscribeDataChanges(dataPath, this);
+  }
+
+  @Override
+  public void handleChildChange(String parentPath, List<String> currentChildren) throws Exception {
+    for (String child : currentChildren) {
+      String childPath = String.format("%s/%s", parentPath, child);
+      _zkclient.subscribeDataChanges(childPath, this);
+    }
+
+    boolean success = verify();
+    if (success) {
+      _countdown.countDown();
+    }
+  }
+
+  public boolean verifyByPolling(long timeout) {
+    try {
+      long start = System.currentTimeMillis();
+      boolean success;
+      do {
+        success = verify();
+        if (success) {
+          return true;
+        }
+        TimeUnit.MILLISECONDS.sleep(500);
+      } while ((System.currentTimeMillis() - start) <= timeout);
+    } catch (Exception e) {
+      LOG.error("Exception in verifier", e);
+    }
+    return false;
+  }
+
+  /**
+   * Verify the cluster state.
+   *
+   * @return true if the cluster is in the expected state, false otherwise
+   * @throws Exception if verification fails unexpectedly
+   */
+  public abstract boolean verify() throws Exception;
+}
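
As a rough illustration of the deprecated ClusterVerifier base class above, a concrete subclass only needs to implement verify(); the callback and polling plumbing is inherited. The subclass below is hypothetical (its name and the condition it checks are not part of this commit):

  import org.apache.helix.manager.zk.ZkClient;
  import org.apache.helix.tools.ClusterVerifiers.ClusterVerifier;

  // Hypothetical verifier: succeeds once at least one live instance has registered.
  class LiveInstanceCountVerifier extends ClusterVerifier {
    public LiveInstanceCountVerifier(ZkClient zkclient, String clusterName) {
      super(zkclient, clusterName);
    }

    @Override
    public boolean verify() throws Exception {
      return !_accessor.getChildValuesMap(_keyBuilder.liveInstances()).isEmpty();
    }
  }

  // Typical call site: poll for up to 10 seconds.
  //   boolean ok = new LiveInstanceCountVerifier(zkClient, "test-cluster").verifyByPolling(10 * 1000);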

http://git-wip-us.apache.org/repos/asf/helix/blob/015a73ce/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/HelixClusterVerifier.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/HelixClusterVerifier.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/HelixClusterVerifier.java
new file mode 100644
index 0000000..b819ff9
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/HelixClusterVerifier.java
@@ -0,0 +1,40 @@
+package org.apache.helix.tools.ClusterVerifiers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+public interface HelixClusterVerifier {
+  /**
+   *  Verify the cluster.
+   *  The method blocks for at most {@code timeout} milliseconds.
+   *
+   * @param timeout maximum time to block, in milliseconds
+   * @return true if verification succeeds, false otherwise
+   */
+  boolean verify(long timeout);
+
+  /**
+   *  Verify the cluster.
+   *
+   *  @return true if verification succeeds, false otherwise
+   */
+  boolean verify();
+}
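
Callers of this interface follow the pattern sketched below; the anonymous implementation is a stand-in used only to make the snippet self-contained and is not part of this commit:

  import org.apache.helix.tools.ClusterVerifiers.HelixClusterVerifier;

  public class HelixClusterVerifierUsage {
    public static void main(String[] args) {
      // Stand-in implementation; a real caller would use a concrete verifier.
      HelixClusterVerifier verifier = new HelixClusterVerifier() {
        @Override
        public boolean verify(long timeout) {
          return true;
        }

        @Override
        public boolean verify() {
          return verify(30 * 1000L);
        }
      };

      // Block for at most 60 seconds; a false return means verification did not succeed in time.
      boolean ok = verifier.verify(60 * 1000L);
      System.out.println("verified: " + ok);
    }
  }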

