ambari-commits mailing list archives

From jonathanhur...@apache.org
Subject ambari git commit: AMBARI-15999 - Review (and adjust) new HDFS script alerts response text (jonathanhurley)
Date Mon, 25 Apr 2016 17:18:28 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk aa6a2a9ce -> b70758413


AMBARI-15999 - Review (and adjust) new HDFS script alerts response text (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b7075841
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b7075841
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b7075841

Branch: refs/heads/trunk
Commit: b70758413013351b4af10239b1ef661327294784
Parents: aa6a2a9
Author: Jonathan Hurley <jhurley@hortonworks.com>
Authored: Wed Apr 20 16:35:01 2016 -0400
Committer: Jonathan Hurley <jhurley@hortonworks.com>
Committed: Mon Apr 25 13:18:12 2016 -0400

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/main.py        |   4 +
 .../server/events/AlertStateChangeEvent.java    |  20 +++-
 .../listeners/alerts/AlertReceivedListener.java |  22 +++-
 .../alerts/AlertStateChangedListener.java       |  17 ++-
 .../server/upgrade/UpgradeCatalog240.java       | 113 +++++++++++--------
 .../common-services/HDFS/2.1.0.2.0/alerts.json  |  96 ++++++++++++++++
 .../package/alerts/alert_metrics_deviation.py   |  89 +++++++++++----
 .../alerts/AggregateAlertListenerTest.java      |   6 +-
 .../state/alerts/AlertReceivedListenerTest.java |  19 +++-
 .../alerts/AlertStateChangedEventTest.java      |  42 ++++++-
 .../state/cluster/AlertDataManagerTest.java     |   9 +-
 .../server/upgrade/UpgradeCatalog240Test.java   |   3 +
 .../2.0.6/HDFS/test_alert_metrics_deviation.py  |  19 ++--
 13 files changed, 365 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-agent/src/main/python/ambari_agent/main.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/main.py b/ambari-agent/src/main/python/ambari_agent/main.py
index 34d4e74..5340239 100644
--- a/ambari-agent/src/main/python/ambari_agent/main.py
+++ b/ambari-agent/src/main/python/ambari_agent/main.py
@@ -49,6 +49,10 @@ from resource_management.core.logger import Logger
 logger = logging.getLogger()
 alerts_logger = logging.getLogger('ambari_alerts')
 
+# use the host's locale for numeric formatting
+import locale
+locale.setlocale(locale.LC_ALL, '')
+
 formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
 agentPid = os.getpid()
 config = AmbariConfig.AmbariConfig()
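
The setlocale() call above makes the agent process honor the host's locale, so that alert scripts
spawned by the agent can emit grouped numbers such as "45,000" in their response text. A minimal,
hypothetical sketch of the formatting this enables (it assumes an en_US-style locale is configured
on the host; under the plain "C" locale no grouping separators are produced):

    import locale

    # adopt whatever locale the host is configured with
    locale.setlocale(locale.LC_ALL, '')

    # group digits according to the active locale, e.g. "7,071" under en_US
    print(locale.format("%.0f", 7071.07, grouping=True))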

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/java/org/apache/ambari/server/events/AlertStateChangeEvent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertStateChangeEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertStateChangeEvent.java
index ac1eb8d..eed8ce3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertStateChangeEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertStateChangeEvent.java
@@ -38,6 +38,11 @@ public class AlertStateChangeEvent extends AlertEvent {
   private final AlertState m_fromState;
 
   /**
+   * The prior alert firmness.
+   */
+  private final AlertFirmness m_fromFirmness;
+
+  /**
    * The current alert, including state and history.
    */
   private final AlertCurrentEntity m_currentAlert;
@@ -52,14 +57,18 @@ public class AlertStateChangeEvent extends AlertEvent {
    *
    * @param clusterId
    * @param alert
+   * @param currentAlert
+   * @param fromState
+   * @param fromFirmness
    */
   public AlertStateChangeEvent(long clusterId, Alert alert,
-      AlertCurrentEntity currentAlert, AlertState fromState) {
+      AlertCurrentEntity currentAlert, AlertState fromState, AlertFirmness fromFirmness) {
     super(clusterId, alert);
 
     m_currentAlert = currentAlert;
     m_history = currentAlert.getAlertHistory();
     m_fromState = fromState;
+    m_fromFirmness = fromFirmness;
   }
 
   /**
@@ -90,6 +99,15 @@ public class AlertStateChangeEvent extends AlertEvent {
   }
 
   /**
+   * Gets the prior firmness of the alert.
+   *
+   * @return the prior firmness of the alert.
+   */
+  public AlertFirmness getFromFirmness() {
+    return m_fromFirmness;
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
index c966ffa..2dcf1d6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
@@ -232,13 +232,26 @@ public class AlertReceivedListener {
           // ensure that if we've met the repeat tolerance and the alert is
           // still SOFT, then we transition it to HARD - we also need to fire an
           // event
-          AlertFirmness currentFirmness = current.getFirmness();
+          AlertFirmness firmness = current.getFirmness();
           int repeatTolerance = getRepeatTolerance(definition, clusterName);
-          if (currentFirmness == AlertFirmness.SOFT && occurrences >= repeatTolerance) {
+          if (firmness == AlertFirmness.SOFT && occurrences >= repeatTolerance) {
             current.setFirmness(AlertFirmness.HARD);
 
             // create the event to fire later
-            alertEvents.add(new AlertStateChangeEvent(clusterId, alert, current, alertState));
+            AlertStateChangeEvent stateChangedEvent = new AlertStateChangeEvent(clusterId, alert,
+                current, alertState, firmness);
+
+            alertEvents.add(stateChangedEvent);
+          }
+        }
+
+        // some special cases for SKIPPED alerts
+        if (alertState == AlertState.SKIPPED) {
+          // set the text on a SKIPPED alert IFF it's not blank; a blank text
+          // field means that the alert doesn't want to change the existing text
+          String alertText = alert.getText();
+          if (StringUtils.isNotBlank(alertText)) {
+            current.setLatestText(alertText);
           }
         }
 
@@ -255,6 +268,7 @@ public class AlertReceivedListener {
 
         AlertHistoryEntity oldHistory = current.getAlertHistory();
         AlertState oldState = oldHistory.getAlertState();
+        AlertFirmness oldFirmness = current.getFirmness();
 
         // insert history, update current
         AlertHistoryEntity history = createHistory(clusterId,
@@ -299,7 +313,7 @@ public class AlertReceivedListener {
         toCreateHistoryAndMerge.add(current);
 
         // create the event to fire later
-        alertEvents.add(new AlertStateChangeEvent(clusterId, alert, current, oldState));
+        alertEvents.add(new AlertStateChangeEvent(clusterId, alert, current, oldState, oldFirmness));
       }
     }
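
The hunks above do two things: they harden a SOFT alert once its occurrence count reaches the
configured repeat tolerance, and they record the firmness the alert is transitioning from so that
downstream listeners can reason about it. A rough, hypothetical Python sketch of the hardening rule
(illustration only, not the Java listener itself):

    SOFT, HARD = "SOFT", "HARD"

    def next_firmness(current_firmness, occurrences, repeat_tolerance):
        # a SOFT alert becomes HARD once it has occurred repeat_tolerance times
        if current_firmness == SOFT and occurrences >= repeat_tolerance:
            return HARD
        return current_firmness

    # with a tolerance of 3, the third consecutive occurrence hardens the alert
    assert next_firmness(SOFT, 2, 3) == SOFT
    assert next_firmness(SOFT, 3, 3) == HARD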
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertStateChangedListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertStateChangedListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertStateChangedListener.java
index 73c2f1b..21584fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertStateChangedListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertStateChangedListener.java
@@ -47,11 +47,15 @@ import com.google.inject.Singleton;
 
 /**
  * The {@link AlertStateChangedListener} class response to
- * {@link AlertStateChangeEvent} and updates {@link AlertNoticeEntity} instances
+ * {@link AlertStateChangeEvent} and creates {@link AlertNoticeEntity} instances
  * in the database.
  * <p/>
  * {@link AlertNoticeEntity} instances will only be updated if the firmness of
- * the alert is {@link AlertFirmness#HARD}.
+ * the alert is {@link AlertFirmness#HARD}. In the case of {@link AlertState#OK}
+ * (which is always {@link AlertFirmness#HARD}), then the prior alert must be
+ * {@link AlertFirmness#HARD} for any notifications to be created. This is
+ * because a SOFT non-OK alert (such as CRITICAL) would not have caused a
+ * notification, so changing back from this SOFT state should not either.
  */
 @Singleton
 @EagerSingleton
@@ -119,6 +123,15 @@ public class AlertStateChangedListener {
       return;
     }
 
+    // OK alerts are always HARD, so we need to catch the case where we are
+    // coming from a SOFT non-OK to an OK; in these cases we should not alert
+    //
+    // New State = OK
+    // Old Firmness = SOFT
+    if (history.getAlertState() == AlertState.OK && event.getFromFirmness() == AlertFirmness.SOFT) {
+      return;
+    }
+
     // don't create any outbound alert notices if in MM
     AlertCurrentEntity currentAlert = event.getCurrentAlert();
     if (null != currentAlert
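
The class comment and the new guard above express the same rule from both ends: notices are only
created for HARD alerts, and an OK alert (which is always HARD) that arrives from a SOFT non-OK
state is also suppressed, since the SOFT state never produced a notification to begin with. A
small, hypothetical Python sketch of that decision, for illustration only:

    OK, SOFT, HARD = "OK", "SOFT", "HARD"

    def should_create_notice(new_state, new_firmness, from_firmness):
        # SOFT alerts never notify
        if new_firmness != HARD:
            return False
        # an OK recovering from a SOFT non-OK never alerted, so stay silent
        if new_state == OK and from_firmness == SOFT:
            return False
        return True

    assert should_create_notice("CRITICAL", HARD, HARD) is True
    assert should_create_notice(OK, HARD, SOFT) is False   # the case added by this commit
    assert should_create_notice(OK, HARD, HARD) is True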

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index 3583dd1..e76bc5c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -18,15 +18,22 @@
 
 package org.apache.ambari.server.upgrade;
 
-import com.google.common.collect.Lists;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
+import java.sql.Clob;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
@@ -49,21 +56,15 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.jdbc.support.JdbcUtils;
 
-import java.sql.Clob;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.collect.Lists;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.gson.JsonPrimitive;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
 
 /**
  * Upgrade catalog for version 2.4.0.
@@ -208,6 +209,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     updateYarnEnv();
     removeHiveOozieDBConnectionConfigs();
     updateClustersAndHostsVersionStateTableDML();
+    removeStandardDeviationAlerts();
   }
 
   private void createSettingTable() throws SQLException {
@@ -348,18 +350,6 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
       }});
       put("hive_metastore_process", defaultKeytabVisibilityMap);
       put("hive_server_process", defaultKeytabVisibilityMap);
-      put("namenode_service_rpc_queue_latency_hourly", hdfsVisibilityMap);
-      put("namenode_client_rpc_queue_latency_hourly", hdfsVisibilityMap);
-      put("namenode_service_rpc_processing_latency_hourly", hdfsVisibilityMap);
-      put("namenode_client_rpc_processing_latency_hourly", hdfsVisibilityMap);
-      put("increase_nn_heap_usage_daily", hdfsVisibilityMap);
-      put("namenode_service_rpc_processing_latency_daily", hdfsVisibilityMap);
-      put("namenode_client_rpc_processing_latency_daily", hdfsVisibilityMap);
-      put("namenode_service_rpc_queue_latency_daily", hdfsVisibilityMap);
-      put("namenode_client_rpc_queue_latency_daily", hdfsVisibilityMap);
-      put("namenode_increase_in_storage_capacity_usage_daily", hdfsVisibilityMap);
-      put("increase_nn_heap_usage_weekly", hdfsVisibilityMap);
-      put("namenode_increase_in_storage_capacity_usage_weekly", hdfsVisibilityMap);
     }};
 
     Map<String, Map<String, String>> reportingPercentMap = new HashMap<String, Map<String, String>>(){{
@@ -446,18 +436,6 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
 
     // list of alerts that need to get property updates
     Set<String> alertNamesForPropertyUpdates = new HashSet<String>() {{
-      add("namenode_service_rpc_queue_latency_hourly");
-      add("namenode_client_rpc_queue_latency_hourly");
-      add("namenode_service_rpc_processing_latency_hourly");
-      add("namenode_client_rpc_processing_latency_hourly");
-      add("increase_nn_heap_usage_daily");
-      add("namenode_service_rpc_processing_latency_daily");
-      add("namenode_client_rpc_processing_latency_daily");
-      add("namenode_service_rpc_queue_latency_daily");
-      add("namenode_client_rpc_queue_latency_daily");
-      add("namenode_increase_in_storage_capacity_usage_daily");
-      add("increase_nn_heap_usage_weekly");
-      add("namenode_increase_in_storage_capacity_usage_weekly");
       add("hawq_segment_process_percent");
       add("mapreduce_history_server_cpu");
       add("yarn_nodemanager_webui_percent");
@@ -1527,4 +1505,43 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
     }
   }
 
+  /**
+   * Removes the HDFS/AMS alert definitions for the standard deviation alerts,
+   * including all history, notifications and groupings.
+   * <p/>
+   * These alerts shipped disabled and were not functional in prior versions of
+   * Ambari. This is the cleanest and simplest way to update them all as they
+   * will be read back into Ambari on server startup.
+   *
+   * @throws SQLException
+   */
+  void removeStandardDeviationAlerts() throws SQLException {
+    List<String> deviationAlertNames = Lists.newArrayList(
+        "namenode_service_rpc_queue_latency_hourly",
+        "namenode_client_rpc_queue_latency_hourly",
+        "namenode_service_rpc_processing_latency_hourly",
+        "namenode_client_rpc_processing_latency_hourly",
+        "increase_nn_heap_usage_daily",
+        "namenode_service_rpc_processing_latency_daily",
+        "namenode_client_rpc_processing_latency_daily",
+        "namenode_service_rpc_queue_latency_daily",
+        "namenode_client_rpc_queue_latency_daily",
+        "namenode_increase_in_storage_capacity_usage_daily",
+        "increase_nn_heap_usage_weekly",
+        "namenode_increase_in_storage_capacity_usage_weekly");
+
+    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
+    Clusters clusters = injector.getInstance(Clusters.class);
+    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+    for (final Cluster cluster : clusterMap.values()) {
+      long clusterId = cluster.getClusterId();
+
+      for (String alertName : deviationAlertNames) {
+        AlertDefinitionEntity definition = alertDefinitionDAO.findByName(clusterId, alertName);
+        if (null != definition) {
+          alertDefinitionDAO.remove(definition);
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
index e328552..aedbdfe 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
@@ -613,6 +613,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -685,6 +693,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -757,6 +773,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -829,6 +853,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -893,6 +925,14 @@
               "value": 50,
               "description": "The percentage of NameNode heap usage growth.",
               "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -965,6 +1005,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1037,6 +1085,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1109,6 +1165,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1181,6 +1245,14 @@
               "type": "NUMERIC",
               "units": "seconds",
               "description": "The minimum latency to measure growth."
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "ms",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1245,6 +1317,14 @@
               "value": 50,
               "description": "The percentage of storage capacity usage growth.",
               "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "B",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1309,6 +1389,14 @@
               "value": 50,
               "description": "The percentage of NameNode heap usage growth.",
               "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "MB",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }
@@ -1373,6 +1461,14 @@
               "value": 20,
               "description": "The percentage of storage capacity usage growth.",
               "threshold": "CRITICAL"
+            },
+            {
+              "name": "metric.units",
+              "display_name": "Metric Units",
+              "type": "STRING",
+              "value": "B",
+              "description": "The units that the metric data points are reported in.",
+              "visibility": "HIDDEN"
             }
           ]
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index 30fb7aa..3296493 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -18,12 +18,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 import httplib
-
+import locale
 import json
 import logging
 import urllib
 import time
 import urllib2
+
 from resource_management import Environment
 from ambari_commons.aggregate_functions import sample_standard_deviation, mean
 
@@ -62,6 +63,8 @@ MERGE_HA_METRICS_PARAM_KEY = 'mergeHaMetrics'
 MERGE_HA_METRICS_PARAM_DEFAULT = False
 METRIC_NAME_PARAM_KEY = 'metricName'
 METRIC_NAME_PARAM_DEFAULT = ''
+METRIC_UNITS_PARAM_KEY = 'metric.units'
+METRIC_UNITS_DEFAULT = ''
 APP_ID_PARAM_KEY = 'appId'
 APP_ID_PARAM_DEFAULT = 'NAMENODE'
 
@@ -82,6 +85,12 @@ MINIMUM_VALUE_THRESHOLD_KEY = 'minimumValue'
 
 AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
 
+# The variance for this alert is 27MB which is 27% of the 100MB average (20MB is the limit)
+DEVIATION_THRESHOLD_MESSAGE = "The variance for this alert is {0}{1} which is {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
+
+# The variance for this alert is 15MB which is within 20% of the 904ms average (20MB is the limit)
+DEVIATION_OK_MESSAGE = "The variance for this alert is {0}{1} which is within {2:.0f}% of the {3}{4} average ({5}{6} is the limit)"
+
 logger = logging.getLogger()
 
 def get_tokens():
@@ -123,6 +132,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if METRIC_NAME_PARAM_KEY in parameters:
     metric_name = parameters[METRIC_NAME_PARAM_KEY]
 
+  metric_units = METRIC_UNITS_DEFAULT
+  if METRIC_UNITS_PARAM_KEY in parameters:
+    metric_units = parameters[METRIC_UNITS_PARAM_KEY]
+
   app_id = APP_ID_PARAM_DEFAULT
   if APP_ID_PARAM_KEY in parameters:
     app_id = parameters[APP_ID_PARAM_KEY]
@@ -160,7 +173,8 @@ def execute(configurations={}, parameters={}, host_name=None):
       collector_host = collector_webapp_address[0]
       collector_port = int(collector_webapp_address[1])
     else:
-      return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY])])
+      return (RESULT_STATE_UNKNOWN, ['{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(
+        METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY])])
 
   namenode_service_rpc_address = None
   # hdfs-site is required
@@ -263,14 +277,14 @@ def execute(configurations={}, parameters={}, host_name=None):
               namenode_service_rpc_address = hdfs_site[nn_service_rpc_address_key]
           pass
         except:
-          logger.exception("Unable to determine active NameNode")
+          logger.exception("Unable to determine the active NameNode")
     pass
 
     if merge_ha_metrics:
       hostnames = ",".join(namenodes)
       # run only on active NN, no need to run the same requests from the standby
       if host_name not in active_namenodes:
-        return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
+        return (RESULT_STATE_SKIPPED, ['This alert will be reported by another host.'])
     pass
 
   # Skip service rpc alert if port is not enabled
@@ -296,10 +310,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     data = response.read()
     conn.close()
   except Exception:
-    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from AMS."])
+    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
 
   if response.status != 200:
-    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from AMS."])
+    return (RESULT_STATE_UNKNOWN, ["Unable to retrieve metrics from the Ambari Metrics service."])
 
   data_json = json.loads(data)
   metrics = []
@@ -310,37 +324,69 @@ def execute(configurations={}, parameters={}, host_name=None):
   pass
 
   if not metrics or len(metrics) < 2:
-    return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation for {0} datapoints".format(len(metrics))])
+    number_of_data_points = len(metrics) if metrics else 0
+    return (RESULT_STATE_SKIPPED, ["There are not enough data points to calculate the standard deviation ({0} sampled)".format(
+      number_of_data_points)])
 
   if minimum_value_threshold:
     # Filter out points below min threshold
     metrics = [metric for metric in metrics if metric > (minimum_value_threshold * 1000)]
     if len(metrics) < 2:
-      return (RESULT_STATE_OK, ['No datapoints found above the minimum threshold of {0} seconds'.format(minimum_value_threshold)])
+      return (RESULT_STATE_OK, ['There were no data points above the minimum threshold of {0} seconds'.format(minimum_value_threshold)])
 
   mean_value = mean(metrics)
   stddev = sample_standard_deviation(metrics)
-  max_value = max(metrics) / 1000
 
   try:
-    deviation_percent = stddev / mean_value * 100
+    deviation_percent = stddev / float(mean_value) * 100
   except ZeroDivisionError:
     # should not be a case for this alert
-    return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation percentage. The mean value is 0"])
+    return (RESULT_STATE_SKIPPED, ["Unable to calculate the standard deviation because the mean value is 0"])
 
-  logger.debug("""
-  AMS request parameters - {0}
-  AMS response - {1}
-  Mean - {2}
-  Standard deviation - {3}
-  Percentage standard deviation - {4}
-  """.format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent))
+  # log the AMS request
+  if logger.isEnabledFor(logging.DEBUG):
+    logger.debug("""
+    AMS request parameters - {0}
+    AMS response - {1}
+    Mean - {2}
+    Standard deviation - {3}
+    Percentage standard deviation - {4}
+    """.format(encoded_get_metrics_parameters, data_json, mean_value, stddev, deviation_percent))
 
+  mean_value_localized = locale.format("%.0f", mean_value, grouping=True)
+
+  variance_value = (deviation_percent / 100.0) * mean_value
+  variance_value_localized = locale.format("%.0f", variance_value, grouping=True)
+
+  # check for CRITICAL status
   if deviation_percent > critical_threshold:
-    return (RESULT_STATE_CRITICAL,['CRITICAL. Percentage standard deviation value {0}% is beyond the critical threshold of {1}% (growing {2} seconds to {3} seconds)'.format("%.2f" % deviation_percent, "%.2f" % critical_threshold, minimum_value_threshold, "%.2f" % max_value)])
+    threshold_value = ((critical_threshold / 100.0) * mean_value)
+    threshold_value_localized = locale.format("%.0f", threshold_value, grouping=True)
+
+    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
+      mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+    return (RESULT_STATE_CRITICAL,[message])
+
+  # check for WARNING status
   if deviation_percent > warning_threshold:
-    return (RESULT_STATE_WARNING,['WARNING. Percentage standard deviation value {0}% is beyond the warning threshold of {1}% (growing {2} seconds to {3} seconds)'.format("%.2f" % deviation_percent, "%.2f" % warning_threshold, minimum_value_threshold, "%.2f" % max_value)])
-  return (RESULT_STATE_OK,['OK. Percentage standard deviation value is {0}%'.format("%.2f" % deviation_percent)])
+    threshold_value = ((warning_threshold / 100.0) * mean_value)
+    threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
+
+    message = DEVIATION_THRESHOLD_MESSAGE.format(variance_value_localized, metric_units, deviation_percent,
+      mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+    return (RESULT_STATE_WARNING, [message])
+
+  # return OK status; use the warning threshold as the value to compare against
+  threshold_value = ((warning_threshold / 100.0) * mean_value)
+  threshold_value_localized = locale.format("%.0f", threshold_value, grouping = True)
+
+  message = DEVIATION_OK_MESSAGE.format(variance_value_localized, metric_units, warning_threshold,
+    mean_value_localized, metric_units, threshold_value_localized, metric_units)
+
+  return (RESULT_STATE_OK,[message])
+
 
 def valid_collector_webapp_address(webapp_address):
   if len(webapp_address) == 2 \
@@ -351,6 +397,7 @@ def valid_collector_webapp_address(webapp_address):
 
   return False
 
+
 def get_jmx(query, connection_timeout):
   response = None
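
The new response text is driven by three numbers computed above: the sample standard deviation of
the data points, their mean, and the deviation expressed as a percentage of the mean. The
"variance" shown to the user is that percentage re-applied to the mean (numerically, the standard
deviation itself), and the "limit" is the warning or critical threshold applied to the mean. A
self-contained sketch of the arithmetic that reproduces the OK-state message asserted in the unit
test below (two data points of 40000 ms and 50000 ms with a 100% warning threshold); it assumes an
en_US-style locale so that grouping produces the "," separators:

    import locale
    import math

    locale.setlocale(locale.LC_ALL, '')

    def sample_standard_deviation(values):
        m = sum(values) / float(len(values))
        return math.sqrt(sum((v - m) ** 2 for v in values) / float(len(values) - 1))

    def localized(value):
        return locale.format("%.0f", value, grouping=True)

    metrics = [40000, 50000]      # data points, in ms
    warning_threshold = 100.0     # percent

    mean_value = sum(metrics) / float(len(metrics))              # 45000
    stddev = sample_standard_deviation(metrics)                  # ~7071
    deviation_percent = stddev / float(mean_value) * 100         # ~15.7

    variance_value = (deviation_percent / 100.0) * mean_value    # ~7071
    threshold_value = (warning_threshold / 100.0) * mean_value   # 45000

    print("The variance for this alert is {0}ms which is within {1:.0f}% of "
          "the {2}ms average ({3}ms is the limit)".format(
              localized(variance_value), warning_threshold,
              localized(mean_value), localized(threshold_value)))
    # -> The variance for this alert is 7,071ms which is within 100% of the
    #    45,000ms average (45,000ms is the limit)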
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AggregateAlertListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AggregateAlertListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AggregateAlertListenerTest.java
index 64ee936..85dedba 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AggregateAlertListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AggregateAlertListenerTest.java
@@ -117,7 +117,8 @@ public class AggregateAlertListenerTest {
     // trigger an alert which will trigger the aggregate
     Alert alert = new Alert("mock-alert", null, null, null, null, null);
     AlertAggregateListener aggregateListener = m_injector.getInstance(AlertAggregateListener.class);
-    AlertStateChangeEvent event = new AlertStateChangeEvent(0, alert, currentEntityMock, null);
+    AlertStateChangeEvent event = new AlertStateChangeEvent(0, alert, currentEntityMock, null,
+        AlertFirmness.HARD);
     aggregateListener.onAlertStateChangeEvent(event);
 
     // verify that one AlertReceivedEvent was fired (it's the one the listener
@@ -169,7 +170,8 @@ public class AggregateAlertListenerTest {
     // the alert will be SOFT and should not cause a recalculation
     Alert alert = new Alert("mock-alert", null, null, null, null, null);
     AlertAggregateListener aggregateListener = m_injector.getInstance(AlertAggregateListener.class);
-    AlertStateChangeEvent event = new AlertStateChangeEvent(0, alert, currentEntityMock, null);
+    AlertStateChangeEvent event = new AlertStateChangeEvent(0, alert, currentEntityMock, null,
+        AlertFirmness.HARD);
     aggregateListener.onAlertStateChangeEvent(event);
 
     // ensure that the aggregate listener did not trigger an alert in response

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
index 0c3e07f..7bf11e3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertReceivedListenerTest.java
@@ -474,7 +474,7 @@ public class AlertReceivedListenerTest {
    * create an entry if there is currently no current alert.
    */
   @Test
-  public void testSkippedAlertUpdatesTimestamp() {
+  public void testSkippedAlertUpdatesTimestampAndText() {
     String definitionName = ALERT_DEFINITION + "1";
     String serviceName = "HDFS";
     String componentName = "NAMENODE";
@@ -504,9 +504,9 @@ public class AlertReceivedListenerTest {
     alert.setState(AlertState.SKIPPED);
     alert.setTimestamp(2L);
 
-    // the logic we have does NOT update the text, so make sure this does not
-    // change
-    alert.setText("INVALID");
+    // we should allow updating the text if the text is provided
+    text = text + " Updated";
+    alert.setText(text);
 
     // get the current make sure the fields were updated
     listener.onAlertEvent(event);
@@ -514,6 +514,17 @@ public class AlertReceivedListenerTest {
     assertEquals(1L, (long) allCurrent.get(0).getOriginalTimestamp());
     assertEquals(2L, (long) allCurrent.get(0).getLatestTimestamp());
     assertEquals(text, allCurrent.get(0).getLatestText());
+
+    // verify that blank text does not update
+    alert.setText("");
+    alert.setTimestamp(3L);
+
+    // get the current make sure the text was not updated
+    listener.onAlertEvent(event);
+    allCurrent = m_dao.findCurrent();
+    assertEquals(1L, (long) allCurrent.get(0).getOriginalTimestamp());
+    assertEquals(3L, (long) allCurrent.get(0).getLatestTimestamp());
+    assertEquals(text, allCurrent.get(0).getLatestText());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertStateChangedEventTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertStateChangedEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertStateChangedEventTest.java
index dad1008..f76867a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertStateChangedEventTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertStateChangedEventTest.java
@@ -231,7 +231,6 @@ public class AlertStateChangedEventTest {
     EasyMock.expect(current.getAlertHistory()).andReturn(history).anyTimes();
     EasyMock.expect(current.getFirmness()).andReturn(AlertFirmness.SOFT).atLeastOnce();
 
-
     EasyMock.expect(history.getAlertDefinition()).andReturn(definition).atLeastOnce();
     EasyMock.expect(alert.getText()).andReturn("The HDFS Foo Alert Is Not Good").atLeastOnce();
     EasyMock.expect(alert.getState()).andReturn(AlertState.CRITICAL).atLeastOnce();
@@ -247,6 +246,47 @@ public class AlertStateChangedEventTest {
   }
 
   /**
+   * Tests that an alert with a firmness of {@link AlertFirmness#HARD} and state
+   * of {@link AlertState#OK} does not trigger any notifications when coming
+   * from a {@link AlertFirmness#SOFT} non-OK alert.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testSoftAlertTransitionToHardOKDoesNotCreateNotification() throws Exception {
+    EasyMock.replay(dispatchDao);
+
+    AlertDefinitionEntity definition = getMockAlertDefinition();
+
+    AlertCurrentEntity current = getMockedAlertCurrentEntity();
+    AlertHistoryEntity history = EasyMock.createNiceMock(AlertHistoryEntity.class);
+    AlertStateChangeEvent event = EasyMock.createNiceMock(AlertStateChangeEvent.class);
+    Alert alert = EasyMock.createNiceMock(Alert.class);
+
+    // register a HARD/OK for the brand new alert coming in
+    EasyMock.expect(current.getAlertHistory()).andReturn(history).anyTimes();
+    EasyMock.expect(current.getFirmness()).andReturn(AlertFirmness.HARD).atLeastOnce();
+    EasyMock.expect(history.getAlertDefinition()).andReturn(definition).atLeastOnce();
+    EasyMock.expect(history.getAlertState()).andReturn(AlertState.OK).atLeastOnce();
+    EasyMock.expect(alert.getText()).andReturn("The HDFS Foo Alert Is Good").atLeastOnce();
+    EasyMock.expect(alert.getState()).andReturn(AlertState.OK).atLeastOnce();
+
+    // set the old state as being a SOFT/CRITICAL
+    EasyMock.expect(event.getFromState()).andReturn(AlertState.CRITICAL).anyTimes();
+    EasyMock.expect(event.getFromFirmness()).andReturn(AlertFirmness.SOFT).atLeastOnce();
+
+    EasyMock.expect(event.getCurrentAlert()).andReturn(current).atLeastOnce();
+    EasyMock.expect(event.getNewHistoricalEntry()).andReturn(history).atLeastOnce();
+    EasyMock.expect(event.getAlert()).andReturn(alert).atLeastOnce();
+
+    EasyMock.replay(definition, current, history, event, alert);
+
+    // async publishing
+    eventPublisher.publish(event);
+    EasyMock.verify(dispatchDao, current, history, event);
+  }
+
+  /**
    * Gets an {@link AlertDefinitionEntity} with some mocked calls expected.
    *
    * @return

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
index e8ecd09..4f56d77 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
@@ -29,8 +29,6 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicReference;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.events.AlertEvent;
 import org.apache.ambari.server.events.AlertReceivedEvent;
 import org.apache.ambari.server.events.AlertStateChangeEvent;
@@ -51,6 +49,7 @@ import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
 import org.apache.ambari.server.orm.entities.AlertNoticeEntity;
 import org.apache.ambari.server.orm.entities.AlertTargetEntity;
 import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertFirmness;
 import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -79,6 +78,8 @@ import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
 import com.google.inject.persist.UnitOfWork;
 
+import junit.framework.Assert;
+
 
 /**
  * Tests the management of {@link AlertEvent}s in the system.
@@ -318,7 +319,7 @@ public class AlertDataManagerTest {
 
     AlertStateChangeEvent event = new AlertStateChangeEvent(
         m_cluster.getClusterId(), alert1,
-        currentAlert, AlertState.CRITICAL);
+        currentAlert, AlertState.CRITICAL, AlertFirmness.HARD);
 
     AlertStateChangedListener listener = m_injector.getInstance(AlertStateChangedListener.class);
     listener.onAlertEvent(event);
@@ -457,7 +458,7 @@ public class AlertDataManagerTest {
         m_cluster.getClusterId(), "h1", definition.getDefinitionName());
 
     AlertStateChangeEvent event = new AlertStateChangeEvent(
-        m_cluster.getClusterId(), alert, current, AlertState.OK);
+        m_cluster.getClusterId(), alert, current, AlertState.OK, AlertFirmness.HARD);
 
     listener.onAlertStateChangeEvent(event);
     assertNotNull(ref.get());

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index 6439401..ae69589 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -371,6 +371,7 @@ public class UpgradeCatalog240Test {
     Method updateYarnEnv = UpgradeCatalog240.class.getDeclaredMethod("updateYarnEnv");
     Method removeHiveOozieDBConnectionConfigs = UpgradeCatalog240.class.getDeclaredMethod("removeHiveOozieDBConnectionConfigs");
     Method updateClustersAndHostsVersionStateTableDML = UpgradeCatalog240.class.getDeclaredMethod("updateClustersAndHostsVersionStateTableDML");
+    Method removeStandardDeviationAlerts = UpgradeCatalog240.class.getDeclaredMethod("removeStandardDeviationAlerts");
 
     Capture<String> capturedStatements = newCapture(CaptureType.ALL);
 
@@ -390,6 +391,7 @@ public class UpgradeCatalog240Test {
             .addMockedMethod(updateYarnEnv)
             .addMockedMethod(removeHiveOozieDBConnectionConfigs)
             .addMockedMethod(updateClustersAndHostsVersionStateTableDML)
+            .addMockedMethod(removeStandardDeviationAlerts)
             .createMock();
 
     Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
@@ -407,6 +409,7 @@ public class UpgradeCatalog240Test {
     upgradeCatalog240.updateYarnEnv();
     upgradeCatalog240.removeHiveOozieDBConnectionConfigs();
     upgradeCatalog240.updateClustersAndHostsVersionStateTableDML();
+    upgradeCatalog240.removeStandardDeviationAlerts();
 
     replay(upgradeCatalog240, dbAccessor);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7075841/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_metrics_deviation.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_metrics_deviation.py
index 5809010..519a6f0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_metrics_deviation.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_metrics_deviation.py
@@ -27,6 +27,10 @@ from mock.mock import patch, MagicMock
 # Local imports
 from stacks.utils.RMFTestCase import *
 
+# set locale for formatted strings from this alert
+import locale
+locale.setlocale(locale.LC_ALL, 'en_US')
+
 COMMON_SERVICES_ALERTS_DIR = "HDFS/2.1.0.2.0/package/alerts"
 
 file_path = os.path.dirname(os.path.abspath(__file__))
@@ -81,7 +85,8 @@ class TestAlertMetricsDeviation(RMFTestCase):
       'appId': 'NAMENODE',
       'minimumValue': 30.0,
       'kerberos.kinit.timer': 14400000L,
-      'metricName': 'metric1'
+      'metricName': 'metric1',
+      'metric.units': 'ms'
     }
   
   def test_missing_configs(self):
@@ -107,28 +112,28 @@ class TestAlertMetricsDeviation(RMFTestCase):
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_OK)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertEquals('No datapoints found above the minimum threshold of 30 seconds',messages[0])
+    self.assertEquals('There were no data points above the minimum threshold of 30 seconds',messages[0])
 
     # Unable to calculate the standard deviation for 1 data point
     response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000}}]}'
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_SKIPPED)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertEquals('Unable to calculate the standard deviation for 1 datapoints', messages[0])
+    self.assertEquals('There are not enough data points to calculate the standard deviation (1 sampled)', messages[0])
 
     # OK
     response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000,"1459966370838":50000}}]}'
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_OK)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertTrue('OK. Percentage standard deviation value is' in messages[0])
+    self.assertEquals('The variance for this alert is 7,071ms which is within 100% of the 45,000ms average (45,000ms is the limit)', messages[0])
 
     # Warning
     response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000,"1459966370838":1000000}}]}'
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_WARNING)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertTrue('WARNING. Percentage standard deviation' in messages[0])
+    self.assertEquals('The variance for this alert is 678,823ms which is 131% of the 520,000ms average (520,000ms is the limit)', messages[0])
 
     # HTTP request to AMS failed
     response.read.return_value = ''
@@ -136,11 +141,11 @@ class TestAlertMetricsDeviation(RMFTestCase):
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_UNKNOWN)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertEquals('Unable to retrieve metrics from AMS.', messages[0])
+    self.assertEquals('Unable to retrieve metrics from the Ambari Metrics service.', messages[0])
 
     # Unable to connect to AMS
     conn_mock.side_effect = Exception('Unable to connect to AMS')
     [status, messages] = alert.execute(configurations=configs, parameters=parameters)
     self.assertEqual(status, RESULT_STATE_UNKNOWN)
     self.assertTrue(messages is not None and len(messages) == 1)
-    self.assertEquals('Unable to retrieve metrics from AMS.', messages[0])
+    self.assertEquals('Unable to retrieve metrics from the Ambari Metrics service.', messages[0])

