ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From nc...@apache.org
Subject [07/14] AMBARI-2677. Merge from branch-1.4.0 (ncole)
Date Fri, 19 Jul 2013 16:31:40 GMT
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..fed4933
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,196 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+  </property>
+  
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+  </property>
+
+  <property>
+   <name>yarn.resourcemanager.scheduler.class</name>
+   <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>1024</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>8192</value>
+  </property>
+
+<!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>8192</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+  <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+    setting memory limits for containers. Container allocations are
+    expressed in terms of physical memory, and virtual memory usage
+    is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+ 
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce.shuffle</value>
+    <description>Auxiliary services of NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/var/log/hadoop/yarn</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>The interval, in milliseconds, for which the node manager
+    waits between two cycles of monitoring its containers' memory usage. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value> 
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>36000</value>
+  </property>
+
+	<property>
+		<name>yarn.resourcemanager.history-store.class</name>
+		<value>org.apache.hadoop.yarn.server.resourcemanager.history.db.RMHistoryDBStore</value>
+	</property>
+
+	<property>
+		<name>yarn.resourcemanager.history-store.db.user</name>
+		<value>mapred</value>
+	</property>
+	
+	<property>
+		<name>yarn.resourcemanager.history-store.db.password</name>
+		<value>mapred</value>
+	</property>
+	
+	<property>
+		<name>yarn.resourcemanager.history-store.db.database</name>
+		<value>jdbc:postgresql:ambarirca</value>
+	</property>
+	
+	<property>
+		<name>yarn.resourcemanager.history-store.db.driver</name>
+		<value>org.postgresql.Driver</value>
+	</property>
+	
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/metainfo.xml
new file mode 100644
index 0000000..4d150d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.3/services/YARN/metainfo.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>mapred</user>
+    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+    <version>2.1.0.2.0.3.0</version>
+    <components>
+        <component>
+            <name>RESOURCEMANAGER</name>
+            <category>MASTER</category>
+        </component>
+        <component>
+            <name>NODEMANAGER</name>
+            <category>SLAVE</category>
+        </component>
+       <component>
+            <name>YARN_CLIENT</name>
+            <category>CLIENT</category>
+        </component>
+    </components>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
index da963b6..431a6a3 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
@@ -31,14 +31,18 @@ ALTER TABLE ambari.hostconfigmapping
 ALTER TABLE ambari.clusterconfigmapping
   ADD COLUMN user_name VARCHAR(255) NOT NULL DEFAULT '_db';
 
-CREATE TABLE ambari.hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
+CREATE TABLE ambari.hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
 GRANT ALL PRIVILEGES ON TABLE ambari.hostconfigmapping TO :username;
 ALTER TABLE ambari.hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
 ALTER TABLE ambari.hostconfigmapping ADD CONSTRAINT FK_hostconfigmapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
 
 ALTER ROLE :username SET search_path to 'ambari';
 
-ALTER TABLE ambari.stage ADD COLUMN request_context VARCHAR(255);
+ALTER SEQUENCE ambari.host_role_command_task_id_seq INCREMENT BY 50;
+SELECT nextval('ambari.host_role_command_task_id_seq');
+
+ALTER TABLE ambari.stage ADD COLUMN request_context VARCHAR(255);SELECT nextval('ambari.host_role_command_task_id_seq');
+
 
 -- portability changes for MySQL/Oracle support
 alter table ambari.hostcomponentdesiredconfigmapping rename to hcdesiredconfigmapping;
@@ -53,7 +57,7 @@ insert into ambari.ambari_sequences(sequence_name, "value")
   union all
   select 'user_id_seq', nextval('ambari.users_user_id_seq')
   union all
-  select 'host_role_command_id_seq', (select max(task_id) from ambari.host_role_command) + 51;
+  select 'host_role_command_id_seq', nextval('ambari.host_role_command_task_id_seq');
 
 drop sequence ambari.host_role_command_task_id_seq;
 drop sequence ambari.users_user_id_seq;

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 75e03b5..2370013 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -18,33 +18,71 @@
 
 package org.apache.ambari.server.controller;
 
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import org.apache.ambari.server.*;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ClusterNotFoundException;
+import org.apache.ambari.server.HostNotFoundException;
+import org.apache.ambari.server.ParentObjectNotFoundException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.ServiceComponentHostNotFoundException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.Predicate;
 import org.easymock.Capture;
 import org.junit.Test;
 
-import java.lang.reflect.Field;
-import java.lang.reflect.Type;
-import java.util.*;
-
-import static org.junit.Assert.*;
-import static org.easymock.EasyMock.*;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 
 /**
  * AmbariManagementControllerImpl unit tests
  */
 public class AmbariManagementControllerImplTest {
 
+
+
   @Test
   public void testGetClusters() throws Exception {
     // member state mocks
@@ -1490,282 +1528,277 @@ public class AmbariManagementControllerImplTest {
       }
     });
     injector.getInstance(GuiceJpaInitializer.class);
-    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = injector.getInstance(Clusters.class);
-    Gson gson = new Gson();
-
-    clusters.addHost("host1");
-    clusters.addHost("host2");
-    clusters.addHost("host3");
-    Host host = clusters.getHost("host1");
-    host.setOsType("centos5");
-    host.persist();
-    host = clusters.getHost("host2");
-    host.setOsType("centos5");
-    host.persist();
-    host = clusters.getHost("host3");
-    host.setOsType("centos5");
-    host.persist();
-
-    ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
-    amc.createCluster(clusterRequest);
-
-    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-
-    amc.createServices(serviceRequests);
-
-    Type confType = new TypeToken<Map<String, String>>() {
-    }.getType();
-
-    ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
-        gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);
-
-    configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
-        gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);
-
-    configurationRequest = new ConfigurationRequest("c1", "global", "version1",
-        gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);
-
-
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS",
-        gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType)
-        , null));
-//    serviceRequests.add(new ServiceRequest("c1", "MAPREDUCE",
-//        gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"mapred-site\": \"version1\"}", confType)
-//        , null));
-//    serviceRequests.add(new ServiceRequest("c1", "HBASE",
-//        gson.<Map<String, String>>fromJson("{\"hbase-site\": \"version1\", \"hbase-env\": \"version1\"}", confType)
-//        , null));
-//    serviceRequests.add(new ServiceRequest("c1", "NAGIOS",
-//        gson.<Map<String, String>>fromJson("{\"nagios-global\": \"version2\" }", confType)
-//        , null));
-
-    amc.updateServices(serviceRequests, mapRequestProps, true, false);
-
-
-    Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-
-    amc.createComponents(serviceComponentRequests);
-
-    Set<HostRequest> hostRequests = new HashSet<HostRequest>();
-    hostRequests.add(new HostRequest("host1", "c1", null));
-    hostRequests.add(new HostRequest("host2", "c1", null));
-    hostRequests.add(new HostRequest("host3", "c1", null));
-
-    amc.createHosts(hostRequests);
-
-    Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-
-
-    amc.createHostComponents(componentHostRequests);
-
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    amc.updateServices(serviceRequests, mapRequestProps, true, false);
-
-    Cluster cluster = clusters.getCluster("c1");
-    Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    assertEquals(1, namenodes.size());
-
-    ServiceComponentHost componentHost = namenodes.get("host1");
-
-    Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-      ServiceComponentHost cHost = entry.getValue();
-      cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-      cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-    }
-    hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-      ServiceComponentHost cHost = entry.getValue();
-      cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-      cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-    }
-    hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
-      ServiceComponentHost cHost = entry.getValue();
-      cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-      cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
-    }
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-
-    amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-
-    assertEquals(State.MAINTENANCE, componentHost.getState());
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED"));
-
-    amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-
-    assertEquals(State.INSTALLED, componentHost.getState());
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
-
-    amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-
-    assertEquals(State.MAINTENANCE, componentHost.getState());
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, null));
-
-    amc.createHostComponents(componentHostRequests);
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED"));
-
-    amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
-
-    namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    assertEquals(2, namenodes.size());
-
-    componentHost = namenodes.get("host2");
-    componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
-    componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
-
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
-
-    RequestStatusResponse response = amc.updateServices(serviceRequests,
-      mapRequestProps, true, false);
-    for (ShortTaskStatus shortTaskStatus : response.getTasks()) {
-      assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole()));
-    }
-
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-
-    amc.deleteHostComponents(componentHostRequests);
-    namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    assertEquals(1, namenodes.size());
-
-    // testing the behavior for runSmokeTest flag
-    // piggybacking on this test to avoid setting up the mock cluster
-    testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests);
-
-    // should be able to add the host component back
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-    amc.createHostComponents(componentHostRequests);
-    namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    assertEquals(2, namenodes.size());
     
-    
-    // make unknown
-    ServiceComponentHost sch = null;
-    for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-      if (tmp.getServiceComponentName().equals("DATANODE")) {
-        tmp.setState(State.UNKNOWN);
-        sch = tmp;
+    try {
+      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+      Clusters clusters = injector.getInstance(Clusters.class);
+      Gson gson = new Gson();
+  
+      clusters.addHost("host1");
+      clusters.addHost("host2");
+      clusters.addHost("host3");
+      Host host = clusters.getHost("host1");
+      host.setOsType("centos5");
+      host.persist();
+      host = clusters.getHost("host2");
+      host.setOsType("centos5");
+      host.persist();
+      host = clusters.getHost("host3");
+      host.setOsType("centos5");
+      host.persist();
+  
+      ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
+      amc.createCluster(clusterRequest);
+  
+      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+  
+      amc.createServices(serviceRequests);
+  
+      Type confType = new TypeToken<Map<String, String>>() {
+      }.getType();
+  
+      ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
+          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+  
+      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
+          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+  
+      configurationRequest = new ConfigurationRequest("c1", "global", "version1",
+          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+  
+  
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS",
+          gson.<Map<String, String>>fromJson("{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType)
+          , null));
+  
+      amc.updateServices(serviceRequests, mapRequestProps, true, false);
+  
+      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
+  
+      amc.createComponents(serviceComponentRequests);
+  
+      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
+      hostRequests.add(new HostRequest("host1", "c1", null));
+      hostRequests.add(new HostRequest("host2", "c1", null));
+      hostRequests.add(new HostRequest("host3", "c1", null));
+  
+      amc.createHosts(hostRequests);
+  
+      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
+  
+  
+      amc.createHostComponents(componentHostRequests);
+  
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+      amc.updateServices(serviceRequests, mapRequestProps, true, false);
+  
+      Cluster cluster = clusters.getCluster("c1");
+      Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      assertEquals(1, namenodes.size());
+  
+      ServiceComponentHost componentHost = namenodes.get("host1");
+  
+      Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
       }
-    }
-    assertNotNull(sch);
-
-    // make maintenance
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE"));
-    amc.updateHostComponents(componentHostRequests, mapRequestProps, false);
-    assertEquals(State.MAINTENANCE, sch.getState ());
-    
-    // confirm delete
-    componentHostRequests.clear();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-    amc.deleteHostComponents(componentHostRequests);
-    
-    sch = null;
-    for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
-      if (tmp.getServiceComponentName().equals("DATANODE")) {
-        sch = tmp;
+      hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
       }
-    }
-    assertNull(sch);
-    
-    /*
-    *Test remove service
-    */
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
-    amc.updateServices(serviceRequests, mapRequestProps, true, false);
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", null, null, null));
-    assertEquals(1, amc.getServices(serviceRequests).size());
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-    amc.deleteServices(serviceRequests);
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", null, null, null));     
-    assertEquals(0, amc.getServices(serviceRequests).size());
-    
-    /*
-    *Test add service again
-    */
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
-    amc.createServices(serviceRequests);
-    assertEquals(1, amc.getServices(serviceRequests).size());
-    //Create new configs
-    configurationRequest = new ConfigurationRequest("c1", "core-site", "version2",
-        gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);
-    configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2",
-        gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);
-    configurationRequest = new ConfigurationRequest("c1", "global", "version2",
-        gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
-    );
-    amc.createConfiguration(configurationRequest);    
-    //Add configs to service
-    serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest("c1", "HDFS",
-        gson.<Map<String, String>>fromJson("{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType)
-        , null));
-    amc.updateServices(serviceRequests, mapRequestProps, true, false);
-    //Crate service components
-    serviceComponentRequests = new HashSet<ServiceComponentRequest>();
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
-    serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
-    amc.createComponents(serviceComponentRequests);
-    
-    //Create ServiceComponentHosts
-    componentHostRequests = new HashSet<ServiceComponentHostRequest>();
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
-    componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
-    amc.createHostComponents(componentHostRequests);    
-
+      hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
+      for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
+        ServiceComponentHost cHost = entry.getValue();
+        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+      }
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
+  
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+  
+      assertEquals(State.MAINTENANCE, componentHost.getState());
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED"));
+  
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+  
+      assertEquals(State.INSTALLED, componentHost.getState());
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE"));
+  
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+  
+      assertEquals(State.MAINTENANCE, componentHost.getState());
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, null));
+  
+      amc.createHostComponents(componentHostRequests);
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED"));
+  
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
+  
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      assertEquals(2, namenodes.size());
+  
+      componentHost = namenodes.get("host2");
+      componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));
+      componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
+  
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED"));
+  
+      RequestStatusResponse response = amc.updateServices(serviceRequests,
+        mapRequestProps, true, false);
+      for (ShortTaskStatus shortTaskStatus : response.getTasks()) {
+        assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole()));
+      }
+  
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+  
+      amc.deleteHostComponents(componentHostRequests);
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      assertEquals(1, namenodes.size());
+  
+      // testing the behavior for runSmokeTest flag
+      // piggybacking on this test to avoid setting up the mock cluster
+      testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests);
+  
+      // should be able to add the host component back
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      amc.createHostComponents(componentHostRequests);
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      assertEquals(2, namenodes.size());
+      
+      
+      // make unknown
+      ServiceComponentHost sch = null;
+      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
+        if (tmp.getServiceComponentName().equals("DATANODE")) {
+          tmp.setState(State.UNKNOWN);
+          sch = tmp;
+        }
+      }
+      assertNotNull(sch);
+  
+      // make maintenance
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE"));
+      amc.updateHostComponents(componentHostRequests, mapRequestProps, false);
+      assertEquals(State.MAINTENANCE, sch.getState ());
+      
+      // confirm delete
+      componentHostRequests.clear();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      amc.deleteHostComponents(componentHostRequests);
+      
+      sch = null;
+      for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) {
+        if (tmp.getServiceComponentName().equals("DATANODE")) {
+          sch = tmp;
+        }
+      }
+      assertNull(sch);
     
-    namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-    assertEquals(1, namenodes.size());
-    Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
-    assertEquals(3, datanodes.size());
-    Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
-    assertEquals(1, namenodes2.size());    
+      /*
+      *Test remove service
+      */
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED"));
+      amc.updateServices(serviceRequests, mapRequestProps, true, false);
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", null, null, null));
+      assertEquals(1, amc.getServices(serviceRequests).size());
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+      amc.deleteServices(serviceRequests);
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", null, null, null));     
+      assertEquals(0, amc.getServices(serviceRequests).size());
+      
+      /*
+      *Test add service again
+      */
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null));
+      amc.createServices(serviceRequests);
+      assertEquals(1, amc.getServices(serviceRequests).size());
+      //Create new configs
+      configurationRequest = new ConfigurationRequest("c1", "core-site", "version2",
+          gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+      configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2",
+          gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);
+      configurationRequest = new ConfigurationRequest("c1", "global", "version2",
+          gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
+      );
+      amc.createConfiguration(configurationRequest);    
+      //Add configs to service
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest("c1", "HDFS",
+          gson.<Map<String, String>>fromJson("{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType)
+          , null));
+      amc.updateServices(serviceRequests, mapRequestProps, true, false);
+      //Crate service components
+      serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null));
+      amc.createComponents(serviceComponentRequests);
+      
+      //Create ServiceComponentHosts
+      componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null));
+      amc.createHostComponents(componentHostRequests);    
+  
+      
+      namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
+      assertEquals(1, namenodes.size());
+      Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE").getServiceComponentHosts();
+      assertEquals(3, datanodes.size());
+      Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts();
+      assertEquals(1, namenodes2.size());
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }    
   }
 
   private void testRunSmokeTestFlag(Map<String, String> mapRequestProps,
@@ -1821,5 +1854,146 @@ public class AmbariManagementControllerImplTest {
     assertTrue(smokeTestRequired);
   }
 
+
+  @Test
+  public void testScheduleSmokeTest() throws Exception {
+
+    final String HOST1 = "host1";
+    final String OS_TYPE = "centos5";
+    final String STACK_ID = "HDP-2.0.3";
+    final String CLUSTER_NAME = "c1";
+    final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK";
+    final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK";
+    final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK";
+
+    Map<String,String> mapRequestProps = Collections.<String,String>emptyMap();
+    Injector injector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        Properties properties = new Properties();
+        properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory");
+
+        properties.setProperty(Configuration.METADETA_DIR_PATH,
+            "src/main/resources/stacks");
+        properties.setProperty(Configuration.SERVER_VERSION_FILE,
+                "../version");
+        properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE);
+        try {
+          install(new ControllerModule(properties));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+    injector.getInstance(GuiceJpaInitializer.class);
+    
+    try {
+      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+      Clusters clusters = injector.getInstance(Clusters.class);
+  
+      clusters.addHost(HOST1);
+      Host host = clusters.getHost(HOST1);
+      host.setOsType(OS_TYPE);
+      host.persist();
+  
+      ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
+      amc.createCluster(clusterRequest);
+  
+      Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, null));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, null));
+  
+      amc.createServices(serviceRequests);
+  
+      Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null, null));
+      serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null, null));
+  
+      amc.createComponents(serviceComponentRequests);
+  
+      Set<HostRequest> hostRequests = new HashSet<HostRequest>();
+      hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null));
+  
+      amc.createHosts(hostRequests);
+  
+      Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null, null));
+      componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null, null));
+  
+      amc.createHostComponents(componentHostRequests);
+  
+      //Install services
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.INSTALLED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.INSTALLED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.INSTALLED.name()));
+  
+      amc.updateServices(serviceRequests, mapRequestProps, true, false);
+  
+      Cluster cluster = clusters.getCluster(CLUSTER_NAME);
+  
+      for (String serviceName : cluster.getServices().keySet() ) {
+  
+        for(String componentName: cluster.getService(serviceName).getServiceComponents().keySet()) {
+  
+          Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName).getServiceComponent(componentName).getServiceComponentHosts();
+  
+          for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) {
+            ServiceComponentHost cHost = entry.getValue();
+            cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), STACK_ID));
+            cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
+          }
+        }
+      }
+  
+      //Start services
+      serviceRequests.clear();
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null, State.STARTED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null, State.STARTED.name()));
+      serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null, State.STARTED.name()));
+  
+      RequestStatusResponse response = amc.updateServices(serviceRequests,
+        mapRequestProps, true, false);
+  
+      Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(HDFS_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for HDFS
+      assertEquals(1, hdfsSmokeTasks.size());
+  
+      Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for MAPREDUCE2
+      assertEquals(1, mapreduce2SmokeTasks.size());
+  
+      Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(YARN_SERVICE_CHECK_ROLE));
+      //Ensure that smoke test task was created for YARN
+      assertEquals(1, yarnSmokeTasks.size());
+    } finally {
+      injector.getInstance(PersistService.class).stop();
+    }
+  }
+
+  private class RolePredicate implements Predicate {
+
+    private String role;
+
+    public RolePredicate(String role) {
+      this.role = role;
+    }
+
+    @Override
+    public boolean evaluate(Object obj) {
+      ShortTaskStatus task = (ShortTaskStatus)obj;
+      return task.getRole().equals(role);
+    }
+  }
+
   //todo other resources
 }

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 1b26fb1..7ab7d0b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -36,6 +36,9 @@ import java.util.Set;
 
 import javax.persistence.EntityManager;
 
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 import junit.framework.Assert;
 
 import org.apache.ambari.server.AmbariException;
@@ -92,10 +95,6 @@ import org.junit.rules.ExpectedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
 public class AmbariManagementControllerTest {
 
   private static final Logger LOG =
@@ -108,7 +107,7 @@ public class AmbariManagementControllerTest {
   private static final String REPO_ID = "HDP-1.1.1.16";
   private static final String PROPERTY_NAME = "hbase.regionserver.msginterval";
   private static final String SERVICE_NAME = "HDFS";
-  private static final int STACK_VERSIONS_CNT = 5;
+  private static final int STACK_VERSIONS_CNT = 6;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 1;
   private static final int STACK_SERVICES_CNT = 5 ;
@@ -2655,6 +2654,20 @@ public class AmbariManagementControllerTest {
     Assert.assertNull(stage1.getExecutionCommandWrapper(host2, "DATANODE"));
     Assert.assertNotNull(stage3.getExecutionCommandWrapper(host1, "HBASE_SERVICE_CHECK"));
     Assert.assertNotNull(stage2.getExecutionCommandWrapper(host2, "HDFS_SERVICE_CHECK"));
+    
+    for (Stage s : stages) {
+      for (List<ExecutionCommandWrapper> list : s.getExecutionCommands().values()) {
+        for (ExecutionCommandWrapper ecw : list) {
+          if (ecw.getExecutionCommand().getRole().name().contains("SERVICE_CHECK")) {
+            Map<String, String> hostParams = ecw.getExecutionCommand().getHostLevelParams();
+            Assert.assertNotNull(hostParams);
+            Assert.assertTrue(hostParams.size() > 0);
+            Assert.assertTrue(hostParams.containsKey("stack_version"));
+            Assert.assertEquals(hostParams.get("stack_version"), c1.getDesiredStackVersion().getStackVersion());
+          }
+        }
+      }
+    }
 
     // manually set live state
     sch1.setState(State.STARTED);

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
index f0ceeb0..7fbb1a2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
@@ -23,6 +23,7 @@ import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.controller.utilities.PropertyHelper.MetricsVersion;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -54,7 +55,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -127,7 +128,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -170,7 +171,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -204,7 +205,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -237,7 +238,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -275,7 +276,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -306,7 +307,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -339,7 +340,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -372,7 +373,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,
@@ -405,7 +406,7 @@ public class GangliaPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID,

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
index d8642fc..358e0a5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProviderTest.java
@@ -45,7 +45,7 @@ public class GangliaReportPropertyProviderTest {
     TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
 
     GangliaReportPropertyProvider propertyProvider = new GangliaReportPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.Cluster),
+        PropertyHelper.getGangliaPropertyIds(Resource.Type.Cluster, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         CLUSTER_NAME_PROPERTY_ID);

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
index 5d4d42b..499a920 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
@@ -37,14 +37,14 @@ public class AbstractPropertyProviderTest {
 
   @Test
   public void testGetComponentMetrics() {
-    Map<String, Map<String, PropertyInfo>> componentMetrics = PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent);
+    Map<String, Map<String, PropertyInfo>> componentMetrics = PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1);
     AbstractPropertyProvider provider = new TestPropertyProvider(componentMetrics);
     Assert.assertEquals(componentMetrics, provider.getComponentMetrics());
   }
 
   @Test
   public void testGetPropertyInfoMap() {
-    AbstractPropertyProvider provider = new TestPropertyProvider(PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent));
+    AbstractPropertyProvider provider = new TestPropertyProvider(PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1));
 
     // specific property
     Map<String, PropertyInfo> propertyInfoMap = provider.getPropertyInfoMap("NAMENODE", "metrics/cpu/cpu_aidle");

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersioningPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersioningPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersioningPropertyProviderTest.java
new file mode 100644
index 0000000..b881c41
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersioningPropertyProviderTest.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * VersioningPropertyProvider Tests
+ */
+public class VersioningPropertyProviderTest {
+  @Test
+  public void testPopulateResources() throws Exception {
+
+    Map<String, PropertyHelper.MetricsVersion> clusterVersionsMap =
+        new HashMap<String, PropertyHelper.MetricsVersion>();
+
+    clusterVersionsMap.put("c1", PropertyHelper.MetricsVersion.HDP1);
+    clusterVersionsMap.put("c2", PropertyHelper.MetricsVersion.HDP2);
+
+    Map<PropertyHelper.MetricsVersion, AbstractPropertyProvider> providers =
+        new HashMap<PropertyHelper.MetricsVersion, AbstractPropertyProvider>();
+
+    TestJMXPropertyProvider propertyProvider1 = new TestJMXPropertyProvider(
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
+        PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+        PropertyHelper.getPropertyId("HostRoles", "host_name"),
+        PropertyHelper.getPropertyId("HostRoles", "component_name"),
+        PropertyHelper.getPropertyId("HostRoles", "state"),
+        Collections.singleton("STARTED"));
+    providers.put(PropertyHelper.MetricsVersion.HDP1, propertyProvider1);
+
+
+    TestJMXPropertyProvider propertyProvider2 = new TestJMXPropertyProvider(
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP2),
+        PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+        PropertyHelper.getPropertyId("HostRoles", "host_name"),
+        PropertyHelper.getPropertyId("HostRoles", "component_name"),
+        PropertyHelper.getPropertyId("HostRoles", "state"),
+        Collections.singleton("STARTED"));
+
+    providers.put(PropertyHelper.MetricsVersion.HDP2, propertyProvider2);
+
+
+    VersioningPropertyProvider provider = new VersioningPropertyProvider(clusterVersionsMap, providers, PropertyHelper.getPropertyId("HostRoles", "cluster_name"));
+
+
+    Request request = PropertyHelper.getReadRequest();
+
+    Resource resource1 = new ResourceImpl(Resource.Type.HostComponent);
+    resource1.setProperty(PropertyHelper.getPropertyId("HostRoles", "cluster_name"), "c1");
+
+
+    provider.populateResources(Collections.singleton(resource1), request, null);
+
+    Assert.assertEquals(resource1, propertyProvider1.getResource());
+    Assert.assertNull(propertyProvider2.getResource());
+
+    propertyProvider1.setResource(null);
+    propertyProvider2.setResource(null);
+
+    Resource resource2 = new ResourceImpl(Resource.Type.HostComponent);
+    resource2.setProperty(PropertyHelper.getPropertyId("HostRoles", "cluster_name"), "c2");
+
+    provider.populateResources(Collections.singleton(resource2), request, null);
+
+    Assert.assertNull(propertyProvider1.getResource());
+    Assert.assertEquals(resource2, propertyProvider2.getResource());
+
+    propertyProvider1.setResource(null);
+    propertyProvider2.setResource(null);
+
+    Set<Resource> resources = new HashSet<Resource>();
+    resources.add(resource1);
+    resources.add(resource2);
+
+    provider.populateResources(resources, request, null);
+
+    Assert.assertEquals(resource1, propertyProvider1.getResource());
+    Assert.assertEquals(resource2, propertyProvider2.getResource());
+  }
+
+  private class TestJMXPropertyProvider extends JMXPropertyProvider {
+
+    private Resource resource = null;
+
+
+    public TestJMXPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics,
+                                   String clusterNamePropertyId,
+                                   String hostNamePropertyId,
+                                   String componentNamePropertyId,
+                                   String statePropertyId,
+                                   Set<String> healthyStates) {
+
+      super(componentMetrics, null, null, clusterNamePropertyId, hostNamePropertyId,
+          componentNamePropertyId, statePropertyId, healthyStates);
+    }
+
+    public Resource getResource() {
+      return resource;
+    }
+
+    public void setResource(Resource resource) {
+      this.resource = resource;
+    }
+
+    @Override
+    public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate) throws SystemException {
+
+      if (resources.size() == 1) {
+        resource = resources.iterator().next();
+      }
+      return resources;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
index 842078b..b2083db 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
@@ -47,7 +47,7 @@ public class JMXPropertyProviderTest {
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
@@ -234,7 +234,7 @@ public class JMXPropertyProviderTest {
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
@@ -266,12 +266,49 @@ public class JMXPropertyProviderTest {
   }
 
   @Test
+  public void testPopulateResources_HDP2() throws Exception {
+    TestStreamProvider  streamProvider = new TestStreamProvider();
+    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
+
+    JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP2),
+        streamProvider,
+        hostProvider,
+        PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+        PropertyHelper.getPropertyId("HostRoles", "host_name"),
+        PropertyHelper.getPropertyId("HostRoles", "component_name"),
+        PropertyHelper.getPropertyId("HostRoles", "state"),
+        Collections.singleton("STARTED"));
+
+    // namenode
+    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
+
+    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-0e-34-e1.compute-1.internal");
+    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "RESOURCEMANAGER");
+    resource.setProperty(HOST_COMPONENT_STATE_PROPERTY_ID, "STARTED");
+
+    // request with an empty set should get all supported properties
+    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
+
+    Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
+
+    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "8088"), streamProvider.getLastSpec());
+
+    // see test/resources/resourcemanager_jmx.json for values
+    Assert.assertEquals(6,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue", "AggregateContainersAllocated")));
+    Assert.assertEquals(6,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue", "AggregateContainersReleased")));
+    Assert.assertEquals(8192,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue", "AvailableMB")));
+    Assert.assertEquals(1,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue", "AvailableVCores")));
+    Assert.assertEquals(2,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/yarn/Queue", "AppsSubmitted")));
+  }
+
+  @Test
   public void testPopulateResourcesUnhealthyResource() throws Exception {
     TestStreamProvider  streamProvider = new TestStreamProvider();
     TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
@@ -304,7 +341,7 @@ public class JMXPropertyProviderTest {
     Set<Resource> resources = new HashSet<Resource>();
 
     JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
+        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1),
         streamProvider,
         hostProvider,
         PropertyHelper.getPropertyId("HostRoles", "cluster_name"),

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
index e99caff..a05ddcc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
@@ -35,6 +35,7 @@ public class TestStreamProvider implements StreamProvider {
     FILE_MAPPING.put("50030", "mapreduce_jobtracker_jmx.json");
     FILE_MAPPING.put("50060", "mapreduce_tasktracker_jmx.json");
     FILE_MAPPING.put("60010", "hbase_hbasemaster_jmx.json");
+    FILE_MAPPING.put("8088",  "resourcemanager_jmx.json");
   }
 
   /**
@@ -75,7 +76,9 @@ public class TestStreamProvider implements StreamProvider {
   }
 
   private String getPort(String spec) {
-    int n = spec.indexOf(":", 5);
-    return spec.substring(n + 1, n + 6);
+    int colonIndex = spec.indexOf(":", 5);
+    int slashIndex = spec.indexOf("/", colonIndex);
+
+    return spec.substring(colonIndex + 1, slashIndex);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
index 8217f88..8879268 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
@@ -17,9 +17,13 @@
  */
 package org.apache.ambari.server.controller.utilities;
 
+import org.apache.ambari.server.controller.internal.PropertyInfo;
+import org.apache.ambari.server.controller.spi.Resource;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.Map;
+
 
 /**
  * Property helper tests.
@@ -40,5 +44,29 @@ public class PropertyHelperTest {
     Assert.assertEquals("cat/sub/foo", PropertyHelper.getPropertyId("cat/sub", "foo"));
     Assert.assertEquals("cat/sub/foo", PropertyHelper.getPropertyId("cat/sub", "foo/"));
   }
+
+  @Test
+  public void testGetJMXPropertyIds() {
+
+    //version 1
+    Map<String, Map<String, PropertyInfo>> metrics = PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP1);
+    Map<String, PropertyInfo> componentMetrics = metrics.get("HISTORYSERVER");
+    Assert.assertNull(componentMetrics);
+    componentMetrics = metrics.get("NAMENODE");
+    Assert.assertNotNull(componentMetrics);
+    PropertyInfo info = componentMetrics.get("metrics/jvm/memHeapUsedM");
+    Assert.assertNotNull(info);
+    Assert.assertEquals("Hadoop:service=NameNode,name=jvm.memHeapUsedM", info.getPropertyId());
+
+    //version 2
+    metrics = PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent, PropertyHelper.MetricsVersion.HDP2);
+    componentMetrics = metrics.get("HISTORYSERVER");
+    Assert.assertNotNull(componentMetrics);
+    componentMetrics = metrics.get("NAMENODE");
+    Assert.assertNotNull(componentMetrics);
+    info = componentMetrics.get("metrics/jvm/memHeapUsedM");
+    Assert.assertNotNull(info);
+    Assert.assertEquals("Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM", info.getPropertyId());
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/a718fc45/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
index 15e27ad..01137d2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
@@ -189,5 +189,15 @@ public class RoleGraphTest {
     RoleGraphNode hdfs_service_check = new RoleGraphNode(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE);
     RoleGraphNode snamenode_start = new RoleGraphNode(Role.SECONDARY_NAMENODE, RoleCommand.START);
     Assert.assertEquals(-1, rco.order(snamenode_start, hdfs_service_check));
+    
+    RoleGraphNode mapred2_service_check = new RoleGraphNode(Role.MAPREDUCE2_SERVICE_CHECK, RoleCommand.EXECUTE);
+    RoleGraphNode rm_start = new RoleGraphNode(Role.RESOURCEMANAGER, RoleCommand.START);
+    RoleGraphNode nm_start = new RoleGraphNode(Role.NODEMANAGER, RoleCommand.START);
+    RoleGraphNode hs_start = new RoleGraphNode(Role.HISTORYSERVER, RoleCommand.START);
+    Assert.assertEquals(-1, rco.order(rm_start, mapred2_service_check));
+    Assert.assertEquals(-1, rco.order(nm_start, mapred2_service_check)); 
+    Assert.assertEquals(-1, rco.order(hs_start, mapred2_service_check));
+    Assert.assertEquals(-1, rco.order(hs_start, mapred2_service_check)); // NOTE(review): duplicate of the previous assertion — remove one or assert a distinct ordering
+    Assert.assertEquals(1, rco.order(nm_start, rm_start));
   }
 }


Mime
View raw message