incubator-ambari-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From smoha...@apache.org
Subject [1/2] AMBARI-3731. Custom Action: Add support for custom action execution
Date Thu, 14 Nov 2013 06:30:52 GMT
Updated Branches:
  refs/heads/trunk 708e59d9b -> 22f5fdfb7


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index d6523b0..a9c73bd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -188,7 +188,7 @@ public class StageUtils {
 
   public static Map<String, List<String>> getClusterHostInfo(
       Map<String, Host> allHosts, Cluster cluster, HostsMap hostsMap,
-      Injector injector) throws AmbariException {
+      Configuration configuration) throws AmbariException {
     Map<String, List<String>> info = new HashMap<String, List<String>>();
     if (cluster.getServices() != null) {
       String hostName = getHostName();
@@ -213,8 +213,7 @@ public class StageUtils {
               info.put(clusterInfoKey, hostList);
             }
             //Set up ambari-rca connection properties, is this a hack?
-//            info.put("ambari_db_server_host", Arrays.asList(hostsMap.getHostMap(getHostName())));
-            Configuration configuration = injector.getInstance(Configuration.class);
+            //info.put("ambari_db_server_host", Arrays.asList(hostsMap.getHostMap(getHostName())));
             String url = configuration.getRcaDatabaseUrl();
             if (url.contains(Configuration.HOSTNAME_MACRO)) {
               url = url.replace(Configuration.HOSTNAME_MACRO, hostsMap.getHostMap(hostName));

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
index c016d96..96d5956 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
@@ -132,6 +132,10 @@ ALTER TABLE ambari.confgroupclusterconfigmapping ADD CONSTRAINT FK_confgroupclus
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_configgroup_id
FOREIGN KEY (config_group_id) REFERENCES ambari.configgroup (group_id);
 ALTER TABLE ambari.configgrouphostmapping ADD CONSTRAINT FK_configgrouphostmapping_host_name
FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
 
+-- required for custom action
+CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCHAR(32) NOT
NULL, inputs VARCHAR(1000),
+target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT
NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
+GRANT ALL PRIVILEGES ON TABLE ambari.action TO :username;
 
 --Move cluster host info for old execution commands to stage table
 UPDATE ambari.stage sd

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index b0bd728..8c7d839 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@ -28,6 +28,7 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.ExecuteActionRequest;
 import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
@@ -57,9 +58,11 @@ public class TestActionDBAccessorImpl {
   private long stageId = 31;
   private String hostName = "host1";
   private String clusterName = "cluster1";
+  private String actionName = "validate_kerberos";
   private Injector injector;
   ActionDBAccessor db;
   ActionManager am;
+  CustomActionDBAccessor cdb;
 
   @Inject
   private Clusters clusters;
@@ -77,9 +80,10 @@ public class TestActionDBAccessorImpl {
     clusters.getHost(hostName).persist();
     clusters.addCluster(clusterName);
     db = injector.getInstance(ActionDBAccessorImpl.class);
-    
+    cdb = injector.getInstance(CustomActionDBAccessor.class);
+
     am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
-        new HostsMap((String) null), null, injector.getInstance(UnitOfWork.class), null);
+        new HostsMap((String) null), null, injector.getInstance(UnitOfWork.class), cdb);
   }
 
   @After
@@ -150,26 +154,26 @@ public class TestActionDBAccessorImpl {
   @Test
   public void testHostRoleScheduled() throws InterruptedException {
     populateActionDB(db, hostName, requestId, stageId);
-    Stage stage = db.getAction(StageUtils.getActionId(requestId, stageId));
+    Stage stage = db.getStage(StageUtils.getActionId(requestId, stageId));
     assertEquals(HostRoleStatus.PENDING, stage.getHostRoleStatus(hostName, Role.HBASE_MASTER.toString()));
     List<HostRoleCommandEntity> entities=
-        hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
+        hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
 
     assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
     stage.setHostRoleStatus(hostName, Role.HBASE_MASTER.toString(), HostRoleStatus.QUEUED);
 
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
+    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
     assertEquals(HostRoleStatus.QUEUED, stage.getHostRoleStatus(hostName, Role.HBASE_MASTER.toString()));
     assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
     db.hostRoleScheduled(stage, hostName, Role.HBASE_MASTER.toString());
 
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
+    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
     assertEquals(HostRoleStatus.QUEUED, entities.get(0).getStatus());
 
     Thread thread = new Thread(){
       @Override
       public void run() {
-        Stage stage1 = db.getAction("23-31");
+        Stage stage1 = db.getStage("23-31");
         stage1.setHostRoleStatus(hostName, Role.HBASE_MASTER.toString(), HostRoleStatus.COMPLETED);
         db.hostRoleScheduled(stage1, hostName, Role.HBASE_MASTER.toString());
       }
@@ -178,9 +182,47 @@ public class TestActionDBAccessorImpl {
     thread.start();
     thread.join();
 
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
+    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
     assertEquals("Concurrent update failed", HostRoleStatus.COMPLETED, entities.get(0).getStatus());
+  }
+
+  @Test
+  public void testCustomActionScheduled() throws InterruptedException {
+    populateActionDBWithCustomAction(db, hostName, requestId, stageId);
+    Stage stage = db.getStage(StageUtils.getActionId(requestId, stageId));
+    assertEquals(HostRoleStatus.PENDING, stage.getHostRoleStatus(hostName, actionName));
+    List<HostRoleCommandEntity> entities =
+        hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, actionName);
+
+    assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
+    stage.setHostRoleStatus(hostName, actionName, HostRoleStatus.QUEUED);
+
+    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, actionName);
+    assertEquals(HostRoleStatus.QUEUED, stage.getHostRoleStatus(hostName, actionName));
+    assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
+
+    long now = System.currentTimeMillis();
+    db.hostRoleScheduled(stage, hostName, actionName);
+
+    entities = hostRoleCommandDAO.findByHostRole(
+        hostName, requestId, stageId, actionName);
+    assertEquals(HostRoleStatus.QUEUED, entities.get(0).getStatus());
+
+
+    Thread thread = new Thread() {
+      @Override
+      public void run() {
+        Stage stage1 = db.getStage("23-31");
+        stage1.setHostRoleStatus(hostName, actionName, HostRoleStatus.COMPLETED);
+        db.hostRoleScheduled(stage1, hostName, actionName);
+      }
+    };
+
+    thread.start();
+    thread.join();
 
+    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, actionName);
+    assertEquals("Concurrent update failed", HostRoleStatus.COMPLETED, entities.get(0).getStatus());
   }
 
   @Test
@@ -199,7 +241,8 @@ public class TestActionDBAccessorImpl {
     commandReport.setExitCode(123);
     db.updateHostRoleState(hostName, requestId, stageId, Role.HBASE_MASTER.toString(), commandReport);
 
-    List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByHostRole(hostName,
requestId, stageId, Role.HBASE_MASTER);
+    List<HostRoleCommandEntity> commandEntities =
+        hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER.toString());
     assertEquals(1, commandEntities.size());
     HostRoleCommandEntity commandEntity = commandEntities.get(0);
     HostRoleCommand command = db.getTask(commandEntity.getTaskId());
@@ -297,4 +340,19 @@ public class TestActionDBAccessorImpl {
     stages.add(s);
     db.persistActions(stages);
   }
+
+  private void populateActionDBWithCustomAction(ActionDBAccessor db, String hostname,
+                                long requestId, long stageId) {
+    Stage s = new Stage(requestId, "/a/b", "cluster1", "action db accessor test", "");
+    s.setStageId(stageId);
+    s.addHostRoleExecutionCommand(hostname, Role.valueOf(actionName),
+        RoleCommand.ACTIONEXECUTE,
+        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
+            hostname, System.currentTimeMillis()), "cluster1", "HBASE");
+    List<Stage> stages = new ArrayList<Stage>();
+    stages.add(s);
+    ExecuteActionRequest request = new ExecuteActionRequest("cluster1", null, actionName,
"HBASE",
+        "HBASE_MASTER", null, null);
+    db.persistActions(stages);
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
index d465b59..f5ffe8b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
@@ -175,7 +175,7 @@ public class TestActionManager {
     populateActionDB(db, hostname);
     assertEquals(1, clusters.getClusters().size());
 
-    Cluster cluster = clusters.getCluster(clusterName);
+    clusters.getCluster(clusterName);
 
     assertEquals(1, am.getRequests().size());
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 3e8448c..2af7eef 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -37,10 +37,12 @@ import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.StackAccessException;
 import org.apache.ambari.server.actionmanager.ActionDBAccessor;
+import org.apache.ambari.server.actionmanager.ActionType;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -2052,13 +2054,9 @@ public class AmbariManagementControllerTest {
 
   @Test
   public void testGetServiceComponentHosts() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.mapHostToCluster("h1", "c1");
+    Cluster c1 = setupClusterWithHosts("c1", "HDP-0.1", new ArrayList<String>() {{
+      add("h1");
+    }}, "centos5");
     Service s1 = serviceFactory.createNew(c1, "HDFS");
     c1.addService(s1);
     s1.persist();
@@ -2105,24 +2103,29 @@ public class AmbariManagementControllerTest {
 
   }
 
+  private Cluster setupClusterWithHosts(String clusterName, String stackId, List<String>
hosts,
+                             String osType) throws AmbariException {
+    clusters.addCluster(clusterName);
+    Cluster c1 = clusters.getCluster(clusterName);
+    c1.setDesiredStackVersion(new StackId(stackId));
+    for (String host : hosts) {
+      clusters.addHost(host);
+      clusters.getHost(host).setOsType(osType);
+      clusters.getHost(host).persist();
+      clusters.mapHostToCluster(host, clusterName);
+    }
+    return c1;
+  }
+
   @Test
   public void testGetServiceComponentHostsWithFilters() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.addHost("h3");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h3").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
-    clusters.mapHostToCluster("h1", "c1");
-    clusters.mapHostToCluster("h2", "c1");
-    clusters.mapHostToCluster("h3", "c1");
+    Cluster c1 = setupClusterWithHosts("c1", "HDP-0.2",
+        new ArrayList<String>() {{
+          add("h1");
+          add("h2");
+          add("h3");
+        }},
+        "centos5");
 
     Service s1 = serviceFactory.createNew(c1, "HDFS");
     Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
@@ -2279,25 +2282,21 @@ public class AmbariManagementControllerTest {
 
   @Test
   public void testGetHosts() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.2"));
-    clusters.getCluster("c2").setDesiredStackVersion(new StackId("HDP-0.2"));
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.addHost("h3");
+    setupClusterWithHosts("c1", "HDP-0.2",
+        new ArrayList<String>() {{
+          add("h1");
+          add("h2");
+        }},
+        "centos5");
+
+    setupClusterWithHosts("c2", "HDP-0.2",
+        new ArrayList<String>() {{
+          add("h3");
+        }},
+        "centos5");
     clusters.addHost("h4");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h3").setOsType("centos5");
     clusters.getHost("h4").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
     clusters.getHost("h4").persist();
-    clusters.mapHostToCluster("h1", "c1");
-    clusters.mapHostToCluster("h2", "c1");
-    clusters.mapHostToCluster("h3", "c2");
 
     Map<String, String> attrs = new HashMap<String, String>();
     attrs.put("a1", "b1");
@@ -3105,6 +3104,7 @@ public class AmbariManagementControllerTest {
     Assert.assertNull(trackAction);
   }
 
+  @Ignore
   @Test
   public void testServiceComponentHostUpdateStackId() throws AmbariException {
     String clusterName = "foo1";
@@ -3264,6 +3264,7 @@ public class AmbariManagementControllerTest {
     }
   }
 
+  @Ignore
   @Test
   public void testServiceComponentHostUpdateStackIdError() throws AmbariException {
     String clusterName = "foo1";
@@ -3442,19 +3443,124 @@ public class AmbariManagementControllerTest {
     // start should fail
   }
 
-  @SuppressWarnings("serial")
   @Test
-  public void testCreateActionsFailures() throws Exception {
-    clusters.addCluster("c1");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    Set<String> hostNames = new HashSet<String>(){{
-      add("h1");
+  public void testCreateCustomActions() throws Exception {
+    setupClusterWithHosts("c1", "HDP-2.0.6",
+        new ArrayList<String>() {{
+          add("h1");
+          add("h2");
+          add("h3");
+        }},
+        "centos6");
+
+    Cluster cluster = clusters.getCluster("c1");
+    cluster.setDesiredStackVersion(new StackId("HDP-2.0.6"));
+    cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
+
+    ConfigFactory cf = injector.getInstance(ConfigFactory.class);
+    Config config1 = cf.createNew(cluster, "global",
+        new HashMap<String, String>() {{
+          put("key1", "value1");
+        }});
+    config1.setVersionTag("version1");
+
+    Config config2 = cf.createNew(cluster, "core-site",
+        new HashMap<String, String>() {{
+          put("key1", "value1");
+        }});
+    config2.setVersionTag("version1");
+
+    cluster.addConfig(config1);
+    cluster.addConfig(config2);
+
+    Service hdfs = cluster.addService("HDFS");
+    hdfs.persist();
+
+    Service mapred = cluster.addService("YARN");
+    mapred.persist();
+
+    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
+    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
+    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
+
+    mapred.addServiceComponent(Role.RESOURCEMANAGER.name()).persist();
+
+    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost("h1").persist();
+    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost("h1").persist();
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost("h1").persist();
+    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost("h2").persist();
+
+    controller.getActionManager().createActionDefinition(
+        "a1", ActionType.SYSTEM, "test", "Does file exist", "", "",
+        TargetHostType.SPECIFIC, Short.valueOf("100"));
+
+    controller.getActionManager().createActionDefinition(
+        "a2", ActionType.SYSTEM, "", "Does file exist", "HDFS", "DATANODE",
+        TargetHostType.ALL, Short.valueOf("100"));
+
+    Map<String, String> params = new HashMap<String, String>() {{
+      put("test", "test");
     }};
 
-    clusters.mapHostsToCluster(hostNames, "c1");
+    Map<String, String> requestProperties = new HashMap<String, String>();
+    requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
+    long now = System.currentTimeMillis();
+
+    ArrayList<String> hosts = new ArrayList<String>() {{add("h1");}};
+
+    ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1", null, "a1", "HDFS",
"DATANODE", hosts, params);
+    RequestStatusResponse response = controller.createAction(actionRequest, requestProperties);
+    assertEquals(1, response.getTasks().size());
+    ShortTaskStatus taskStatus = response.getTasks().get(0);
+    Assert.assertEquals("h1", taskStatus.getHostName());
+
+    List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
+    Stage stage = actionDB.getAllStages(response.getRequestId()).get(0);
+    Assert.assertNotNull(stage);
+    Assert.assertEquals(1, storedTasks.size());
+    HostRoleCommand task = storedTasks.get(0);
+    Assert.assertEquals(RoleCommand.ACTIONEXECUTE, task.getRoleCommand());
+    Assert.assertEquals("a1", task.getRole().name());
+    Assert.assertEquals("h1", task.getHostName());
+    ExecutionCommand cmd = task.getExecutionCommandWrapper().getExecutionCommand();
+    Assert.assertTrue(cmd.getCommandParams().containsKey("test"));
+    Assert.assertEquals(cmd.getServiceName(), "HDFS");
+    Assert.assertEquals(cmd.getComponentName(), "DATANODE");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a2", "", "", null, params);
+    response = controller.createAction(actionRequest, requestProperties);
+    assertEquals(2, response.getTasks().size());
+
+    final List<HostRoleCommand> storedTasks2 = actionDB.getRequestTasks(response.getRequestId());
+    task = storedTasks2.get(1);
+    Assert.assertEquals(RoleCommand.ACTIONEXECUTE, task.getRoleCommand());
+    Assert.assertEquals("a2", task.getRole().name());
+    HashSet<String> expectedHosts = new HashSet<String>(){{add("h2"); add("h1");}};
+    HashSet<String> actualHosts = new HashSet<String>(){{add(storedTasks2.get(1).getHostName());
add(storedTasks2
+        .get(0).getHostName());}};
+    Assert.assertEquals(expectedHosts, actualHosts);
+
+    cmd = task.getExecutionCommandWrapper().getExecutionCommand();
+    Assert.assertTrue(cmd.getCommandParams().containsKey("test"));
+    Assert.assertEquals(cmd.getServiceName(), "HDFS");
+    Assert.assertEquals(cmd.getComponentName(), "DATANODE");
+
+    hosts = new ArrayList<String>() {{add("h3");}};
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", "", "", hosts, params);
+    response = controller.createAction(actionRequest, requestProperties);
+    assertEquals(1, response.getTasks().size());
+    taskStatus = response.getTasks().get(0);
+    Assert.assertEquals("h3", taskStatus.getHostName());
+  }
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testCreateActionsFailures() throws Exception {
+    setupClusterWithHosts("c1", "HDP-0.1",
+        new ArrayList<String>() {{
+          add("h1");
+        }},
+        "centos5");
 
     Cluster cluster = clusters.getCluster("c1");
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
@@ -3462,11 +3568,15 @@ public class AmbariManagementControllerTest {
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global",
-        new HashMap<String, String>(){{ put("key1", "value1"); }});
+        new HashMap<String, String>() {{
+          put("key1", "value1");
+        }});
     config1.setVersionTag("version1");
 
     Config config2 = cf.createNew(cluster, "core-site",
-        new HashMap<String, String>(){{ put("key1", "value1"); }});
+        new HashMap<String, String>() {{
+          put("key1", "value1");
+        }});
     config2.setVersionTag("version1");
 
     cluster.addConfig(config1);
@@ -3477,10 +3587,15 @@ public class AmbariManagementControllerTest {
     Service hdfs = cluster.addService("HDFS");
     hdfs.persist();
 
+    Service mapred = cluster.addService("MAPREDUCE");
+    mapred.persist();
+
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
     hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
     hdfs.addServiceComponent(Role.DATANODE.name()).persist();
 
+    mapred.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
+
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost("h1").persist();
     hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost("h1").persist();
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost("h1").persist();
@@ -3493,74 +3608,114 @@ public class AmbariManagementControllerTest {
     Map<String, String> requestProperties = new HashMap<String, String>();
     requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
 
-    try {
-      controller.createAction(actionRequest, requestProperties);
-      Assert.fail("createAction should fail for NON_EXISTENT_CHECK");
-    } catch (AmbariException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Unsupported action"));
-    }
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties, "Unsupported action");
 
     actionRequest = new ExecuteActionRequest("c1", "NON_EXISTENT_SERVICE_CHECK", "HDFS",
params);
-    try {
-      controller.createAction(actionRequest, requestProperties);
-      Assert.fail("createAction should fail for NON_EXISTENT_SERVICE_CHECK");
-    } catch (AmbariException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Unsupported action"));
-    }
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Unsupported action");
 
     actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "MAPREDUCE",
params);
-    try {
-      controller.createAction(actionRequest, requestProperties);
-      Assert.fail("createAction should fail for DECOMMISSION_DATANODE on MAPREDUCE");
-    } catch (AmbariException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Unsupported action DECOMMISSION_DATANODE
for MAPREDUCE"));
-    }
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Unsupported action DECOMMISSION_DATANODE for MAPREDUCE");
 
     actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "HDFS", params);
-    try {
-      controller.createAction(actionRequest, requestProperties);
-      Assert.fail("createAction should fail for DECOMMISSION_DATANODE on HDFS - no excludeFileTag");
-    } catch (IllegalArgumentException ex) {
-      Assert.assertTrue(ex.getMessage().contains("No exclude file specified when decommissioning
datanodes"));
-    }
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "No exclude file specified when decommissioning datanodes");
 
     params.put("excludeFileTag", "tag1");
     actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "HDFS", params);
-    try {
-      controller.createAction(actionRequest, requestProperties);
-      Assert.fail("createAction should fail for DECOMMISSION_DATANODE on HDFS - no config
type hdfs-exclude-file");
-    } catch (AmbariException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Decommissioning datanodes requires the
cluster"));
-    }
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Decommissioning datanodes requires the cluster");
 
     actionRequest = new ExecuteActionRequest("c1", null, "DECOMMISSION_DATANODE", "HDFS",
null, null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action DECOMMISSION_DATANODE does not exist");
+
+    controller.getActionManager().createActionDefinition(
+        "a1", ActionType.SYSTEM, "test,dirName", "Does file exist", "", "",
+        TargetHostType.SPECIFIC, Short.valueOf("100"));
+
+    controller.getActionManager().createActionDefinition(
+        "a2", ActionType.SYSTEM, "", "Does file exist", "HDFS", "DATANODE",
+        TargetHostType.ANY, Short.valueOf("100"));
+
+    controller.getActionManager().createActionDefinition(
+        "a3", ActionType.SYSTEM, "", "Does file exist", "YARN", "NODEMANAGER",
+        TargetHostType.ANY, Short.valueOf("100"));
+
+    controller.getActionManager().createActionDefinition(
+        "a4", ActionType.SYSTEM, "", "Does file exist", "MAPREDUCE", "",
+        TargetHostType.ANY, Short.valueOf("100"));
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", null, null, null, null);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 requires input 'test' that is not provided");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", null, null, null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 requires input 'dirName' that is not provided");
+
+    params.put("dirName", "dirName");
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", null, null, null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 requires explicit target host(s)");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a2", "MAPREDUCE", null, null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a2 targets service MAPREDUCE that does not match with expected HDFS");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a2", "HDFS", "HDFS_CLIENT", null,
params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a2 targets component HDFS_CLIENT that does not match with expected DATANODE");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", "HDFS2", "HDFS_CLIENT", null,
params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 targets service HDFS2 that does not exist");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", "HDFS", "HDFS_CLIENT2", null,
params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 targets component HDFS_CLIENT2 that does not exist");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a1", "", "HDFS_CLIENT2", null,
params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a1 targets component HDFS_CLIENT2 without specifying the target service");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a3", "", "", null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Action a3 targets service YARN that does not exist");
+
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("h6");
+    actionRequest = new ExecuteActionRequest("c1", null, "a2", "", "", hosts, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Request specifies host h6 but its not a valid host based on the target service=HDFS
and component=DATANODE");
+
+    actionRequest = new ExecuteActionRequest("c1", null, "a4", "MAPREDUCE", "", null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Suitable hosts not found, component=, service=MAPREDUCE, cluster=c1, actionName=a4");
+
+  }
+
+  private void expectActionCreationErrorWithMessage(ExecuteActionRequest actionRequest,
+                                                    Map<String, String> requestProperties,
String message) {
     try {
       RequestStatusResponse response = controller.createAction(actionRequest, requestProperties);
-      if (response != null) {
-        Assert.fail("createAction should fail for action DECOMMISSION_DATANODE");
-      }
+      Assert.fail("createAction should fail");
     } catch (AmbariException ex) {
-      Assert.assertTrue(ex.getMessage().contains("Invalid action request"));
+      LOG.info(ex.getMessage());
+      Assert.assertTrue(ex.getMessage().contains(message));
     }
   }
 
   @SuppressWarnings("serial")
   @Test
-  public void testCreateActions() throws Exception {
-    clusters.addCluster("c1");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.addHost("h2");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    Set<String> hostNames = new HashSet<String>(){{
-      add("h1");
-      add("h2");
-    }};
-
-    clusters.mapHostsToCluster(hostNames, "c1");
+  public void testCreateServiceCheckActions() throws Exception {
+    setupClusterWithHosts("c1", "HDP-0.1",
+        new ArrayList<String>() {{
+          add("h1");
+          add("h2");
+        }},
+        "centos5");
 
     Cluster cluster = clusters.getCluster("c1");
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
@@ -8166,6 +8321,16 @@ public class AmbariManagementControllerTest {
     }
 
     createRequest =
+        new ActionRequest("a1", "SYSTEM", "", "", "", "SS", "ANY", "10");
+    try {
+      ActionResourceProviderTest.createAction(controller, createRequest);
+      Assert.fail("Exception must be thrown");
+    } catch (Exception ex) {
+      LOG.info(ex.getMessage());
+      Assert.assertTrue(ex.getMessage().contains("Default timeout should be between 60 and 600"));
+    }
+
+    createRequest =
         new ActionRequest("a1", "SYSTEM", "", "HDFS", "", "SS", "ANY", "100");
     try {
       ActionResourceProviderTest.createAction(controller, createRequest);
@@ -8185,6 +8350,17 @@ public class AmbariManagementControllerTest {
       LOG.info(ex.getMessage());
       Assert.assertTrue(ex.getMessage().contains("No enum const class"));
     }
+
+    createRequest =
+        new ActionRequest("a1", "SYSTEM", "", "", "DATANODE", "SS", "SPECIFIC", "100");
+    try {
+      ActionResourceProviderTest.createAction(controller, createRequest);
+      Assert.fail("Exception must be thrown");
+    } catch (Exception ex) {
+      LOG.info(ex.getMessage());
+      Assert.assertTrue(ex.getMessage().contains("Target component cannot be specified unless target service is " +
+          "specified"));
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
index 586478d..96daeea 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
@@ -187,7 +187,9 @@ public class TestOrmImpl extends Assert {
   public void testAbortHostRoleCommands() {
     injector.getInstance(OrmTestHelper.class).createStageCommands();
     HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    int result = hostRoleCommandDAO.updateStatusByRequestId(0L, HostRoleStatus.ABORTED, Arrays.asList(HostRoleStatus.QUEUED, HostRoleStatus.IN_PROGRESS, HostRoleStatus.PENDING));
+    int result = hostRoleCommandDAO.updateStatusByRequestId(
+        0L, HostRoleStatus.ABORTED, Arrays.asList(HostRoleStatus.QUEUED,
+        HostRoleStatus.IN_PROGRESS, HostRoleStatus.PENDING));
     //result always 1 in batch mode
     List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByRequest(0L);
     int count = 0;
@@ -203,7 +205,7 @@ public class TestOrmImpl extends Assert {
   public void testFindStageByHostRole() {
     injector.getInstance(OrmTestHelper.class).createStageCommands();
     HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    List<HostRoleCommandEntity> list = hostRoleCommandDAO.findByHostRole("test_host1", 0L, 0L, Role.DATANODE);
+    List<HostRoleCommandEntity> list = hostRoleCommandDAO.findByHostRole("test_host1", 0L, 0L, Role.DATANODE.toString());
     assertEquals(1, list.size());
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/22f5fdfb/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
index 516069a..fbb382f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
@@ -211,7 +211,8 @@ public class TestStageUtils {
     addHbaseService(fsm.getCluster("c1"), hostList, injector);
     addMapreduceService(fsm.getCluster("c1"), hostList, injector);
     Map<String, List<String>> info = StageUtils.getClusterHostInfo(fsm.getHostsForCluster("c1"),
-        fsm.getCluster("c1"), new HostsMap(injector.getInstance(Configuration.class)), injector);
+        fsm.getCluster("c1"), new HostsMap(injector.getInstance(Configuration.class)),
+        injector.getInstance(Configuration.class));
     assertEquals(2, info.get("slave_hosts").size());
     assertEquals(2, info.get("mapred_tt_hosts").size());
     assertEquals(2, info.get("hbase_rs_hosts").size());


Mime
View raw message