ambari-commits mailing list archives

From jonathanhur...@apache.org
Subject [1/2] ambari git commit: AMBARI-11086 - Upgrade Pack Configure Task Must Preserve Additions When Deleting (jonathanhurley)
Date Wed, 13 May 2015 14:34:51 GMT
Repository: ambari
Updated Branches:
  refs/heads/trunk cd4f302fd -> 7e4cba5cf


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
index 179c978..02c1006 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
@@ -22,6 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.format import format
 from ambari_commons import OSConst
@@ -66,7 +67,7 @@ class ZookeeperClientLinux(ZookeeperClient):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      Execute(format("hdp-select set zookeeper-client {version}"))
+      hdp_select.select("zookeeper-client", params.version)
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ZookeeperClientWindows(ZookeeperClient):
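
For reference, the hunk above (and the matching one in zookeeper_server.py below) swaps the literal hdp-select shell call for the shared hdp_select.select helper. A minimal sketch of what such a helper amounts to, assuming it simply wraps the same command being removed here (the actual resource_management implementation is not part of this patch):

from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.format import format

def select(component, version):
  # Equivalent to the call removed above: hdp-select set <component> <version>
  Execute(format("hdp-select set {component} {version}"))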

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
index 2e19100..d685d1d 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
@@ -24,6 +24,7 @@ import sys
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -76,7 +77,7 @@ class ZookeeperServerLinux(ZookeeperServer):
 
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      Execute(format("hdp-select set zookeeper-server {version}"))
+      hdp_select.select("zookeeper-server", params.version)
 
   def post_rolling_restart(self, env):
     Logger.info("Executing Rolling Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
index 92b6a85..d3fd187 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ClientRetryPropertyCheckTest.java
@@ -155,7 +155,7 @@ public class ClientRetryPropertyCheckTest {
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
     // pass with right property
-    properties.put("template", "foo bar baz -Doozie.connection.retry.count=5 foobarbaz");
+    properties.put("content", "foo bar baz -Doozie.connection.retry.count=5 foobarbaz");
     check = new PrerequisiteCheck(null, null);
     m_check.perform(check, new PrereqCheckRequest("cluster"));
     Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
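
The fix above points the check at the "content" property of oozie-env, where the env template text actually lives, instead of a "template" key that the configuration does not carry. The pass condition the test encodes is essentially a substring match; a hedged, illustrative sketch of that rule (not the Java check itself):

def oozie_retry_configured(oozie_env_properties):
  # Passes when the env content explicitly sets the Oozie client retry count.
  content = oozie_env_properties.get("content", "")
  return "-Doozie.connection.retry.count" in content

assert oozie_retry_configured(
    {"content": "foo bar baz -Doozie.connection.retry.count=5 foobarbaz"})
assert not oozie_retry_configured({"content": "no retry flag here"})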

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index d9638bc..99882fd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -28,6 +28,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
@@ -47,8 +49,11 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
 import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
@@ -85,6 +90,12 @@ public class ConfigureActionTest {
   @Inject
   private HostRoleCommandFactory hostRoleCommandFactory;
 
+  @Inject
+  private ServiceFactory serviceFactory;
+
+  @Inject
+  ConfigHelper m_configHelper;
+
   @Before
   public void setup() throws Exception {
     m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -97,84 +108,82 @@ public class ConfigureActionTest {
     m_injector.getInstance(PersistService.class).stop();
   }
 
-  private void makeUpgradeCluster() throws Exception {
-    String clusterName = "c1";
-    String hostName = "h1";
-
-    Clusters clusters = m_injector.getInstance(Clusters.class);
-    clusters.addCluster(clusterName, HDP_21_STACK);
-
-    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
-    StackEntity stackEntity = stackDAO.find(HDP_21_STACK.getStackName(),
-        HDP_21_STACK.getStackVersion());
-
-    assertNotNull(stackEntity);
+  @Test
+  public void testConfigActionUpgradeAcrossStack() throws Exception {
+    makeUpgradeCluster();
 
-    Cluster c = clusters.getCluster(clusterName);
-    c.setDesiredStackVersion(HDP_21_STACK);
+    Cluster c = m_injector.getInstance(Clusters.class).getCluster("c1");
+    assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
+    c.setDesiredStackVersion(HDP_22_STACK);
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
     Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
           put("initLimit", "10");
         }}, new HashMap<String, Map<String,String>>());
-    config.setTag("version1");
+    config.setTag("version2");
     config.persist();
 
     c.addConfig(config);
     c.addDesiredConfig("user", Collections.singleton(config));
+    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    // add a host component
-    clusters.addHost(hostName);
-
-    Host host = clusters.getHost(hostName);
 
-    Map<String, String> hostAttributes = new HashMap<String, String>();
-    hostAttributes.put("os_family", "redhat");
-    hostAttributes.put("os_release_version", "6");
-    host.setHostAttributes(hostAttributes);
-    host.persist();
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put("upgrade_direction", "upgrade");
+    commandParams.put("version", HDP_2_2_1_0);
+    commandParams.put("clusterName", "c1");
+    commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
+    commandParams.put(ConfigureTask.PARAMETER_KEY, "initLimit");
+    commandParams.put(ConfigureTask.PARAMETER_VALUE, "11");
 
-    String urlInfo = "[{'repositories':["
-        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'HDP-2.1.1'}"
-        + "], 'OperatingSystems/os_type':'redhat6'}]";
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
 
-    m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_2_0_0);
-    repoVersionDAO.create(stackEntity, HDP_2_2_1_0, String.valueOf(System.currentTimeMillis()),
-        "pack", urlInfo);
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
+        null, null);
 
-    c.createClusterVersion(HDP_21_STACK, HDP_2_2_0_0, "admin", RepositoryVersionState.UPGRADING);
-    c.createClusterVersion(HDP_21_STACK, HDP_2_2_1_0, "admin", RepositoryVersionState.INSTALLING);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(
+        executionCommand));
 
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_0_0, RepositoryVersionState.CURRENT);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.INSTALLED);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADING);
-    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADED);
-    c.setCurrentStackVersion(HDP_21_STACK);
+    ConfigureAction action = m_injector.getInstance(ConfigureAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
 
-    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    HostVersionEntity entity = new HostVersionEntity();
-    entity.setHostEntity(hostDAO.findByName(hostName));
-    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_21_STACK, HDP_2_2_1_0));
-    entity.setState(RepositoryVersionState.UPGRADED);
-    hostVersionDAO.create(entity);
+    config = c.getDesiredConfigByType("zoo.cfg");
+    assertNotNull(config);
+    assertEquals("version2", config.getTag());
+    assertEquals("11", config.getProperties().get("initLimit"));
   }
 
+  /**
+   * Tests that DELETE "*" with edit preserving works correctly.
+   *
+   * @throws Exception
+   */
   @Test
-  public void testConfigActionUpgradeAcrossStack() throws Exception {
+  public void testDeletePreserveChanges() throws Exception {
     makeUpgradeCluster();
 
     Cluster c = m_injector.getInstance(Clusters.class).getCluster("c1");
     assertEquals(1, c.getConfigsByType("zoo.cfg").size());
 
-    c.setDesiredStackVersion(HDP_22_STACK);
+    c.setDesiredStackVersion(HDP_21_STACK);
+
+    // create a config for zoo.cfg with two values; one is a stack value and the
+    // other is custom
     ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
-    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {{
-          put("initLimit", "10");
-        }}, new HashMap<String, Map<String,String>>());
+    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+      {
+        put("tickTime", "2000");
+        put("foo", "bar");
+      }
+    }, new HashMap<String, Map<String, String>>());
     config.setTag("version2");
     config.persist();
 
@@ -182,24 +191,30 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-
     Map<String, String> commandParams = new HashMap<String, String>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_1_0);
     commandParams.put("clusterName", "c1");
     commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
-    commandParams.put(ConfigureTask.PARAMETER_KEY, "initLimit");
-    commandParams.put(ConfigureTask.PARAMETER_VALUE, "11");
+
+    // delete all keys, preserving edits or additions
+    List<ConfigureTask.Transfer> transfers = new ArrayList<ConfigureTask.Transfer>();
+    ConfigureTask.Transfer transfer = new ConfigureTask.Transfer();
+    transfer.operation = TransferOperation.DELETE;
+    transfer.deleteKey = "*";
+    transfer.preserveEdits = true;
+    transfers.add(transfer);
+
+    commandParams.put(ConfigureTask.PARAMETER_TRANSFERS, new Gson().toJson(transfers));
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     executionCommand.setClusterName("c1");
+    executionCommand.setRoleParams(new HashMap<String, String>());
+    executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
 
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null,
-        null, null);
-
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(
-        executionCommand));
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
 
     ConfigureAction action = m_injector.getInstance(ConfigureAction.class);
     action.setExecutionCommand(executionCommand);
@@ -208,12 +223,17 @@ public class ConfigureActionTest {
     CommandReport report = action.execute(null);
     assertNotNull(report);
 
-    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
-
+    // make sure there are now 3 versions after the merge
+    assertEquals(3, c.getConfigsByType("zoo.cfg").size());
     config = c.getDesiredConfigByType("zoo.cfg");
     assertNotNull(config);
-    assertEquals("version2", config.getTag());
-    assertEquals("11", config.getProperties().get("initLimit"));
+    assertFalse("version2".equals(config.getTag()));
+
+    // time to check our values; there should only be 1 left since tickTime was
+    // removed
+    Map<String, String> map = config.getProperties();
+    assertEquals("bar", map.get("foo"));
+    assertFalse(map.containsKey("tickTime"));
   }
 
   @Test
@@ -338,9 +358,106 @@ public class ConfigureActionTest {
     assertEquals(4, c.getConfigsByType("zoo.cfg").size());
     config = c.getDesiredConfigByType("zoo.cfg");
     map = config.getProperties();
-    assertEquals(2, map.size());
+    assertEquals(6, map.size());
     assertTrue(map.containsKey("initLimit")); // it just changed to 11 from 10
     assertTrue(map.containsKey("copyKey")); // is new
   }
 
+  private void makeUpgradeCluster() throws Exception {
+    String clusterName = "c1";
+    String hostName = "h1";
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+    clusters.addCluster(clusterName, HDP_21_STACK);
+
+    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find(HDP_21_STACK.getStackName(),
+        HDP_21_STACK.getStackVersion());
+
+    assertNotNull(stackEntity);
+
+    Cluster c = clusters.getCluster(clusterName);
+    c.setDesiredStackVersion(HDP_21_STACK);
+
+    // !!! very important, otherwise the loops that walk the list of installed
+    // service properties will not run!
+    installService(c, "ZOOKEEPER");
+
+    ConfigFactory cf = m_injector.getInstance(ConfigFactory.class);
+    Config config = cf.createNew(c, "zoo.cfg", new HashMap<String, String>() {
+      {
+        put("initLimit", "10");
+      }
+    }, new HashMap<String, Map<String, String>>());
+    config.setTag("version1");
+    config.persist();
+
+    c.addConfig(config);
+    c.addDesiredConfig("user", Collections.singleton(config));
+
+    // add a host component
+    clusters.addHost(hostName);
+
+    Host host = clusters.getHost(hostName);
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6");
+    host.setHostAttributes(hostAttributes);
+    host.persist();
+
+    String urlInfo = "[{'repositories':["
+        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'HDP-2.1.1'}"
+        + "], 'OperatingSystems/os_type':'redhat6'}]";
+
+    m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_2_0_0);
+    repoVersionDAO.create(stackEntity, HDP_2_2_1_0, String.valueOf(System.currentTimeMillis()),
+        "pack", urlInfo);
+
+    c.createClusterVersion(HDP_21_STACK, HDP_2_2_0_0, "admin", RepositoryVersionState.UPGRADING);
+    c.createClusterVersion(HDP_21_STACK, HDP_2_2_1_0, "admin", RepositoryVersionState.INSTALLING);
+
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_0_0, RepositoryVersionState.CURRENT);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(HDP_21_STACK, HDP_2_2_1_0, RepositoryVersionState.UPGRADED);
+    c.setCurrentStackVersion(HDP_21_STACK);
+
+    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
+        RepositoryVersionState.CURRENT);
+
+    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+
+    HostVersionEntity entity = new HostVersionEntity();
+    entity.setHostEntity(hostDAO.findByName(hostName));
+    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_21_STACK, HDP_2_2_1_0));
+    entity.setState(RepositoryVersionState.UPGRADED);
+    hostVersionDAO.create(entity);
+
+    // verify that our configs are there
+    String tickTime = m_configHelper.getPropertyValueFromStackDefinitions(c, "zoo.cfg", "tickTime");
+    assertNotNull(tickTime);
+  }
+
+  /**
+   * Installs a service in the cluster if it is not already present.
+   *
+   * @param cluster the cluster to install the service into.
+   * @param serviceName the name of the service to install.
+   * @return the existing or newly created service.
+   * @throws AmbariException
+   */
+  private Service installService(Cluster cluster, String serviceName) throws AmbariException {
+    Service service = null;
+
+    try {
+      service = cluster.getService(serviceName);
+    } catch (ServiceNotFoundException e) {
+      service = serviceFactory.createNew(cluster, serviceName);
+      cluster.addService(service);
+      service.persist();
+    }
+
+    return service;
+  }
 }
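
The new testDeletePreserveChanges case pins down the behavior named in the JIRA title: a transfer that deletes every key (deleteKey = "*") while keeping whatever the user added or edited. Judging by the fixture (the stack-default tickTime is dropped, the custom key foo survives), the rule is roughly "remove only keys whose values still match the stack defaults"; a small illustrative sketch of that rule in Python, not the actual ConfigureAction code:

def delete_all_preserving_edits(current, stack_defaults):
  # Keep any key the user added or changed; drop keys still at stack defaults.
  return {key: value for key, value in current.items()
          if stack_defaults.get(key) != value}

current = {"tickTime": "2000", "foo": "bar"}              # mirrors the test fixture
stack_defaults = {"tickTime": "2000", "initLimit": "10"}  # from zoo.cfg.xml below
assert delete_all_preserving_edits(current, stack_defaults) == {"foo": "bar"}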

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/python/TestUtils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestUtils.py b/ambari-server/src/test/python/TestUtils.py
index 8370986..bbd40bf 100644
--- a/ambari-server/src/test/python/TestUtils.py
+++ b/ambari-server/src/test/python/TestUtils.py
@@ -191,3 +191,38 @@ class TestUtils(TestCase):
     isfile_mock.return_value = True
 
     self.assertEquals(utils.check_exitcode("/tmp/nofile"), 777)
+
+
+  def test_format_with_reload(self):
+    from resource_management.libraries.functions import format
+    from resource_management.libraries.functions.format import ConfigurationFormatter
+    from resource_management.core.environment import Environment
+
+    env = Environment()
+    env._instances.append(env)
+
+
+    # declare some environment variables
+    env_params = {}
+    env_params["envfoo"] = "env-foo1"
+    env_params["envbar"] = "env-bar1"
+    env.config.params = env_params
+
+    # declare some local variables
+    foo = "foo1"
+    bar = "bar1"
+
+    # make sure local variables and env variables work
+    message = "{foo} {bar} {envfoo} {envbar}"
+    formatted_message = format(message)
+    self.assertEquals("foo1 bar1 env-foo1 env-bar1", formatted_message)
+
+    # try the same thing with an instance; we pass in keyword args to be
+    # combined with the env params
+    formatter = ConfigurationFormatter()
+    formatted_message = formatter.format(message, foo="foo2", bar="bar2")
+    self.assertEquals("foo2 bar2 env-foo1 env-bar1", formatted_message)
+
+    # now supply keyword args to override env params
+    formatted_message = formatter.format(message, envfoo="foobar", envbar="foobarbaz", foo="foo3", bar="bar3")
+    self.assertEquals("foo3 bar3 foobar foobarbaz", formatted_message)
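
The last two assertions fix the substitution precedence: values come from the active Environment's params by default, and explicit keyword arguments override them. A minimal sketch of that precedence as a plain dict merge (an assumption about the intent, not the ConfigurationFormatter internals):

def resolve_params(env_params, **kwargs):
  # Start from the environment's params, then let explicit keyword args win.
  combined = dict(env_params)
  combined.update(kwargs)
  return combined

env_params = {"envfoo": "env-foo1", "envbar": "env-bar1"}
assert resolve_params(env_params, envfoo="foobar", foo="foo3") == {
    "envfoo": "foobar", "envbar": "env-bar1", "foo": "foo3"}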

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 8978b18..35b783a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1263,6 +1263,8 @@ class TestNamenode(RMFTestCase):
 
   @patch("resource_management.core.shell.call")
   def test_pre_rolling_restart_23_params(self, call_mock):
+    import itertools
+
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/nn_ru_lzo.json"
     with open(config_file, "r") as f:
       json_content = json.load(f)
@@ -1279,7 +1281,7 @@ class TestNamenode(RMFTestCase):
                        config_dict = json_content,
                        hdp_stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
-                       call_mocks = [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)],
+                       call_mocks = itertools.cycle([(0, None)]),
                        mocks_dict = mocks_dict)
     import sys
     self.assertEquals("/usr/hdp/2.3.0.0-1234/hadoop/conf", sys.modules["params"].hadoop_conf_dir)
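
Switching the mocked results to itertools.cycle([(0, None)]) lets the patched shell.call be invoked any number of times without exhausting its side_effect sequence. A self-contained illustration of the same pattern (shell_call and run_commands are hypothetical names, used only to show the mechanism):

import itertools
from unittest import mock

def shell_call(cmd):
  raise RuntimeError("patched out in tests")

def run_commands(count):
  # Stand-in for code under test that shells out an arbitrary number of times.
  return [shell_call("true") for _ in range(count)]

with mock.patch(__name__ + ".shell_call",
                side_effect=itertools.cycle([(0, None)])):
  # Every call yields (0, None), however many calls are made.
  assert run_commands(7) == [(0, None)] * 7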

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index fdcafdb..bb1a037 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -995,7 +995,7 @@ class TestOozieServer(RMFTestCase):
       isfile_mock, exists_mock, isdir_mock, tarfile_open_mock):
 
     isdir_mock.return_value = True
-    exists_mock.side_effect = [False,False,True]
+    exists_mock.return_value = False
     isfile_mock.return_value = True
 
     prepare_war_stdout = """INFO: Adding extension: libext/mysql-connector-java.jar

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e4cba5c/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml
new file mode 100644
index 0000000..12e2a00
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/ZOOKEEPER/configuration/zoo.cfg.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tickTime</name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <value>10</value>
+    <description>Ticks to allow for sync at Init.</description>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <value>5</value>
+    <description>Ticks to allow for sync at Runtime.</description>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+  </property>
+  <property>
+    <name>dataDir</name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+  </property>
+  <property>
+    <name>autopurge.snapRetainCount</name>
+    <value>30</value>
+    <description>ZooKeeper purge feature retains the autopurge.snapRetainCount
+      most recent snapshots and the corresponding transaction
+      logs in the dataDir and dataLogDir respectively and deletes the rest. </description>
+  </property>
+  <property>
+    <name>autopurge.purgeInterval</name>
+    <value>24</value>
+    <description>The time interval in hours for which the purge task has to be triggered.
+      Set to a positive integer (1 and above) to enable the auto purging.</description>
+  </property>
+</configuration>
\ No newline at end of file

