aurora-commits mailing list archives

From ma...@apache.org
Subject [1/5] incubator-aurora git commit: Removing client v1 code.
Date Thu, 08 Jan 2015 01:07:13 GMT
Repository: incubator-aurora
Updated Branches:
  refs/heads/master fb4d3f9ac -> 9a817f24b


http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_maintenance.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_maintenance.py b/src/test/python/apache/aurora/client/commands/test_maintenance.py
deleted file mode 100644
index 20eecc0..0000000
--- a/src/test/python/apache/aurora/client/commands/test_maintenance.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-
-from mock import Mock, patch
-from twitter.common.contextutil import temporary_file
-
-from apache.aurora.admin.host_maintenance import HostMaintenance
-from apache.aurora.client.commands.maintenance import (
-    host_activate,
-    host_deactivate,
-    host_drain,
-    host_status
-)
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.ttypes import (
-    DrainHostsResult,
-    EndMaintenanceResult,
-    Hosts,
-    HostStatus,
-    MaintenanceMode,
-    MaintenanceStatusResult,
-    StartMaintenanceResult
-)
-
-
-class TestMaintenanceCommands(AuroraClientCommandTest):
-  HOSTNAMES = ['us-grf-20', 'us-jim-47', 'us-suz-01']
-
-  def make_mock_options(self):
-    mock_options = Mock()
-    mock_options.filename = None
-    mock_options.hosts = ','.join(self.HOSTNAMES)
-    mock_options.cluster = self.TEST_CLUSTER
-    mock_options.verbosity = False
-    mock_options.disable_all_hooks = False
-    mock_options.percentage = None
-    mock_options.duration = None
-    mock_options.reason = None
-    return mock_options
-
-  def create_host_statuses(self, maintenance_mode, skip_hosts=None):
-    return [HostStatus(host=hostname, mode=maintenance_mode) for hostname in self.HOSTNAMES
-            if not skip_hosts or hostname not in skip_hosts]
-
-  def create_start_maintenance_result(self, skip_hosts=None):
-    host_statuses = self.create_host_statuses(MaintenanceMode.SCHEDULED, skip_hosts)
-    response = self.create_simple_success_response()
-    response.result.startMaintenanceResult = StartMaintenanceResult(statuses=set(host_statuses))
-    return response
-
-  def create_end_maintenance_result(self):
-    host_statuses = self.create_host_statuses(MaintenanceMode.NONE)
-    response = self.create_simple_success_response()
-    response.result.endMaintenanceResult = EndMaintenanceResult(statuses=set(host_statuses))
-    return response
-
-  def create_drain_hosts_result(self):
-    host_statuses = self.create_host_statuses(MaintenanceMode.DRAINING)
-    response = self.create_simple_success_response()
-    response.result.drainHostsResult = DrainHostsResult(statuses=set(host_statuses))
-    return response
-
-  def create_maintenance_status_result(self):
-    host_statuses = self.create_host_statuses(MaintenanceMode.NONE)
-    response = self.create_simple_success_response()
-    response.result.maintenanceStatusResult = MaintenanceStatusResult(statuses=set(host_statuses))
-    return response
-
-  def create_drained_status_result(self, hosts):
-    host_statuses = [
-        HostStatus(host=hostname, mode=MaintenanceMode.DRAINED) for hostname in hosts.hostNames]
-    response = self.create_simple_success_response()
-    response.result.maintenanceStatusResult = MaintenanceStatusResult(statuses=set(host_statuses))
-    return response
-
-  def test_start_maintenance_hosts(self):
-    mock_options = self.make_mock_options()
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      host_deactivate([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-
-  def test_end_maintenance_hosts(self):
-    mock_options = self.make_mock_options()
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.endMaintenance.return_value = self.create_end_maintenance_result()
-    mock_scheduler_proxy.maintenanceStatus.return_value = self.create_maintenance_status_result()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      host_activate([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.endMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-      mock_scheduler_proxy.maintenanceStatus.assert_called_with(Hosts(set(self.HOSTNAMES)))
-
-  def test_perform_maintenance_hosts(self):
-    mock_options = self.make_mock_options()
-    mock_options.post_drain_script = 'callback'
-    mock_options.grouping = 'by_host'
-
-    def host_status_results(hostnames):
-      if isinstance(hostnames, Hosts):
-        return self.create_drained_status_result(hostnames)
-      return self.create_maintenance_status_result()
-
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_callback = Mock()
-    mock_scheduler_proxy.maintenanceStatus.side_effect = host_status_results
-    mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-    mock_scheduler_proxy.drainHosts.return_value = self.create_start_maintenance_result()
-    mock_vector = self.create_mock_probe_hosts_vector([
-        self.create_probe_hosts(self.HOSTNAMES[0], 95, True, None),
-        self.create_probe_hosts(self.HOSTNAMES[1], 95, True, None),
-        self.create_probe_hosts(self.HOSTNAMES[2], 95, True, None)
-    ])
-
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-              return_value=mock_vector),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.commands.maintenance.parse_script', return_value=mock_callback),
-        patch('threading._Event.wait'),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      host_drain([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-      assert mock_scheduler_proxy.maintenanceStatus.call_count == 3
-      assert mock_scheduler_proxy.drainHosts.call_count == 3
-      assert mock_callback.call_count == 3
-
-  def test_perform_maintenance_hosts_unknown_hosts_skipped(self):
-    mock_options = self.make_mock_options()
-    mock_options.post_drain_script = None
-    mock_options.grouping = 'by_host'
-
-    def host_status_results(hostnames):
-      if isinstance(hostnames, Hosts):
-        return self.create_drained_status_result(hostnames)
-      return self.create_maintenance_status_result()
-
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.maintenanceStatus.side_effect = host_status_results
-    mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result(
-        skip_hosts=['us-grf-20'])
-    mock_scheduler_proxy.drainHosts.return_value = self.create_start_maintenance_result()
-    mock_vector = self.create_mock_probe_hosts_vector([
-      self.create_probe_hosts(self.HOSTNAMES[0], 95, True, None),
-      self.create_probe_hosts(self.HOSTNAMES[1], 95, True, None),
-      self.create_probe_hosts(self.HOSTNAMES[2], 95, True, None)
-    ])
-
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-              return_value=mock_vector),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('threading._Event.wait'),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      host_drain([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-      assert mock_scheduler_proxy.maintenanceStatus.call_count == 2
-      assert mock_scheduler_proxy.drainHosts.call_count == 2
-
-  def test_perform_maintenance_hosts_failed_default_sla(self):
-    with temporary_file() as fp:
-      mock_options = self.make_mock_options()
-      mock_options.post_drain_script = None
-      mock_options.grouping = 'by_host'
-      mock_options.unsafe_hosts_filename = fp.name
-
-      mock_api, mock_scheduler_proxy = self.create_mock_api()
-      mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-      mock_scheduler_proxy.drainHosts.return_value = self.create_start_maintenance_result()
-      mock_vector = self.create_mock_probe_hosts_vector([
-          self.create_probe_hosts(self.HOSTNAMES[0], 95, False, None),
-          self.create_probe_hosts(self.HOSTNAMES[1], 95, False, None),
-          self.create_probe_hosts(self.HOSTNAMES[2], 95, False, None)
-      ])
-
-      with contextlib.nested(
-          patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-          patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-                return_value=mock_vector),
-          patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-          patch('threading._Event.wait'),
-          patch('twitter.common.app.get_options', return_value=mock_options)):
-        host_drain([self.TEST_CLUSTER])
-
-        mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-
-  def test_perform_maintenance_hosts_failed_custom_sla(self):
-    with temporary_file() as fp:
-      mock_options = self.make_mock_options()
-      mock_options.post_drain_script = None
-      mock_options.grouping = 'by_host'
-      mock_options.percentage = 50
-      mock_options.duration = '10m'
-      mock_options.reason = 'Test overrides'
-      mock_options.unsafe_hosts_filename = fp.name
-
-      mock_api, mock_scheduler_proxy = self.create_mock_api()
-      mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-      mock_scheduler_proxy.drainHosts.return_value = self.create_start_maintenance_result()
-      mock_vector = self.create_mock_probe_hosts_vector([
-          self.create_probe_hosts(self.HOSTNAMES[0], 95, False, None),
-          self.create_probe_hosts(self.HOSTNAMES[1], 95, False, None),
-          self.create_probe_hosts(self.HOSTNAMES[2], 95, False, None)
-      ])
-      mock_wait = Mock()
-
-      with contextlib.nested(
-          patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-          patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-                return_value=mock_vector),
-          patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-          patch('apache.aurora.admin.admin_util.log_admin_message'),
-          patch('threading._Event.wait', return_value=mock_wait),
-          patch('twitter.common.app.get_options', return_value=mock_options)
-      ) as (_, _, _, log, _, _):
-
-        host_drain([self.TEST_CLUSTER])
-
-        assert 'Test overrides' in log.call_args[0][1]
-        mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-        mock_wait.called_once_with(HostMaintenance.MAX_STATUS_WAIT)
-
-  def test_perform_maintenance_hosts_no_prod_tasks(self):
-    mock_options = self.make_mock_options()
-    mock_options.post_drain_script = None
-    mock_options.grouping = 'by_host'
-
-    def host_status_results(hostnames):
-      if isinstance(hostnames, Hosts):
-        return self.create_drained_status_result(hostnames)
-      return self.create_maintenance_status_result()
-
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.maintenanceStatus.side_effect = host_status_results
-    mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-    mock_scheduler_proxy.drainHosts.return_value = self.create_start_maintenance_result()
-
-    def create_empty_sla_results():
-      mock_vector = Mock()
-      mock_vector.probe_hosts.return_value = []
-      return mock_vector
-
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-              return_value=create_empty_sla_results()),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('threading._Event.wait'),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-
-      host_drain([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-      assert mock_scheduler_proxy.maintenanceStatus.call_count == 3
-      assert mock_scheduler_proxy.drainHosts.call_count == 3
-
-  def test_perform_maintenance_hosts_multiple_sla_groups_failure(self):
-    mock_options = self.make_mock_options()
-    mock_options.post_drain_script = None
-    mock_options.grouping = 'by_host'
-    mock_options.unsafe_hosts_filename = None
-
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.startMaintenance.return_value = self.create_start_maintenance_result()
-
-    def create_multiple_sla_results():
-      mock_vector = Mock()
-      mock_vector.probe_hosts.return_value = self.HOSTNAMES
-      return mock_vector
-
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.api.sla.Sla.get_domain_uptime_vector',
-              return_value=create_multiple_sla_results()),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-
-      host_drain([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.startMaintenance.assert_called_with(Hosts(set(self.HOSTNAMES)))
-
-  def test_perform_maintenance_hosts_reason_missing(self):
-    mock_options = self.make_mock_options()
-    mock_options.grouping = 'by_host'
-    mock_options.percentage = 50
-    mock_options.duration = '10m'
-
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      self.assertRaises(SystemExit, host_drain, [self.TEST_CLUSTER])
-
-  def test_host_maintenance_status(self):
-    mock_options = self.make_mock_options()
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_scheduler_proxy.maintenanceStatus.return_value = self.create_maintenance_status_result()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.commands.maintenance.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-      host_status([self.TEST_CLUSTER])
-
-      mock_scheduler_proxy.maintenanceStatus.assert_called_with(Hosts(set(self.HOSTNAMES)))

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_restart.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_restart.py b/src/test/python/apache/aurora/client/commands/test_restart.py
deleted file mode 100644
index c71f818..0000000
--- a/src/test/python/apache/aurora/client/commands/test_restart.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-import functools
-from collections import namedtuple
-
-from mock import call, create_autospec, patch
-from twitter.common.contextutil import temporary_file
-
-from apache.aurora.client.api.health_check import Retriable, StatusHealthCheck
-from apache.aurora.client.commands.core import restart
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.constants import ACTIVE_STATES
-from gen.apache.aurora.api.ttypes import (
-    AssignedTask,
-    JobKey,
-    PopulateJobResult,
-    Result,
-    ScheduledTask,
-    ScheduleStatusResult,
-    TaskConfig,
-    TaskQuery
-)
-
-
-class FakeOptions(namedtuple('FakeOptions', ['max_total_failures',
-     'disable_all_hooks_reason',
-     'batch_size',
-     'restart_threshold',
-     'watch_secs',
-     'max_per_shard_failures',
-     'shards',
-     'health_check_interval_seconds',
-     'open_browser'])):
-
-  def __new__(cls,
-      max_total_failures=None,
-      disable_all_hooks_reason=None,
-      batch_size=None,
-      restart_threshold=None,
-      watch_secs=None,
-      max_per_shard_failures=None,
-      shards=None,
-      health_check_interval_seconds=None,
-      open_browser=None):
-    return super(FakeOptions, cls).__new__(
-        cls,
-        max_total_failures,
-        disable_all_hooks_reason,
-        batch_size,
-        restart_threshold,
-        watch_secs,
-        max_per_shard_failures,
-        shards,
-        health_check_interval_seconds,
-        open_browser)
-
-
-class TestRestartCommand(AuroraClientCommandTest):
-
-  @classmethod
-  def setup_mock_scheduler_for_simple_restart(cls, api):
-    """Set up all of the API mocks for scheduler calls during a simple restart"""
-    sched_proxy = api.scheduler_proxy
-    cls.setup_get_tasks_status_calls(sched_proxy)
-    cls.setup_populate_job_config(sched_proxy)
-    sched_proxy.restartShards.return_value = cls.create_simple_success_response()
-
-  @classmethod
-  def setup_populate_job_config(cls, api):
-    populate = cls.create_simple_success_response()
-    populate.result.populateJobResult = create_autospec(
-        spec=PopulateJobResult,
-        spec_set=False,
-        instance=True,
-        watch_secs=None)
-    api.populateJobConfig.return_value = populate
-    configs = []
-    for i in range(20):
-      task_config = create_autospec(spec=TaskConfig, instance=True)
-      configs.append(task_config)
-    populate.result.populateJobResult.populatedDEPRECATED = set(configs)
-    populate.result.populateJobResult.taskConfig = configs[0]
-    return populate
-
-  @classmethod
-  def setup_get_tasks_status_calls(cls, scheduler):
-
-    tasks = []
-    for i in range(20):
-      tasks.append(ScheduledTask(
-        assignedTask=AssignedTask(
-          slaveHost='slave%s' % i,
-          instanceId=i,
-          taskId='task%s' % i,
-          slaveId='slave%s' % i,
-          task=TaskConfig())))
-    status_response = cls.create_simple_success_response()
-    status_response.result = Result(scheduleStatusResult=ScheduleStatusResult(tasks=tasks))
-    scheduler.getTasksWithoutConfigs.return_value = status_response
-
-  @classmethod
-  def setup_health_checks(cls, mock_api):
-    mock_health_check = create_autospec(spec=StatusHealthCheck, instance=True)
-    mock_health_check.health.return_value = Retriable.alive()
-    return mock_health_check
-
-  def test_restart_simple(self):
-    options = FakeOptions(
-        max_total_failures=1,
-        batch_size=5,
-        restart_threshold=10,
-        watch_secs=10)
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_health_check = self.setup_health_checks(mock_api)
-    self.setup_mock_scheduler_for_simple_restart(mock_api)
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=options),
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
-            return_value=mock_health_check),
-        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
-        patch('threading._Event.wait')):
-
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        restart(['west/mchucarroll/test/hello'], options)
-
-        # Like the update test, the exact number of calls here doesn't matter.
-        # What matters is that it must have been called once before batching, plus
-        # at least once per batch, and there are 4 batches.
-        assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count >= 4
-        # called once per batch
-        assert mock_scheduler_proxy.restartShards.call_count == 4
-        # parameters for all calls are generated by the same code, so we just check one
-        mock_scheduler_proxy.restartShards.assert_called_with(JobKey(environment=self.TEST_ENV,
-            role=self.TEST_ROLE, name=self.TEST_JOB), [15, 16, 17, 18, 19], None)
-
-  def test_restart_simple_invalid_max_failures(self):
-    options = FakeOptions(
-        max_total_failures=None,
-        batch_size=5,
-        restart_threshold=10,
-        watch_secs=10)
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_health_check = self.setup_health_checks(mock_api)
-    self.setup_mock_scheduler_for_simple_restart(mock_api)
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=options),
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
-            return_value=mock_health_check),
-        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
-        patch('threading._Event.wait')):
-
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        self.assertRaises(SystemExit, restart, ['west/mchucarroll/test/hello'], options)
-        assert mock_scheduler_proxy.mock_calls == []
-
-  def test_restart_failed_status(self):
-    options = FakeOptions()
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_health_check = self.setup_health_checks(mock_api)
-    self.setup_mock_scheduler_for_simple_restart(mock_api)
-    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_error_response()
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=options),
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
-            return_value=mock_health_check),
-        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
-        patch('threading._Event.wait')
-    ) as (options, scheduler_proxy_class, test_clusters, mock_health_check_factory,
-        time_patch, sleep_patch):
-
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        self.assertRaises(SystemExit, restart, ['west/mchucarroll/test/hello'], options)
-        # TODO(wfarner): Spread this pattern further, as it flags unexpected method calls.
-        assert mock_scheduler_proxy.mock_calls == [
-          call.getTasksWithoutConfigs(
-              TaskQuery(jobKeys=[JobKey('mchucarroll', 'test', 'hello')], statuses=ACTIVE_STATES))
-        ]
-
-  def test_restart_failed_restart(self):
-    options = FakeOptions(
-        max_total_failures=1,
-        batch_size=5,
-        restart_threshold=10,
-        watch_secs=10)
-    mock_api, mock_scheduler_proxy = self.create_mock_api()
-    mock_health_check = self.setup_health_checks(mock_api)
-    self.setup_mock_scheduler_for_simple_restart(mock_api)
-    mock_scheduler_proxy.restartShards.return_value = self.create_error_response()
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=options),
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
-            return_value=mock_health_check),
-        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
-        patch('threading._Event.wait')):
-
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        self.assertRaises(SystemExit, restart, ['west/mchucarroll/test/hello'], options)
-        assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count == 1
-        assert mock_scheduler_proxy.restartShards.call_count == 1
-        mock_scheduler_proxy.restartShards.assert_called_with(
-            JobKey(environment=self.TEST_ENV, role=self.TEST_ROLE, name=self.TEST_JOB),
-            [0, 1, 2, 3, 4],
-            None)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_run.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_run.py b/src/test/python/apache/aurora/client/commands/test_run.py
deleted file mode 100644
index 528c3e9..0000000
--- a/src/test/python/apache/aurora/client/commands/test_run.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-
-from mock import create_autospec, Mock, patch
-
-from apache.aurora.client.commands.run import run
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.constants import LIVE_STATES
-from gen.apache.aurora.api.ttypes import (
-    AssignedTask,
-    Identity,
-    JobKey,
-    ResponseCode,
-    ScheduledTask,
-    ScheduleStatus,
-    ScheduleStatusResult,
-    TaskConfig,
-    TaskEvent,
-    TaskQuery
-)
-
-
-class TestRunCommand(AuroraClientCommandTest):
-
-  @classmethod
-  def setup_mock_options(cls):
-    """set up to get a mock options object."""
-    mock_options = Mock()
-    mock_options.num_threads = 4
-    mock_options.tunnels = []
-    mock_options.executor_sandbox = False
-    mock_options.ssh_user = None
-    mock_options.disable_all_hooks = False
-    return mock_options
-
-  @classmethod
-  def create_mock_scheduled_tasks(cls):
-    tasks = []
-    for name in ['foo', 'bar', 'baz']:
-      task = ScheduledTask(
-        failureCount=0,
-        status=ScheduleStatus.RUNNING,
-        taskEvents=[
-            TaskEvent(timestamp=123, status=ScheduleStatus.RUNNING, message='Fake message')],
-        assignedTask=AssignedTask(
-            assignedPorts={},
-            slaveHost='slavehost',
-            instanceId=0,
-            taskId='taskid',
-            task=TaskConfig(
-                maxTaskFailures=1,
-                executorConfig='fake data',
-                metadata=[],
-                owner=Identity(role='fakerole'),
-                environment='test',
-                jobName=name,
-                numCpus=2,
-                ramMb=2,
-                diskMb=2
-            )
-        )
-      )
-      tasks.append(task)
-    return tasks
-
-  @classmethod
-  def create_status_response(cls):
-    resp = cls.create_simple_success_response()
-    resp.result.scheduleStatusResult = create_autospec(
-        spec=ScheduleStatusResult,
-        spec_set=False,
-        instance=True)
-    resp.result.scheduleStatusResult.tasks = cls.create_mock_scheduled_tasks()
-    return resp
-
-  @classmethod
-  def create_failed_status_response(cls):
-    return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
-
-  @classmethod
-  def create_mock_process(cls):
-    process = Mock()
-    process.communicate.return_value = ["hello", "world"]
-    return process
-
-  def test_successful_run(self):
-    """Test the run command."""
-    # Calls api.check_status, which calls scheduler_proxy.getJobs
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
-    sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.commands.run.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
-            return_value=sandbox_args),
-        patch('subprocess.Popen', return_value=self.create_mock_process())) as (
-            mock_scheduler_proxy_class,
-            mock_clusters,
-            mock_clusters_runpatch,
-            options,
-            mock_runner_args_patch,
-            mock_subprocess):
-      run(['west/mchucarroll/test/hello', 'ls'], mock_options)
-
-      # The status command sends a getTasksStatus query to the scheduler,
-      # and then prints the result.
-      mock_scheduler_proxy.getTasksStatus.assert_called_with(
-          TaskQuery(jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')],
-                    statuses=LIVE_STATES))
-
-      # The mock status call returns three ScheduledTasks, so three commands should have been run
-      assert mock_subprocess.call_count == 3
-      mock_subprocess.assert_called_with(['ssh', '-n', '-q', 'mchucarroll@slavehost',
-          'cd /slaveroot/slaves/*/frameworks/*/executors/thermos-taskid/runs/'
-          'slaverun/sandbox;ls'],
-          stderr=-2, stdout=-1)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_ssh.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_ssh.py b/src/test/python/apache/aurora/client/commands/test_ssh.py
deleted file mode 100644
index 5af9133..0000000
--- a/src/test/python/apache/aurora/client/commands/test_ssh.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-
-from mock import create_autospec, Mock, patch
-
-from apache.aurora.client.commands.ssh import ssh
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.constants import LIVE_STATES
-from gen.apache.aurora.api.ttypes import (
-    AssignedTask,
-    Identity,
-    JobKey,
-    ResponseCode,
-    ScheduleStatus,
-    ScheduleStatusResult,
-    TaskConfig,
-    TaskEvent,
-    TaskQuery
-)
-
-
-class TestSshCommand(AuroraClientCommandTest):
-
-  @classmethod
-  def setup_mock_options(cls):
-    """set up to get a mock options object."""
-    mock_options = Mock()
-    mock_options.tunnels = []
-    mock_options.executor_sandbox = False
-    mock_options.ssh_user = None
-    mock_options.disable_all_hooks = False
-    return mock_options
-
-  @classmethod
-  def create_mock_scheduled_tasks(cls):
-    jobs = []
-    for name in ['foo', 'bar', 'baz']:
-      job_key = JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=name)
-      job = Mock()
-      job.key = job_key
-      job.failure_count = 0
-      job.assignedTask = create_autospec(spec=AssignedTask, instance=True)
-      job.assignedTask.taskId = 1287391823
-      job.assignedTask.slaveHost = 'slavehost'
-      job.assignedTask.task = create_autospec(spec=TaskConfig, instance=True)
-      job.assignedTask.task.executorConfig = Mock()
-      job.assignedTask.task.maxTaskFailures = 1
-      job.assignedTask.task.metadata = []
-      job.assignedTask.task.job = job_key
-      job.assignedTask.task.owner = Identity(role=cls.TEST_ROLE)
-      job.assignedTask.task.environment = cls.TEST_ENV
-      job.assignedTask.task.jobName = name
-      job.assignedTask.task.numCpus = 2
-      job.assignedTask.task.ramMb = 2
-      job.assignedTask.task.diskMb = 2
-      job.assignedTask.instanceId = 4237894
-      job.assignedTask.assignedPorts = {}
-      job.status = ScheduleStatus.RUNNING
-      mockEvent = create_autospec(spec=TaskEvent, instance=True)
-      mockEvent.timestamp = 28234726395
-      mockEvent.status = ScheduleStatus.RUNNING
-      mockEvent.message = "Hi there"
-      job.taskEvents = [mockEvent]
-      jobs.append(job)
-    return jobs
-
-  @classmethod
-  def create_status_response(cls):
-    resp = cls.create_simple_success_response()
-    resp.result.scheduleStatusResult = ScheduleStatusResult()
-    resp.result.scheduleStatusResult.tasks = cls.create_mock_scheduled_tasks()
-    return resp
-
-  @classmethod
-  def create_nojob_status_response(cls):
-    resp = cls.create_simple_success_response()
-    resp.result.scheduleStatusResult = ScheduleStatusResult()
-    resp.result.scheduleStatusResult.tasks = []
-    return resp
-
-  @classmethod
-  def create_failed_status_response(cls):
-    return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
-
-  def test_successful_ssh(self):
-    """Test the ssh command."""
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
-    sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
-            return_value=sandbox_args),
-        patch('subprocess.call', return_value=0)) as (
-            mock_scheduler_proxy_class,
-            mock_clusters,
-            options,
-            mock_runner_args_patch,
-            mock_subprocess):
-      ssh(['west/mchucarroll/test/hello', '1', 'ls'], mock_options)
-
-      # The status command sends a getTasksStatus query to the scheduler,
-      # and then prints the result.
-      mock_scheduler_proxy.getTasksStatus.assert_called_with(
-          TaskQuery(jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')],
-                    instanceIds=set([1]),
-                    statuses=LIVE_STATES))
-      mock_subprocess.assert_called_with(['ssh', '-t', 'mchucarroll@slavehost',
-          'cd /slaveroot/slaves/*/frameworks/*/executors/thermos-1287391823/runs/'
-          'slaverun/sandbox;ls'])
-
-  def test_ssh_deprecation_message(self):
-    """Test the ssh command."""
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
-    sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
-            return_value=sandbox_args),
-        patch('subprocess.call', return_value=0),
-        patch('apache.aurora.client.commands.ssh.v1_deprecation_warning')) as (
-            mock_scheduler_proxy_class,
-            mock_clusters,
-            options,
-            mock_runner_args_patch,
-            mock_subprocess,
-            mock_dep_warn):
-      mock_options.tunnels = ['100:hundred', '1000:thousand']
-      try:
-        ssh(['west/mchucarroll/test/hello', '1', 'ls'], mock_options)
-      except SystemExit:
-        # It's going to fail with an error about unknown ports, but we don't
-        # care: we just want to verify that it generated a deprecation warning
-        # with the correct --tunnels parameters.
-        pass
-      mock_dep_warn.assert_called_with('ssh',
-          ['task', 'ssh', 'west/mchucarroll/test/hello/1',
-           '--tunnels=100:hundred', '--tunnels=1000:thousand',
-           '--command="ls"'])
-
-  def test_ssh_job_not_found(self):
-    """Test the ssh command when the query returns no tasks."""
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksStatus.return_value = self.create_nojob_status_response()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('subprocess.call', return_value=0)) as (
-            mock_scheduler_proxy_class,
-            mock_clusters,
-            options,
-            mock_subprocess):
-      self.assertRaises(SystemExit, ssh, ['west/mchucarroll/test/hello', '1', 'ls'], mock_options)
-
-      assert mock_subprocess.call_count == 0

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_status.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_status.py b/src/test/python/apache/aurora/client/commands/test_status.py
deleted file mode 100644
index 003af25..0000000
--- a/src/test/python/apache/aurora/client/commands/test_status.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-
-from mock import patch
-
-from apache.aurora.client.commands.core import status
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.ttypes import (
-    AssignedTask,
-    Identity,
-    JobKey,
-    ResponseCode,
-    Result,
-    ScheduledTask,
-    ScheduleStatus,
-    ScheduleStatusResult,
-    TaskConfig,
-    TaskEvent,
-    TaskQuery
-)
-
-
-class TestListJobs(AuroraClientCommandTest):
-  @classmethod
-  def create_mock_scheduled_tasks(cls):
-    tasks = []
-    for name in ['foo', 'bar', 'baz']:
-      tasks.append(ScheduledTask(
-          status=ScheduleStatus.RUNNING,
-          failureCount=0,
-          taskEvents=[TaskEvent(timestamp=123, status=ScheduleStatus.RUNNING, message='Hi there')],
-          assignedTask=AssignedTask(
-              instanceId=0,
-              assignedPorts={},
-              task=TaskConfig(
-                  maxTaskFailures=1,
-                  metadata={},
-                  job=JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=name),
-                  owner=Identity(role=cls.TEST_ROLE),
-                  environment=cls.TEST_ENV,
-                  jobName=name,
-                  numCpus=2,
-                  ramMb=2,
-                  diskMb=2
-              )
-          )
-      ))
-    return tasks
-
-  @classmethod
-  def create_mock_scheduled_task_no_metadata(cls):
-    result = cls.create_mock_scheduled_tasks()
-    for job in result:
-      job.assignedTask.task.metadata = None
-    return result
-
-  @classmethod
-  def create_status_response(cls):
-    resp = cls.create_simple_success_response()
-    resp.result = Result(
-        scheduleStatusResult=ScheduleStatusResult(tasks=set(cls.create_mock_scheduled_tasks())))
-    return resp
-
-  @classmethod
-  def create_status_null_metadata(cls):
-    resp = cls.create_simple_success_response()
-    resp.result = Result(
-        scheduleStatusResult=ScheduleStatusResult(
-            tasks=set(cls.create_mock_scheduled_task_no_metadata())))
-    return resp
-
-  @classmethod
-  def create_failed_status_response(cls):
-    return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
-
-  def test_successful_status(self):
-    """Test the status command."""
-    # Calls api.check_status, which calls scheduler_proxy.getJobs
-    fake_options = {}
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_response()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=fake_options)):
-
-      status(['west/mchucarroll/test/hello'], fake_options)
-      # The status command sends a getTasksWithoutConfigs query to the scheduler,
-      # and then prints the result.
-      mock_scheduler_proxy.getTasksWithoutConfigs.assert_called_with(
-          TaskQuery(jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')]))
-
-  def test_unsuccessful_status(self):
-    """Test the status command when the user asks the status of a job that doesn't exist."""
-    # Calls api.check_status, which calls scheduler_proxy.getJobs
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_failed_status_response()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-
-      self.assertRaises(SystemExit, status, ['west/mchucarroll/test/hello'], mock_options)
-      mock_scheduler_proxy.getTasksWithoutConfigs.assert_called_with(
-          TaskQuery(jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')]))
-
-  def test_successful_status_nometadata(self):
-    """Test the status command with no metadata."""
-    # Calls api.check_status, which calls scheduler_proxy.getJobs
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_scheduler_proxy.getTasksWithoutConfigs.return_value = self.create_status_null_metadata()
-    with contextlib.nested(
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('twitter.common.app.get_options', return_value=mock_options)):
-
-      status(['west/mchucarroll/test/hello'], mock_options)
-      mock_scheduler_proxy.getTasksWithoutConfigs.assert_called_with(
-          TaskQuery(jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')]))

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_update.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_update.py b/src/test/python/apache/aurora/client/commands/test_update.py
deleted file mode 100644
index 0439f60..0000000
--- a/src/test/python/apache/aurora/client/commands/test_update.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import contextlib
-import functools
-
-from mock import create_autospec, patch
-from twitter.common.contextutil import temporary_file
-
-from apache.aurora.client.api.health_check import Retriable, StatusHealthCheck
-from apache.aurora.client.api.job_monitor import JobMonitor
-from apache.aurora.client.api.quota_check import QuotaCheck
-from apache.aurora.client.api.scheduler_mux import SchedulerMux
-from apache.aurora.client.api.updater import Updater
-from apache.aurora.client.commands.core import update
-from apache.aurora.config import AuroraConfig
-
-from .util import AuroraClientCommandTest
-
-from gen.apache.aurora.api.constants import ACTIVE_STATES
-from gen.apache.aurora.api.ttypes import (
-    AcquireLockResult,
-    AddInstancesConfig,
-    AssignedTask,
-    JobConfiguration,
-    JobKey,
-    Lock,
-    PopulateJobResult,
-    ResponseCode,
-    Result,
-    ScheduledTask,
-    ScheduleStatus,
-    ScheduleStatusResult,
-    TaskConfig,
-    TaskQuery
-)
-
-
-class TestUpdateCommand(AuroraClientCommandTest):
-  class FakeSchedulerMux(SchedulerMux):
-    def enqueue_and_wait(self, command, data, aggregator=None, timeout=None):
-      return command([data])
-
-    def terminate(self):
-      pass
-
-  @classmethod
-  def setup_mock_options(cls):
-    """set up to get a mock options object."""
-    mock_options = create_autospec(
-        spec=object,
-        spec_set=False,
-        instance=True,
-        json=False,
-        shards=None,
-        health_check_interval_seconds=3,
-        force=True,
-        disable_all_hooks_reason='Fake reason')
-    return mock_options
-
-  @classmethod
-  def setup_mock_updater(cls):
-    updater = create_autospec(spec=Updater, instance=True)
-    return updater
-
-  # First, we pretend that the updater isn't really client-side, and test
-  # that the client makes the right API calls.
-  def test_update_command_line_succeeds(self):
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    with contextlib.nested(
-        patch('apache.aurora.client.commands.core.make_client', return_value=mock_api),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
-
-      mock_api.update_job.return_value = self.create_simple_success_response()
-
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        update([self.TEST_JOBSPEC, fp.name])
-
-      assert mock_api.update_job.call_count == 1
-      args, kwargs = mock_api.update_job.call_args
-      assert isinstance(args[0], AuroraConfig)
-      assert args[1] == 3
-      assert args[2] is None
-
-  def test_update_invalid_config(self):
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    # Set up the context to capture the make_client and get_options calls.
-    with contextlib.nested(
-        patch('apache.aurora.client.commands.core.make_client', return_value=mock_api),
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
-
-      with temporary_file() as fp:
-        fp.write(self.get_invalid_config('invalid_field=False,'))
-        fp.flush()
-        self.assertRaises(AttributeError, update, ([self.TEST_JOBSPEC, fp.name]))
-
-      assert mock_api.update_job.call_count == 0
-
-  @classmethod
-  def setup_mock_scheduler_for_simple_update(cls, api):
-    """Set up all of the API mocks for scheduler calls during a simple update"""
-    sched_proxy = api.scheduler_proxy
-    # First, the updater acquires a lock
-    sched_proxy.acquireLock.return_value = cls.create_acquire_lock_response(ResponseCode.OK, 'OK')
-    # Then it gets the status of the tasks for the updating job.
-    cls.setup_get_tasks_status_calls(sched_proxy)
-    # Next, it needs to populate the update config.
-    cls.setup_populate_job_config(sched_proxy)
-    # Then it does the update, which kills and restarts jobs, and monitors their
-    # health with the status call.
-    cls.setup_kill_tasks(sched_proxy)
-    cls.setup_add_tasks(sched_proxy)
-    # Finally, after successful health checks, it releases the lock.
-    cls.setup_release_lock_response(sched_proxy)
-
-  @classmethod
-  def setup_add_tasks(cls, api):
-    add_response = cls.create_simple_success_response()
-    api.addInstances.return_value = add_response
-    return add_response
-
-  @classmethod
-  def setup_kill_tasks(cls, api):
-    kill_response = cls.create_simple_success_response()
-    api.killTasks.return_value = kill_response
-    return kill_response
-
-  @classmethod
-  def setup_populate_job_config(cls, api):
-    populate = cls.create_simple_success_response()
-
-    api.populateJobConfig.return_value = populate
-    configs = []
-    for _ in range(20):
-      task_config = TaskConfig(
-          numCpus=1.0,
-          ramMb=1,
-          diskMb=1,
-          job=JobKey(role='mchucarroll', environment='test', name='hello'))
-      configs.append(task_config)
-    populate.result = Result(populateJobResult=PopulateJobResult(
-        populatedDEPRECATED=set(configs),
-        taskConfig=configs[0]
-    ))
-    return populate
-
-  @classmethod
-  def create_acquire_lock_response(cls, code, msg):
-    """Set up the response to a startUpdate API call."""
-    start_update_response = cls.create_blank_response(code, msg)
-    start_update_response.result = Result(
-        acquireLockResult=AcquireLockResult(lock=Lock(key='foo', token='token')))
-    return start_update_response
-
-  @classmethod
-  def setup_release_lock_response(cls, api):
-    """Set up the response to a startUpdate API call."""
-    release_lock_response = cls.create_simple_success_response()
-    api.releaseLock.return_value = release_lock_response
-    return release_lock_response
-
-  @classmethod
-  def setup_get_tasks_status_calls(cls, scheduler_proxy):
-    status_response = cls.create_simple_success_response()
-    scheduler_proxy.getTasksStatus.return_value = status_response
-    scheduler_proxy.getTasksWithoutConfigs.return_value = status_response
-    schedule_status = create_autospec(spec=ScheduleStatusResult, instance=True)
-    status_response.result.scheduleStatusResult = schedule_status
-    task_config = TaskConfig(
-        numCpus=1.0,
-        ramMb=10,
-        diskMb=1,
-        job=JobKey(role='mchucarroll', environment='test', name='hello'))
-
-    # This should be a list of ScheduledTask's.
-    schedule_status.tasks = []
-    for i in range(20):
-      task_status = create_autospec(spec=ScheduledTask, instance=True)
-      task_status.assignedTask = create_autospec(spec=AssignedTask, instance=True)
-      task_status.assignedTask.instanceId = i
-      task_status.assignedTask.taskId = "Task%s" % i
-      task_status.assignedTask.slaveId = "Slave%s" % i
-      task_status.slaveHost = "Slave%s" % i
-      task_status.assignedTask.task = task_config
-      schedule_status.tasks.append(task_status)
-
-  @classmethod
-  def assert_start_update_called(cls, mock_scheduler_proxy):
-    assert mock_scheduler_proxy.startUpdate.call_count == 1
-    assert isinstance(mock_scheduler_proxy.startUpdate.call_args[0][0], JobConfiguration)
-
-  @classmethod
-  def setup_health_checks(cls, mock_api):
-    mock_health_check = create_autospec(spec=StatusHealthCheck, instance=True)
-    mock_health_check.health.return_value = Retriable.alive()
-    return mock_health_check
-
-  @classmethod
-  def setup_quota_check(cls):
-    mock_quota_check = create_autospec(spec=QuotaCheck, instance=True)
-    mock_quota_check.validate_quota_from_requested.return_value = (
-        cls.create_simple_success_response())
-    return mock_quota_check
-
-  @classmethod
-  def setup_job_monitor(cls):
-    mock_job_monitor = create_autospec(spec=JobMonitor, instance=True)
-    mock_job_monitor.wait_until.return_value = True
-    return mock_job_monitor
-
-  def test_updater_simple(self):
-    # Test the client-side updater logic in its simplest case: everything succeeds, and no rolling
-    # updates.
-    mock_options = self.setup_mock_options()
-    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
-    mock_health_check = self.setup_health_checks(mock_api)
-    mock_quota_check = self.setup_quota_check()
-    mock_job_monitor = self.setup_job_monitor()
-    fake_mux = self.FakeSchedulerMux()
-
-    with contextlib.nested(
-        patch('twitter.common.app.get_options', return_value=mock_options),
-        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
-        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
-        patch('apache.aurora.client.api.instance_watcher.StatusHealthCheck',
-            return_value=mock_health_check),
-        patch('apache.aurora.client.api.updater.QuotaCheck', return_value=mock_quota_check),
-        patch('apache.aurora.client.api.updater.JobMonitor', return_value=mock_job_monitor),
-        patch('apache.aurora.client.api.updater.SchedulerMux', return_value=fake_mux),
-        patch('time.time', side_effect=functools.partial(self.fake_time, self)),
-        patch('threading._Event.wait')
-
-    ) as (options, scheduler_proxy_class, test_clusters, mock_health_check_factory,
-          mock_quota_check_patch, mock_job_monitor_patch, fake_mux, time_patch, sleep_patch):
-      self.setup_mock_scheduler_for_simple_update(mock_api)
-      with temporary_file() as fp:
-        fp.write(self.get_valid_config())
-        fp.flush()
-        update(['west/mchucarroll/test/hello', fp.name])
-
-      # We don't check every call, since the updater implementation is free to change.
-      # What's important is that we verify the key parts of the update process, to have
-      # some confidence that the update did the right things.
-      # Every update should:
-      # check its options, acquire an update lock, check status,
-      # kill the old tasks, start new ones, wait for the new ones to healthcheck,
-      # and finally release the lock.
-      # The kill/start should happen in rolling batches.
-      assert options.call_count == 2
-      assert mock_scheduler_proxy.acquireLock.call_count == 1
-      self.assert_correct_killtask_calls(mock_scheduler_proxy)
-      self.assert_correct_addinstance_calls(mock_scheduler_proxy)
-      self.assert_correct_status_calls(mock_scheduler_proxy)
-      assert mock_scheduler_proxy.releaseLock.call_count == 1
-
-  @classmethod
-  def assert_correct_addinstance_calls(cls, api):
-    assert api.addInstances.call_count == 20
-    last_addinst = api.addInstances.call_args
-    assert isinstance(last_addinst[0][0], AddInstancesConfig)
-    assert last_addinst[0][0].instanceIds == frozenset([19])
-    assert last_addinst[0][0].key == JobKey(environment='test', role='mchucarroll', name='hello')
-
-  @classmethod
-  def assert_correct_killtask_calls(cls, api):
-    assert api.killTasks.call_count == 20
-    # Check the last call's parameters.
-    api.killTasks.assert_called_with(
-        TaskQuery(taskIds=None,
-            jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')],
-            instanceIds=frozenset([19]),
-            statuses=ACTIVE_STATES),
-        Lock(key='foo', token='token'))
-
-  @classmethod
-  def assert_correct_status_calls(cls, api):
-    # getTasksWithoutConfigs gets called a lot of times. The exact number isn't fixed; it loops
-    # over the health checks until all of them pass for a configured period of time.
-    # The minimum number of calls is 20: once before the tasks are restarted, and then
-    # once for each batch of restarts (since the batch size is set to 1 and the
-    # total number of tasks is 20, that's 20 batches).
-    assert api.getTasksWithoutConfigs.call_count >= 4
-
-    status_calls = api.getTasksWithoutConfigs.call_args_list
-    for status_call in status_calls:
-      assert status_call[0][0] == TaskQuery(
-        taskIds=None,
-        jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')],
-        statuses={ScheduleStatus.RUNNING})
-
-    # getTasksStatus is called only once, to generate the update instructions.
-    assert api.getTasksStatus.call_count == 1
-
-    api.getTasksStatus.assert_called_once_with(TaskQuery(
-      taskIds=None,
-      jobKeys=[JobKey(role='mchucarroll', environment='test', name='hello')],
-      statuses=ACTIVE_STATES))

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/test_version.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/test_version.py b/src/test/python/apache/aurora/client/commands/test_version.py
deleted file mode 100644
index cb0f410..0000000
--- a/src/test/python/apache/aurora/client/commands/test_version.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from __future__ import print_function
-
-from unittest import TestCase
-
-from mock import call, create_autospec
-
-from apache.aurora.client.commands.core import (
-    _API_VERSION_MESSAGE,
-    _BUILD_INFO_HEADER,
-    _NO_BUILD_INFO_MESSAGE,
-    _version
-)
-from apache.aurora.common.pex_version import pex_version, UnknownVersion
-
-
-class TestVersionCommand(TestCase):
-
-  def setUp(self):
-    self.mock_print = create_autospec(print, spec_set=True)
-    self.mock_pex_version = create_autospec(pex_version, spec_set=True)
-    self.mock_argv = ['test-aurora.pex']
-
-  def _invoke_version(self):
-    _version(_argv=self.mock_argv, _print=self.mock_print, _pex_version=self.mock_pex_version)
-
-  def test_version(self):
-    self.mock_pex_version.return_value = ("foo", "somedate")
-
-    self._invoke_version()
-
-    self.mock_pex_version.assert_called_once_with(self.mock_argv[0])
-    assert self.mock_print.call_count == 4
-    calls = self.mock_print.mock_calls
-    assert calls[0] == call(_BUILD_INFO_HEADER)
-    assert "foo" in calls[1][1][0]
-    assert "somedate" in calls[2][1][0]
-    assert calls[3] == call(_API_VERSION_MESSAGE)
-
-  def test_unknown_version(self):
-    # If we aren't a PEX we'll be a bad zip file.
-    self.mock_pex_version.side_effect = UnknownVersion
-
-    self._invoke_version()
-
-    self.mock_pex_version.assert_called_once_with(self.mock_argv[0])
-    self.mock_print.assert_has_calls([
-      call(_NO_BUILD_INFO_MESSAGE),
-      call(_API_VERSION_MESSAGE),
-    ])

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/python/apache/aurora/client/commands/util.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/commands/util.py b/src/test/python/apache/aurora/client/commands/util.py
deleted file mode 100644
index 0d93e76..0000000
--- a/src/test/python/apache/aurora/client/commands/util.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import unittest
-from collections import defaultdict
-
-from mock import create_autospec
-
-from apache.aurora.client.api.sla import DomainUpTimeSlaVector, JobUpTimeDetails
-from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
-from apache.aurora.common.aurora_job_key import AuroraJobKey
-from apache.aurora.common.cluster import Cluster
-from apache.aurora.common.clusters import Clusters
-
-from ..api.api_util import SchedulerProxyApiSpec
-
-from gen.apache.aurora.api.ttypes import Response, ResponseCode, ResponseDetail, Result
-
-
-class AuroraClientCommandTest(unittest.TestCase):
-  @classmethod
-  def create_blank_response(cls, code, msg):
-    # TODO(wfarner): Don't use a mock here.
-    response = create_autospec(spec=Response, instance=True)
-    response.responseCode = code
-    response.result = create_autospec(spec=Result, instance=True)
-    response.details = [ResponseDetail(message=msg)]
-    return response
-
-  @classmethod
-  def create_simple_success_response(cls):
-    return cls.create_blank_response(ResponseCode.OK, 'OK')
-
-  @classmethod
-  def create_error_response(cls):
-    return cls.create_blank_response(ResponseCode.ERROR, 'Damn')
-
-  @classmethod
-  def create_mock_api(cls):
-    """Builds up a mock API object, with a mock SchedulerProxy"""
-    """Builds up a mock API object, with a mock SchedulerProxy.
-    Returns the API and the proxy"""
-    mock_scheduler_proxy = create_autospec(
-        spec=SchedulerProxyApiSpec,
-        spec_set=False,
-        instance=True)
-    mock_scheduler_proxy.url = "http://something_or_other"
-    mock_scheduler_proxy.scheduler_client.return_value = mock_scheduler_proxy
-    mock_api = create_autospec(spec=HookedAuroraClientAPI)
-    mock_api.scheduler_proxy = mock_scheduler_proxy
-    return mock_api, mock_scheduler_proxy
-
-  @classmethod
-  def create_mock_api_factory(cls):
-    """Create a collection of mocks for a test that wants to mock out the client API
-    by patching the api factory."""
-    mock_api, mock_scheduler_proxy = cls.create_mock_api()
-    mock_api_factory = lambda x: mock_api
-    return mock_api_factory, mock_scheduler_proxy
-
-  FAKE_TIME = 42131
-
-  @classmethod
-  def fake_time(cls, ignored):
-    """Utility function used for faking time to speed up tests."""
-    cls.FAKE_TIME += 2
-    return cls.FAKE_TIME
-
-  CONFIG_BASE = """
-HELLO_WORLD = Job(
-  name = '%(job)s',
-  role = '%(role)s',
-  cluster = '%(cluster)s',
-  environment = '%(env)s',
-  instances = 20,
-  %(inner)s
-  update_config = UpdateConfig(
-    batch_size = 1,
-    restart_threshold = 60,
-    watch_secs = 45,
-    max_per_shard_failures = 2,
-  ),
-  task = Task(
-    name = 'test',
-    processes = [Process(name = 'hello_world', cmdline = 'echo {{thermos.ports[http]}}')],
-    resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB),
-  )
-)
-jobs = [HELLO_WORLD]
-"""
-
-  TEST_ROLE = 'mchucarroll'
-
-  TEST_ENV = 'test'
-
-  TEST_JOB = 'hello'
-
-  TEST_CLUSTER = 'west'
-
-  TEST_JOBSPEC = 'west/mchucarroll/test/hello'
-
-  TEST_CLUSTERS = Clusters([Cluster(
-      name='west',
-      zk='zookeeper.example.com',
-      scheduler_zk_path='/foo/bar',
-      auth_mechanism='UNAUTHENTICATED')])
-
-  @classmethod
-  def get_test_config(cls, cluster, role, env, job, filler=''):
-    """Create a config from the template"""
-    return cls.CONFIG_BASE % {'job': job, 'role': role, 'env': env, 'cluster': cluster,
-        'inner': filler}
-
-  @classmethod
-  def get_valid_config(cls):
-    return cls.get_test_config(cls.TEST_CLUSTER, cls.TEST_ROLE, cls.TEST_ENV, cls.TEST_JOB)
-
-  @classmethod
-  def get_invalid_config(cls, bad_clause):
-    return cls.get_test_config(cls.TEST_CLUSTER, cls.TEST_ROLE, cls.TEST_ENV, cls.TEST_JOB,
-        bad_clause)
-
-  @classmethod
-  def create_mock_probe_hosts_vector(cls, side_effects):
-    mock_vector = create_autospec(spec=DomainUpTimeSlaVector, instance=True)
-    mock_vector.probe_hosts.side_effect = side_effects
-    return mock_vector
-
-  @classmethod
-  def create_probe_hosts(cls, hostname, predicted, safe, safe_in):
-    hosts = defaultdict(list)
-    job = AuroraJobKey.from_path('west/role/env/job-%s' % hostname)
-    hosts[hostname].append(JobUpTimeDetails(job, predicted, safe, safe_in))
-    return [hosts]
-
-  # TODO(wfarner): Remove this; force tests to call out their flags.
-  @classmethod
-  def setup_mock_options(cls):
-    mock_options = create_autospec(spec=['verbosity'], instance=True)
-    mock_options.verbosity = 'verbose'
-    return mock_options

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/9a817f24/src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
----------------------------------------------------------------------
diff --git a/src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh b/src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
index 2a436b4..578662c 100755
--- a/src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+++ b/src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
@@ -71,13 +71,13 @@ test_http_example() {
   test $? -eq 0
 
   # Run a kill without specifying instances, and verify that it gets an error, and the job
-  # isn't affected. (TODO(mchucarroll): the failed kill should return non-zero!)
+  # isn't affected. (TODO(maxim): the failed kill should return non-zero!)
   vagrant ssh -c "aurora job kill $jobkey" 2>&1 | grep -q "The instances list cannot be omitted in a kill command"
   check_url_live "$base_url/scheduler/$_role/$_env/$_job"
 
   vagrant ssh -c "aurora job kill $jobkey/1"
 
-  vagrant ssh -c "aurora job killall  $jobkey"
+  vagrant ssh -c "aurora job killall $jobkey"
 
   vagrant ssh -c "aurora quota get $_cluster/$_role"
 }
@@ -89,7 +89,7 @@ test_admin() {
 
   echo '== Testing Aurora Admin commands...'
   echo '== Getting leading scheduler'
-  vagrant ssh -c "aurora_admin get_scheduler $_cluster" | grep "$base_url"
+  vagrant ssh -c "aurora_admin get_scheduler $_cluster" | grep ":8081"
 }
 
 RETCODE=1

