aurora-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wick...@apache.org
Subject git commit: client v2: added implementations of job diff, job inspect.
Date Tue, 14 Jan 2014 17:41:57 GMT
Updated Branches:
  refs/heads/master 343613a5c -> e1aee67b7


client v2: added implementations of job diff, job inspect.

Implemented client v2 versions of the diff and inspect verbs.

Testing Done:
Added new unit tests for "diff"; inspect doesn't do any API calls that aren't already tested.

Reviewed at https://reviews.apache.org/r/16423/


Project: http://git-wip-us.apache.org/repos/asf/incubator-aurora/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-aurora/commit/e1aee67b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-aurora/tree/e1aee67b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-aurora/diff/e1aee67b

Branch: refs/heads/master
Commit: e1aee67b7f792cd0ebc00e5816388e567df70c0d
Parents: 343613a
Author: Mark Chu-Carroll <mchucarroll@twopensource.com>
Authored: Tue Jan 14 09:41:33 2014 -0800
Committer: Brian Wickman <wickman@twitter.com>
Committed: Tue Jan 14 09:41:33 2014 -0800

----------------------------------------------------------------------
 .../python/apache/aurora/client/cli/__init__.py |   9 +-
 .../python/apache/aurora/client/cli/context.py  | 132 +++++++-
 .../python/apache/aurora/client/cli/jobs.py     | 308 ++++++++++++++++++-
 .../python/apache/aurora/client/cli/options.py  |   4 +-
 src/main/python/apache/aurora/client/options.py |   2 +
 src/main/python/twitter/aurora/client/cli/BUILD |  26 ++
 src/test/python/apache/aurora/client/cli/BUILD  |   2 +-
 .../apache/aurora/client/cli/test_create.py     |  26 +-
 .../apache/aurora/client/cli/test_kill.py       |  29 +-
 .../apache/aurora/client/cli/test_status.py     | 161 ++++++++++
 .../python/apache/aurora/client/cli/util.py     |  45 ++-
 src/test/python/twitter/aurora/client/cli/BUILD |  30 ++
 .../twitter/aurora/client/cli/test_diff.py      | 182 +++++++++++
 13 files changed, 880 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/apache/aurora/client/cli/__init__.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/client/cli/__init__.py b/src/main/python/apache/aurora/client/cli/__init__.py
index 2c08cf9..1df0d9b 100644
--- a/src/main/python/apache/aurora/client/cli/__init__.py
+++ b/src/main/python/apache/aurora/client/cli/__init__.py
@@ -9,7 +9,6 @@ arguments needed by the verb.
 For example:
 - To create a job, the noun is "job", the verb is "create":
   $ aurora job create us-west/www/prod/server server.aurora
-
 - To find out the resource quota for a specific user, the noun is "user" and the verb is
   "get_quota":
   $ aurora user get_quota mchucarroll
@@ -31,6 +30,7 @@ EXIT_INVALID_PARAMETER = 6
 EXIT_NETWORK_ERROR = 7
 EXIT_PERMISSION_VIOLATION = 8
 EXIT_TIMEOUT = 9
+EXIT_API_ERROR = 10
 EXIT_UNKNOWN_ERROR = 20
 
 
@@ -45,6 +45,10 @@ class Context(object):
       self.msg = msg
       self.code = code
 
+  @classmethod
+  def exit(cls, code, msg):
+    raise cls.CommandError(code, msg)
+
   def set_options(self, options):
     """Add the options object to a context.
     This is separated from the constructor to make patching tests easier.
@@ -54,7 +58,8 @@ class Context(object):
 
 class CommandOption(object):
   """A lightweight encapsulation of an argparse option specification, which can be used to
-  define options that can be reused by multiple commands."""
+  define options that can be reused by multiple commands.
+  """
 
   def __init__(self, *args, **kwargs):
     self.args = args

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/apache/aurora/client/cli/context.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/client/cli/context.py b/src/main/python/apache/aurora/client/cli/context.py
index c50f1a1..be156bb 100644
--- a/src/main/python/apache/aurora/client/cli/context.py
+++ b/src/main/python/apache/aurora/client/cli/context.py
@@ -1,6 +1,17 @@
+from __future__ import print_function
+from collections import namedtuple
+from fnmatch import fnmatch
+import sys
 
+from apache.aurora.common.clusters import CLUSTERS
+from apache.aurora.common.aurora_job_key import AuroraJobKey
 from apache.aurora.client.base import synthesize_url
-from apache.aurora.client.cli import Context, EXIT_NETWORK_ERROR
+from apache.aurora.client.cli import (
+    Context,
+    EXIT_NETWORK_ERROR,
+    EXIT_INVALID_PARAMETER,
+    EXIT_NETWORK_ERROR
+)
 from apache.aurora.client.config import get_config
 from apache.aurora.client.factory import make_client
 from twitter.common import log
@@ -8,42 +19,129 @@ from twitter.common import log
 from gen.apache.aurora.ttypes import ResponseCode
 
 
+# Utility type, representing job keys with wildcards.
+PartialJobKey = namedtuple('PartialJobKey', ['cluster', 'role', 'env', 'name'])
+
+
 class AuroraCommandContext(Context):
   """A context object used by Aurora commands to manage command processing state
   and common operations.
   """
 
+  def __init__(self):
+    super(AuroraCommandContext, self).__init__()
+    self.apis = {}
+
   def get_api(self, cluster):
-    """Creates an API object for a specified cluster"""
-    return make_client(cluster)
+    """Gets an API object for a specified cluster
+    Keeps the API handle cached, so that only one handle for each cluster will be created in a
+    session.
+    """
+    if cluster not in self.apis:
+      api = make_client(cluster)
+      self.apis[cluster] = api
+    return self.apis[cluster]
 
-  def get_job_config(self, job_key, config_file):
-    """Loads a job configuration from a config file"""
-    jobname = job_key.name
+  def get_job_config(self, jobkey, config_file):
+    """Loads a job configuration from a config file."""
+    jobname = jobkey.name
     return get_config(
       jobname,
       config_file,
       self.options.json,
       self.options.bindings,
-      select_cluster=job_key.cluster,
-      select_role=job_key.role,
-      select_env=job_key.env)
+      select_cluster=jobkey.cluster,
+      select_role=jobkey.role,
+      select_env=jobkey.env)
+
+  def print_out(self, str):
+    """Prints output. For debugging purposes, it's nice to be able to patch this
+    and capture output.
+    """
+    print(str)
+
+  def print_err(self, str):
+    """Prints output to standard error."""
+    print(str, file=sys.stderr)
 
   def open_page(self, url):
     import webbrowser
     webbrowser.open_new_tab(url)
 
-  def open_job_page(self, api, config):
-    self.open_page(synthesize_url(api.scheduler.scheduler().url, config.role(),
-        config.environment(), config.name()))
-
-  def handle_open(self, api):
-    if self.options.open_browser:
-      self.open_page(synthesize_url(api.scheduler.scheduler().url,
-          self.options.jobspec.role, self.options.jobspec.env, self.options.jobspec.name))
+  def open_job_page(self, api, jobkey):
+    """Open the page for a job in the system web browser."""
+    self.open_page(synthesize_url(api.scheduler.scheduler().url, jobkey.role,
+        jobkey.env, jobkey.name))
 
   def check_and_log_response(self, resp):
     log.info('Response from scheduler: %s (message: %s)'
         % (ResponseCode._VALUES_TO_NAMES[resp.responseCode], resp.message))
     if resp.responseCode != ResponseCode.OK:
       raise self.CommandError(EXIT_NETWORK_ERROR, resp.message)
+
+  @classmethod
+  def parse_partial_jobkey(cls, key):
+    """Given a partial jobkey, where parts can be wildcards, parse it.
+    Slots that are wildcards will be replaced by "*".
+    """
+    parts = []
+    for part in key.split('/'):
+      parts.append(part)
+    if len(parts) > 4:
+      raise cls.CommandError(EXIT_INVALID_PARAMETER, 'Job key must have no more than 4 segments')
+    while len(parts) < 4:
+      parts.append('*')
+    return PartialJobKey(*parts)
+
+  def get_job_list(self, clusters, role=None):
+    """Get a list of all jobs from a group of clusters.
+    :param clusters: the clusters to query for jobs
+    :param role: if specified, only return jobs for the role; otherwise, return all jobs.
+    """
+    result = []
+    if '*' in role:
+      role = None
+    for cluster in clusters:
+      api = self.get_api(cluster)
+      resp = api.get_jobs(role)
+      if resp.responseCode is not ResponseCode.OK:
+        raise self.CommandError(EXIT_COMMAND_FAILURE, resp.message)
+      result.extend([AuroraJobKey(cluster, job.key.role, job.key.environment, job.key.name)
+          for job in resp.result.getJobsResult.configs])
+    return result
+
+  def get_jobs_matching_key(self, key):
+    """Finds all jobs matching a key containing wildcard segments.
+    This is potentially slow!
+    TODO: insert a warning to users about slowness if the key contains wildcards!
+    """
+
+    def is_fully_bound(key):
+      """Helper that checks if a key contains wildcards."""
+      return not any('*' in component for component in [key.cluster, key.role, key.env, key.name])
+
+    def filter_job_list(jobs, role, env, name):
+      """Filter a list of jobs to get just the jobs that match the pattern from a key"""
+      return [job for job in jobs if fnmatch(job.role, role) and fnmatch(job.env, env)
+          and fnmatch(job.name, name)]
+
+    # For cluster, we can expand the list of things we're looking for directly.
+    # For other key elements, we need to just get a list of the jobs on the clusters, and filter
+    # it for things that match.
+    if key.cluster == '*':
+      clusters_to_search = CLUSTERS
+    else:
+      clusters_to_search = [key.cluster]
+    if is_fully_bound(key):
+      return [AuroraJobKey(key.cluster, key.role, key.env, key.name)]
+    else:
+      jobs = filter_job_list(self.get_job_list(clusters_to_search, key.role),
+          key.role, key.env, key.name)
+      return jobs
+
+  def get_job_status(self, key):
+    api = self.get_api(key.cluster)
+    resp = api.check_status(key)
+    if resp.responseCode is not ResponseCode.OK:
+      raise self.CommandError(EXIT_INVALID_PARAMETER, resp.message)
+    return resp.result.scheduleStatusResult.tasks or None

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/apache/aurora/client/cli/jobs.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/client/cli/jobs.py b/src/main/python/apache/aurora/client/cli/jobs.py
index b789361..1d0f37e 100644
--- a/src/main/python/apache/aurora/client/cli/jobs.py
+++ b/src/main/python/apache/aurora/client/cli/jobs.py
@@ -1,6 +1,23 @@
+from __future__ import print_function
+from datetime import datetime
+import json
+import os
+import pprint
+import subprocess
+import sys
+from tempfile import NamedTemporaryFile
+
+from apache.aurora.client.api.job_monitor import JobMonitor
+
+from gen.apache.aurora.constants import ACTIVE_STATES
+from gen.apache.aurora.ttypes import ResponseCode, ScheduleStatus
+
 from apache.aurora.client.api.job_monitor import JobMonitor
 from apache.aurora.client.cli import (
+    EXIT_COMMAND_FAILURE,
     EXIT_INVALID_CONFIGURATION,
+    EXIT_INVALID_PARAMETER,
+    EXIT_OK,
     Noun,
     Verb
 )
@@ -8,18 +25,19 @@ from apache.aurora.client.cli.context import AuroraCommandContext
 from apache.aurora.client.cli.options import (
     BIND_OPTION,
     BROWSER_OPTION,
-    CONFIG_OPTION,
-    JOBSPEC_OPTION,
+    CONFIG_ARGUMENT,
+    JOBSPEC_ARGUMENT,
     JSON_OPTION
 )
 from apache.aurora.common.aurora_job_key import AuroraJobKey
 
 from pystachio.config import Config
+from thrift.TSerialization import serialize
+from thrift.protocol import TJSONProtocol
 
 
 def parse_instances(instances):
   """Parse lists of instances or instance ranges into a set().
-
      Examples:
        0-2
        0,1-3,5
@@ -34,6 +52,10 @@ def parse_instances(instances):
   return sorted(result)
 
 
+def arg_type_jobkey(key):
+  return AuroraCommandContext.parse_partial_jobkey(key)
+
+
 class CreateJobCommand(Verb):
   @property
   def name(self):
@@ -41,7 +63,10 @@ class CreateJobCommand(Verb):
 
   @property
   def help(self):
-    return 'Create a job using aurora'
+    return '''Usage: aurora create cluster/role/env/job config.aurora
+
+    Create a job using aurora
+    '''
 
   CREATE_STATES = ('PENDING', 'RUNNING', 'FINISHED')
 
@@ -53,8 +78,8 @@ class CreateJobCommand(Verb):
         default='PENDING',
         help=('Block the client until all the tasks have transitioned into the requested state. '
                         'Default: PENDING'))
-    self.add_option(parser, JOBSPEC_OPTION)
-    self.add_option(parser, CONFIG_OPTION)
+    self.add_option(parser, JOBSPEC_ARGUMENT)
+    self.add_option(parser, CONFIG_ARGUMENT)
 
   def execute(self, context):
     try:
@@ -65,13 +90,17 @@ class CreateJobCommand(Verb):
     api = context.get_api(config.cluster())
     monitor = JobMonitor(api, config.role(), config.environment(), config.name())
     resp = api.create_job(config)
-    context.check_and_log_response(resp)
+    if resp.responseCode == ResponseCode.INVALID_REQUEST:
+      raise context.CommandError(EXIT_INVALID_PARAMETER, 'Job not found')
+    elif resp.responseCode == ResponseCode.ERROR:
+      raise context.CommandError(EXIT_COMMAND_FAILURE, resp.message)
     if context.options.open_browser:
       context.open_job_page(api, config)
     if context.options.wait_until == 'RUNNING':
       monitor.wait_until(monitor.running_or_finished)
     elif context.options.wait_until == 'FINISHED':
       monitor.wait_until(monitor.terminal)
+    return EXIT_OK
 
 
 class KillJobCommand(Verb):
@@ -79,6 +108,13 @@ class KillJobCommand(Verb):
   def name(self):
     return 'kill'
 
+  @property
+  def help(self):
+    return '''Usage: kill cluster/role/env/job
+
+    Kill a scheduled job
+    '''
+
   def setup_options_parser(self, parser):
     self.add_option(parser, BROWSER_OPTION)
     parser.add_argument('--instances', type=parse_instances, dest='instances', default=None,
@@ -87,13 +123,262 @@ class KillJobCommand(Verb):
             'all instances will be acted on.')
     parser.add_argument('--config', type=str, default=None, dest='config',
          help='Config file for the job, possibly containing hooks')
-    self.add_option(parser, JOBSPEC_OPTION)
+    self.add_option(parser, JOBSPEC_ARGUMENT)
 
   def execute(self, context):
+    # TODO: Check for wildcards; we don't allow wildcards for job kill.
     api = context.get_api(context.options.jobspec.cluster)
     resp = api.kill_job(context.options.jobspec, context.options.instances)
-    context.check_and_log_response(resp)
-    context.handle_open(api)
+    if resp.responseCode != ResponseCode.OK:
+      context.print_err('Job %s not found' % context.options.jobspec, file=sys.stderr)
+      return EXIT_INVALID_PARAMETER
+    if context.options.open_browser:
+      context.open_job_page(api, context.options.jobspec)
+
+
+class StatusCommand(Verb):
+  @property
+  def help(self):
+    return '''Usage: aurora status jobspec
+
+    Get status information about a scheduled job or group of jobs. The
+    jobspec parameter can omit parts of the jobkey, or use shell-style globs.
+    '''
+
+  @property
+  def name(self):
+    return 'status'
+
+  def setup_options_parser(self, parser):
+    parser.add_argument('--json', default=False, action='store_true',
+        help='Show status information in machine-processable JSON format')
+    parser.add_argument('jobspec', type=arg_type_jobkey)
+
+  def render_tasks_json(self, jobkey, active_tasks, inactive_tasks):
+    """Render the tasks running for a job in machine-processable JSON format."""
+    def render_task_json(scheduled_task):
+      """Render a single task into json. This is baroque, but it uses thrift to
+      give us all of the job status data, while allowing us to compose it with
+      other stuff and pretty-print it.
+      """
+      return json.loads(serialize(scheduled_task,
+          protocol_factory=TJSONProtocol.TSimpleJSONProtocolFactory()))
+
+    return {'job': str(jobkey),
+        'active': [render_task_json(task) for task in active_tasks],
+        'inactive': [render_task_json(task) for task in inactive_tasks]}
+
+  def render_tasks_pretty(self, jobkey, active_tasks, inactive_tasks):
+    """Render the tasks for a job in human-friendly format"""
+    def render_task_pretty(scheduled_task):
+      assigned_task = scheduled_task.assignedTask
+      task_info = assigned_task.task
+      task_strings = []
+      if task_info:
+        task_strings.append('''\tcpus: %s, ram: %s MB, disk: %s MB''' % (
+            task_info.numCpus, task_info.ramMb, task_info.diskMb))
+      if assigned_task.assignedPorts:
+        task_strings.append('ports: %s' % assigned_task.assignedPorts)
+        # TODO: only add the max if taskInfo is filled in!
+        task_strings.append('failure count: %s (max %s)' % (scheduled_task.failureCount,
+            task_info.maxTaskFailures))
+        task_strings.append('events:')
+      for event in scheduled_task.taskEvents:
+        task_strings.append('\t %s %s: %s' % (datetime.fromtimestamp(event.timestamp / 1000),
+            ScheduleStatus._VALUES_TO_NAMES[event.status], event.message))
+        task_strings.append('packages:')
+        for pkg in assigned_task.task.packages:
+          task_strings.append('\trole: %s, package: %s, version: %s' %
+              (pkg.role, pkg.name, pkg.version))
+      return '\n\t'.join(task_strings)
+
+    result = ["Active tasks (%s):\n" % len(active_tasks)]
+    for t in active_tasks:
+      result.append(render_task_pretty(t))
+    result.append("Inactive tasks (%s):\n" % len(inactive_tasks))
+    for t in inactive_tasks:
+      result.append(render_task_pretty(t))
+    return ''.join(result)
+
+  def get_status_for_jobs(self, jobkeys, context):
+    """Retrieve and render the status information for a collection of jobs"""
+    def is_active(task):
+      return task.status in ACTIVE_STATES
+
+    result = []
+    for jk in jobkeys:
+      job_tasks = context.get_job_status(jk)
+      active_tasks = [t for t in job_tasks if is_active(t)]
+      inactive_tasks = [t for t in job_tasks if not is_active(t)]
+      if context.options.json:
+        result.append(self.render_tasks_json(jk, active_tasks, inactive_tasks))
+      else:
+        result.append(self.render_tasks_pretty(jk, active_tasks, inactive_tasks))
+    if context.options.json:
+      return json.dumps(result, indent=2, separators=[',', ': '], sort_keys=False)
+    else:
+      return ''.join(result)
+
+  def execute(self, context):
+    jobs = context.get_jobs_matching_key(context.options.jobspec)
+    result = self.get_status_for_jobs(jobs, context)
+    context.print_out(result)
+
+
+class DiffCommand(Verb):
+
+  def __init__(self):
+    super(DiffCommand, self).__init__()
+    self.prettyprinter = pprint.PrettyPrinter(indent=2)
+
+  @property
+  def help(self):
+    return """usage: diff cluster/role/env/job config
+
+  Compares a job configuration against a running job.
+  By default the diff will be displayed using 'diff', though you may choose an alternate
+  diff program by setting the DIFF_VIEWER environment variable.
+  """
+
+  @property
+  def name(self):
+    return 'diff'
+
+  def setup_options_parser(self, parser):
+    self.add_option(parser, BIND_OPTION)
+    self.add_option(parser, JSON_OPTION)
+    parser.add_argument('--from', dest='rename_from', type=AuroraJobKey.from_path, default=None,
+        help='If specified, the job key to diff against.')
+    self.add_option(parser, JOBSPEC_ARGUMENT)
+    self.add_option(parser, CONFIG_ARGUMENT)
+
+  def pretty_print_task(self, task):
+    task.configuration = None
+    task.executorConfig = ExecutorConfig(
+        name=AURORA_EXECUTOR_NAME,
+        data=json.loads(task.executorConfig.data))
+    return self.prettyprinter.pformat(vars(task))
+
+  def pretty_print_tasks(self, tasks):
+    return ',\n'.join(self.pretty_print_task(t) for t in tasks)
+
+  def dump_tasks(self, tasks, out_file):
+    out_file.write(self.pretty_print_tasks(tasks))
+    out_file.write('\n')
+    out_file.flush()
+
+  def execute(self, context):
+    try:
+      config = context.get_job_config(context.options.jobspec, context.options.config_file)
+    except Config.InvalidConfigError as e:
+      raise context.CommandError(EXIT_INVALID_CONFIGURATION,
+          'Error loading job configuration: %s' % e)
+    if context.options.rename_from is not None:
+      cluster = context.options.rename_from.cluster
+      role = context.options.rename_from.role
+      env = context.options.rename_from.environment
+      name = context.options.rename_from.name
+    else:
+      cluster = config.cluster()
+      role = config.role()
+      env = config.environment()
+      name = config.name()
+    api = context.get_api(cluster)
+    resp = api.query(api.build_query(role, name, statuses=ACTIVE_STATES, env=env))
+    if resp.responseCode != ResponseCode.OK:
+      raise context.CommandError(EXIT_INVALID_PARAMETER, 'Could not find job to diff against')
+    remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]
+    resp = api.populate_job_config(config)
+    if resp.responseCode != ResponseCode.OK:
+      raise context.CommandError(EXIT_INVALID_CONFIGURATION,
+          'Error loading configuration: %s' % resp.message)
+    local_tasks = resp.result.populateJobResult.populated
+    diff_program = os.environ.get('DIFF_VIEWER', 'diff')
+    with NamedTemporaryFile() as local:
+      self.dump_tasks(local_tasks, local)
+      with NamedTemporaryFile() as remote:
+        self.dump_tasks(remote_tasks, remote)
+        result = subprocess.call([diff_program, remote.name, local.name])
+        # Unlike most commands, diff doesn't return zero on success; it returns
+        # 1 when a successful diff is non-empty.
+        if result not in (0, 1):
+          raise context.CommandError(EXIT_COMMAND_FAILURE, 'Error running diff command')
+        else:
+          return EXIT_OK
+
+
+class InspectCommand(Verb):
+  @property
+  def help(self):
+    return """usage: inspect cluster/role/env/job config
+
+  Verifies that a job can be parsed from a configuration file, and displays
+  the parsed configuration.
+  """
+
+  @property
+  def name(self):
+    return 'inspect'
+
+  def setup_options_parser(self, parser):
+    self.add_option(parser, BIND_OPTION)
+    self.add_option(parser, JSON_OPTION)
+    parser.add_argument('--local', dest='local', default=False, action='store_true',
+        help='Inspect the configuration as would be created by the "spawn" command.')
+    parser.add_argument('--raw', dest='raw', default=False, action='store_true',
+        help='Show the raw configuration.')
+
+    self.add_option(parser, JOBSPEC_ARGUMENT)
+    self.add_option(parser, CONFIG_ARGUMENT)
+
+  def execute(self, context):
+    config = context.get_job_config(context.options.jobspec, context.options.config_file)
+    if context.options.raw:
+      print(config.job())
+      return EXIT_OK
+    job_thrift = config.job()
+    job = config.raw()
+    job_thrift = config.job()
+    print('Job level information')
+    print('  name:       %s' % job.name())
+    print('  role:       %s' % job.role())
+    print('  contact:    %s' % job.contact())
+    print('  cluster:    %s' % job.cluster())
+    print('  instances:  %s' % job.instances())
+    if job.has_cron_schedule():
+      print('  cron:')
+      print('     schedule: %s' % job.cron_schedule())
+      print('     policy:   %s' % job.cron_collision_policy())
+    if job.has_constraints():
+      print('  constraints:')
+      for constraint, value in job.constraints().get().items():
+        print('    %s: %s' % (constraint, value))
+    print('  service:    %s' % job_thrift.taskConfig.isService)
+    print('  production: %s' % bool(job.production().get()))
+    print()
+
+    task = job.task()
+    print('Task level information')
+    print('  name: %s' % task.name())
+    if len(task.constraints().get()) > 0:
+      print('  constraints:')
+      for constraint in task.constraints():
+        print('    %s' % (' < '.join(st.get() for st in constraint.order() or [])))
+    print()
+
+    processes = task.processes()
+    for process in processes:
+      print('Process %s:' % process.name())
+      if process.daemon().get():
+        print('  daemon')
+      if process.ephemeral().get():
+        print('  ephemeral')
+      if process.final().get():
+        print('  final')
+      print('  cmdline:')
+      for line in process.cmdline().get().splitlines():
+        print('    ' + line)
+      print()
 
 
 class Job(Noun):
@@ -113,3 +398,6 @@ class Job(Noun):
     super(Job, self).__init__()
     self.register_verb(CreateJobCommand())
     self.register_verb(KillJobCommand())
+    self.register_verb(StatusCommand())
+    self.register_verb(DiffCommand())
+    self.register_verb(InspectCommand())

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/apache/aurora/client/cli/options.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/client/cli/options.py b/src/main/python/apache/aurora/client/cli/options.py
index 80fabb9..0aaf880 100644
--- a/src/main/python/apache/aurora/client/cli/options.py
+++ b/src/main/python/apache/aurora/client/cli/options.py
@@ -13,11 +13,11 @@ BROWSER_OPTION = CommandOption('--open-browser', default=False, dest='open_brows
     help='open browser to view job page after job is created')
 
 
-CONFIG_OPTION = CommandOption('config_file', type='str', dest='config_file',
+CONFIG_ARGUMENT = CommandOption('config_file', type=str,
     help='pathname of the aurora configuration file contain the job specification')
 
 
-JOBSPEC_OPTION = CommandOption('jobspec', type=AuroraJobKey.from_path,
+JOBSPEC_ARGUMENT = CommandOption('jobspec', type=AuroraJobKey.from_path,
     help='Fully specified job key, in CLUSTER/ROLE/ENV/NAME format')
 
 

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/apache/aurora/client/options.py
----------------------------------------------------------------------
diff --git a/src/main/python/apache/aurora/client/options.py b/src/main/python/apache/aurora/client/options.py
index af56351..f398748 100644
--- a/src/main/python/apache/aurora/client/options.py
+++ b/src/main/python/apache/aurora/client/options.py
@@ -82,6 +82,8 @@ def make_env_option(explanation):
     default=None,
     help=explanation)
 
+# Note: in these predefined options, "OPTION" is used in names of optional arguments,
+# and "PARAMETER" is used in names of required ones.
 
 OPEN_BROWSER_OPTION = optparse.Option(
     '-o',

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/main/python/twitter/aurora/client/cli/BUILD
----------------------------------------------------------------------
diff --git a/src/main/python/twitter/aurora/client/cli/BUILD b/src/main/python/twitter/aurora/client/cli/BUILD
new file mode 100644
index 0000000..9ef5c84
--- /dev/null
+++ b/src/main/python/twitter/aurora/client/cli/BUILD
@@ -0,0 +1,26 @@
+
+python_binary(
+  name='client',
+  entry_point = 'twitter.aurora.client.cli:main',
+  dependencies = [ pants(':cli') ],
+  )
+
+python_library(
+  name='cli',
+  sources = [ '__init__.py', 'context.py', 'jobs.py' ],
+  dependencies = [
+    pants('aurora/twitterdeps/src/python/twitter/common/python'),
+    pants('src/main/python/twitter/aurora/client/api:command_runner'),
+    pants('src/main/python/twitter/aurora/client/api:disambiguator'),
+    pants('src/main/python/twitter/aurora/client/api:job_monitor'),
+    pants('src/main/python/twitter/aurora/client/api:updater'),
+    pants('src/main/python/twitter/aurora/client/hooks'),
+    pants('src/main/python/twitter/aurora/client:base'),
+    pants('src/main/python/twitter/aurora/client:config'),
+    pants('src/main/python/twitter/aurora/client:factory'),
+    pants('src/main/python/twitter/aurora/client:options'),
+    pants('src/main/python/twitter/aurora/common'),
+    pants('src/main/thrift/com/twitter/aurora/gen:py-thrift'),
+    pants('src/main/python/twitter/aurora:argparse')
+    ]
+  )

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/apache/aurora/client/cli/BUILD
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/cli/BUILD b/src/test/python/apache/aurora/client/cli/BUILD
index 9528eed..bf21590 100644
--- a/src/test/python/apache/aurora/client/cli/BUILD
+++ b/src/test/python/apache/aurora/client/cli/BUILD
@@ -14,7 +14,7 @@ python_library(
 
 python_tests(
   name = 'job',
-  sources = [ 'test_create.py', 'test_kill.py' ],
+  sources = [ 'test_create.py', 'test_kill.py', 'test_status.py' ],
   dependencies = [
     pants(':util'),
     pants('src/main/python/apache/aurora/BUILD.thirdparty:mock'),

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/apache/aurora/client/cli/test_create.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/cli/test_create.py b/src/test/python/apache/aurora/client/cli/test_create.py
index df2386d..2b0c504 100644
--- a/src/test/python/apache/aurora/client/cli/test_create.py
+++ b/src/test/python/apache/aurora/client/cli/test_create.py
@@ -1,13 +1,5 @@
 import contextlib
 
-from apache.aurora.client.cli import (
-    AuroraCommandLine,
-    EXIT_INVALID_CONFIGURATION,
-    EXIT_NETWORK_ERROR
-)
-from apache.aurora.client.cli.util import AuroraClientCommandTest, FakeAuroraCommandContext
-from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
-from apache.aurora.config import AuroraConfig
 from twitter.common.contextutil import temporary_file
 
 from gen.apache.aurora.ttypes import (
@@ -20,6 +12,14 @@ from gen.apache.aurora.ttypes import (
     TaskQuery,
 )
 
+from apache.aurora.client.cli import (
+    AuroraCommandLine,
+    EXIT_COMMAND_FAILURE,
+    EXIT_INVALID_CONFIGURATION
+)
+from apache.aurora.client.cli.util import AuroraClientCommandTest, FakeAuroraCommandContext
+from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
+from apache.aurora.config import AuroraConfig
 from mock import Mock, patch
 
 
@@ -121,7 +121,7 @@ class TestClientCreateCommand(AuroraClientCommandTest):
         fp.write(self.get_valid_config())
         fp.flush()
         cmd = AuroraCommandLine()
-        cmd.execute(['job', 'create', '--wait_until=RUNNING', 'west/mchucarroll/test/hello',
+        cmd.execute(['job', 'create', '--wait_until=RUNNING', 'west/bozo/test/hello',
             fp.name])
 
       # Now check that the right API calls got made.
@@ -147,7 +147,7 @@ class TestClientCreateCommand(AuroraClientCommandTest):
         fp.write(self.get_valid_config())
         fp.flush()
         cmd = AuroraCommandLine()
-        cmd.execute(['job', 'create', '--wait_until=RUNNING', 'west/mchucarroll/test/hello',
+        cmd.execute(['job', 'create', '--wait_until=RUNNING', 'west/bozo/test/hello',
             fp.name])
         # Now check that the right API calls got made.
         # Check that create_job was called exactly once, with an AuroraConfig parameter.
@@ -170,8 +170,8 @@ class TestClientCreateCommand(AuroraClientCommandTest):
         fp.flush()
         cmd = AuroraCommandLine()
         result = cmd.execute(['job', 'create', '--wait_until=RUNNING',
-            'west/mchucarroll/test/hello', fp.name])
-        assert result == EXIT_NETWORK_ERROR
+            'west/bozo/test/hello', fp.name])
+        assert result == EXIT_COMMAND_FAILURE
 
       # Now check that the right API calls got made.
       # Check that create_job was called exactly once, with an AuroraConfig parameter.
@@ -190,7 +190,7 @@ class TestClientCreateCommand(AuroraClientCommandTest):
         fp.flush()
         cmd = AuroraCommandLine()
         result = cmd.execute(['job', 'create', '--wait_until=RUNNING',
-            'west/mchucarroll/test/hello', fp.name])
+            'west/bozo/test/hello', fp.name])
         assert result == EXIT_INVALID_CONFIGURATION
 
       # Now check that the right API calls got made.

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/apache/aurora/client/cli/test_kill.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/cli/test_kill.py b/src/test/python/apache/aurora/client/cli/test_kill.py
index 75fb6b6..66d729d 100644
--- a/src/test/python/apache/aurora/client/cli/test_kill.py
+++ b/src/test/python/apache/aurora/client/cli/test_kill.py
@@ -29,20 +29,6 @@ class TestInstancesParser(unittest.TestCase):
 
 class TestClientKillCommand(AuroraClientCommandTest):
   @classmethod
-  def setup_mock_api(cls):
-    """Builds up a mock API object, with a mock SchedulerProxy.
-    Returns the API and the proxy"""
-
-    mock_scheduler = Mock()
-    mock_scheduler.url = "http://something_or_other"
-    mock_scheduler_client = Mock()
-    mock_scheduler_client.scheduler.return_value = mock_scheduler
-    mock_scheduler_client.url = "http://something_or_other"
-    mock_api = Mock(spec=HookedAuroraClientAPI)
-    mock_api.scheduler = mock_scheduler_client
-    return (mock_api, mock_scheduler_client)
-
-  @classmethod
   def get_kill_job_response(cls):
     return cls.create_simple_success_response()
 
@@ -65,11 +51,11 @@ class TestClientKillCommand(AuroraClientCommandTest):
         fp.write(self.get_valid_config())
         fp.flush()
         cmd = AuroraCommandLine()
-        cmd.execute(['job', 'kill', '--config=%s' % fp.name, 'west/mchucarroll/test/hello'])
+        cmd.execute(['job', 'kill', '--config=%s' % fp.name, 'west/bozo/test/hello'])
 
       # Now check that the right API calls got made.
       assert api.kill_job.call_count == 1
-      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/mchucarroll/test/hello'), None)
+      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'), None)
 
   def test_kill_job_with_instances(self):
     """Test kill client-side API logic."""
@@ -84,16 +70,15 @@ class TestClientKillCommand(AuroraClientCommandTest):
         fp.flush()
         cmd = AuroraCommandLine()
         cmd.execute(['job', 'kill', '--config=%s' % fp.name, '--instances=0,2,4-6',
-           'west/mchucarroll/test/hello'])
+           'west/bozo/test/hello'])
 
       # Now check that the right API calls got made.
       assert api.kill_job.call_count == 1
-      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/mchucarroll/test/hello'),
+      api.kill_job.assert_called_with(AuroraJobKey.from_path('west/bozo/test/hello'),
           [0, 2, 4, 5, 6])
 
   def test_kill_job_with_instances_deep_api(self):
     """Test kill client-side API logic."""
-    mock_context = FakeAuroraCommandContext()
     (mock_api, mock_scheduler) = self.setup_mock_api()
     with contextlib.nested(
         patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler),
@@ -104,11 +89,9 @@ class TestClientKillCommand(AuroraClientCommandTest):
         fp.flush()
         cmd = AuroraCommandLine()
         cmd.execute(['job', 'kill', '--config=%s' % fp.name, '--instances=0,2,4-6',
-           'west/mchucarroll/test/hello'])
+           'west/bozo/test/hello'])
       # Now check that the right API calls got made.
       assert mock_scheduler.killTasks.call_count == 1
       mock_scheduler.killTasks.assert_called_with(
         TaskQuery(jobName='hello', environment='test', instanceIds=frozenset([0, 2, 4, 5, 6]),
-            owner=Identity(role='mchucarroll')), None)
-
-
+            owner=Identity(role='bozo')), None)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/apache/aurora/client/cli/test_status.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/cli/test_status.py b/src/test/python/apache/aurora/client/cli/test_status.py
new file mode 100644
index 0000000..38f3018
--- /dev/null
+++ b/src/test/python/apache/aurora/client/cli/test_status.py
@@ -0,0 +1,161 @@
+import contextlib
+
+from gen.apache.aurora.ttypes import (
+    AssignedTask,
+    Identity,
+    JobKey,
+    ResponseCode,
+    ScheduleStatus,
+    ScheduleStatusResult,
+    TaskConfig,
+    TaskEvent,
+    TaskQuery,
+)
+
+from apache.aurora.client.cli import (
+    AuroraCommandLine,
+    EXIT_INVALID_PARAMETER
+)
+from apache.aurora.common.aurora_job_key import AuroraJobKey
+from apache.aurora.client.cli.util import AuroraClientCommandTest, FakeAuroraCommandContext
+
+from mock import call, Mock, patch
+
+
+class TestJobStatus(AuroraClientCommandTest):
+
+  @classmethod
+  def create_mock_scheduled_tasks(cls):
+    jobs = []
+    for name in ['foo', 'bar', 'baz']:
+      job = Mock()
+      job.key = JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=name)
+      job.failure_count = 0
+      job.assignedTask = Mock(spec=AssignedTask)
+      job.assignedTask.slaveHost = 'slavehost'
+      job.assignedTask.task = Mock(spec=TaskConfig)
+      job.assignedTask.task.maxTaskFailures = 1
+      job.assignedTask.task.packages = []
+      job.assignedTask.task.owner = Identity(role='bozo')
+      job.assignedTask.task.environment = 'test'
+      job.assignedTask.task.jobName = 'woops'
+      job.assignedTask.task.numCpus = 2
+      job.assignedTask.task.ramMb = 2
+      job.assignedTask.task.diskMb = 2
+      job.assignedTask.instanceId = 4237894
+      job.assignedTask.assignedPorts = None
+      job.status = ScheduleStatus.RUNNING
+      mockEvent = Mock(spec=TaskEvent)
+      mockEvent.timestamp = 28234726395
+      mockEvent.status = ScheduleStatus.RUNNING
+      mockEvent.message = "Hi there"
+      job.taskEvents = [mockEvent]
+      jobs.append(job)
+    return jobs
+
+  @classmethod
+  def create_getjobs_response(cls):
+    result = Mock()
+    result.responseCode = ResponseCode.OK
+    result.result = Mock()
+    result.result.getJobsResult = Mock()
+    mock_job_one = Mock()
+    mock_job_one.key = Mock()
+    mock_job_one.key.role = 'RoleA'
+    mock_job_one.key.environment = 'test'
+    mock_job_one.key.name = 'hithere'
+    mock_job_two = Mock()
+    mock_job_two.key = Mock()
+    mock_job_two.key.role = 'bozo'
+    mock_job_two.key.environment = 'test'
+    mock_job_two.key.name = 'hello'
+    result.result.getJobsResult.configs = [mock_job_one, mock_job_two]
+    return result
+
+  @classmethod
+  def create_status_response(cls):
+    resp = cls.create_simple_success_response()
+    resp.result.scheduleStatusResult = Mock(spec=ScheduleStatusResult)
+    resp.result.scheduleStatusResult.tasks = set(cls.create_mock_scheduled_tasks())
+    return resp
+
+  @classmethod
+  def create_failed_status_response(cls):
+    return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
+
+  def test_successful_status_shallow(self):
+    """Test the status command at the shallowest level: calling status should end up invoking
+    the local API's get_status method."""
+    mock_context = FakeAuroraCommandContext()
+    mock_api = mock_context.get_api('west')
+    mock_api.check_status.return_value = self.create_status_response()
+    with contextlib.nested(
+        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
+      cmd = AuroraCommandLine()
+      cmd.execute(['job', 'status', 'west/bozo/test/hello'])
+      mock_api.check_status.assert_called_with(AuroraJobKey('west', 'bozo', 'test', 'hello'))
+
+  def test_successful_status_deep(self):
+    """Test the status command more deeply: in a request with a fully specified
+    job, it should end up doing a query using getTasksStatus."""
+    (mock_api, mock_scheduler) = self.setup_mock_api()
+    mock_scheduler.query.return_value = self.create_status_response()
+    with contextlib.nested(
+        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler),
+        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS)):
+      cmd = AuroraCommandLine()
+      cmd.execute(['job', 'status', 'west/bozo/test/hello'])
+      mock_scheduler.getTasksStatus.assert_called_with(TaskQuery(jobName='hello',
+          environment='test', owner=Identity(role='bozo')))
+
+  def test_status_wildcard(self):
+    """Test status using a wildcard. It should first call api.get_jobs, and then do a
+    getTasksStatus on each job."""
+    mock_context = FakeAuroraCommandContext()
+    mock_api = mock_context.get_api('west')
+    mock_api.check_status.return_value = self.create_status_response()
+    mock_api.get_jobs.return_value = self.create_getjobs_response()
+    with contextlib.nested(
+        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
+      cmd = AuroraCommandLine()
+      cmd.execute(['job', 'status', '*'])
+
+    # Wildcard should have expanded to two jobs, so there should be two calls
+    # to check_status.
+    assert mock_api.check_status.call_count == 2
+    assert (call(AuroraJobKey('example', 'RoleA', 'test', 'hithere')) in
+        mock_api.check_status.call_args_list)
+    assert (call(AuroraJobKey('example', 'bozo', 'test', 'hello')) in
+        mock_api.check_status.call_args_list)
+
+  def test_status_wildcard_two(self):
+    """Test status using a wildcard. It should first call api.get_jobs, and then do a
+    getTasksStatus on each job. This time, use a pattern that doesn't match all of the jobs."""
+    mock_context = FakeAuroraCommandContext()
+    mock_api = mock_context.get_api('west')
+    mock_api.check_status.return_value = self.create_status_response()
+    mock_api.get_jobs.return_value = self.create_getjobs_response()
+    with contextlib.nested(
+        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
+      cmd = AuroraCommandLine()
+      cmd.execute(['job', 'status', 'example/*/*/hello'])
+
+    # Wildcard should have expanded to two jobs, but only matched one,
+    # so there should be one call to check_status.
+    assert mock_api.check_status.call_count == 1
+    mock_api.check_status.assert_called_with(
+        AuroraJobKey('example', 'bozo', 'test', 'hello'))
+
+  def test_unsuccessful_status_shallow(self):
+    """Test the status command at the shallowest level: calling status should end up invoking
+    the local API's get_status method."""
+    # Calls api.check_status, which calls scheduler.getJobs
+    mock_context = FakeAuroraCommandContext()
+    mock_api = mock_context.get_api('west')
+    mock_api.check_status.return_value = self.create_failed_status_response()
+    #    mock_api.scheduler.getTasksStatus.return_value =
+    with contextlib.nested(
+        patch('apache.aurora.client.cli.jobs.Job.create_context', return_value=mock_context)):
+      cmd = AuroraCommandLine()
+      result = cmd.execute(['job', 'status', 'west/bozo/test/hello'])
+      assert result == EXIT_INVALID_PARAMETER

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/apache/aurora/client/cli/util.py
----------------------------------------------------------------------
diff --git a/src/test/python/apache/aurora/client/cli/util.py b/src/test/python/apache/aurora/client/cli/util.py
index c6d3830..2985865 100644
--- a/src/test/python/apache/aurora/client/cli/util.py
+++ b/src/test/python/apache/aurora/client/cli/util.py
@@ -1,16 +1,16 @@
 import unittest
 
-from apache.aurora.client.cli.context import AuroraCommandContext
-from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
-from apache.aurora.common.cluster import Cluster
-from apache.aurora.common.clusters import Clusters
-
 from gen.apache.aurora.ttypes import (
     Response,
     ResponseCode,
     Result,
 )
 
+from apache.aurora.client.cli.context import AuroraCommandContext
+from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
+from apache.aurora.common.cluster import Cluster
+from apache.aurora.common.clusters import Clusters
+
 from mock import Mock
 
 
@@ -26,6 +26,20 @@ class FakeAuroraCommandContext(AuroraCommandContext):
   def get_api(self, cluster):
     return self.fake_api
 
+  @classmethod
+  def setup_mock_api(cls):
+    """Builds up a mock API object, with a mock SchedulerProxy.
+    Returns the API and the proxy"""
+
+    mock_scheduler = Mock()
+    mock_scheduler.url = "http://something_or_other"
+    mock_scheduler_client = Mock()
+    mock_scheduler_client.scheduler.return_value = mock_scheduler
+    mock_scheduler_client.url = "http://something_or_other"
+    mock_api = Mock(spec=HookedAuroraClientAPI)
+    mock_api.scheduler = mock_scheduler_client
+    return (mock_api, mock_scheduler_client)
+
   def setup_fake_api(self):
     # In here, we'd like to get it mocked so that the HookedAuroraClientAPI
     # object, and its underlying AuroraClientAPI objects are not
@@ -33,7 +47,7 @@ class FakeAuroraCommandContext(AuroraCommandContext):
     new_fake = Mock(spec=HookedAuroraClientAPI)
     new_fake.scheduler = Mock()
     new_fake.scheduler.url = 'http://something_or_other'
-    new_fake.scheduler.getTasksStatus.side_effect = []
+#    new_fake.scheduler.getTasksStatus.side_effect = []
     self.fake_api = new_fake
     return self.fake_api
 
@@ -59,6 +73,21 @@ class AuroraClientCommandTest(unittest.TestCase):
     return response
 
   @classmethod
+  def setup_mock_api(cls):
+    """Builds up a mock API object, with a mock SchedulerProxy.
+    Returns the API and the proxy"""
+    # TODO: merge this with setup_fake_api (MESOS-4861)
+    mock_scheduler = Mock()
+    mock_scheduler.url = "http://something_or_other"
+    mock_scheduler_client = Mock()
+    mock_scheduler_client.scheduler.return_value = mock_scheduler
+    mock_scheduler_client.url = "http://something_or_other"
+    mock_api = Mock(spec=HookedAuroraClientAPI)
+    mock_api.scheduler = mock_scheduler_client
+    return (mock_api, mock_scheduler_client)
+
+
+  @classmethod
   def create_simple_success_response(cls):
     return cls.create_blank_response(ResponseCode.OK, 'OK')
 
@@ -119,7 +148,7 @@ HELLO_WORLD = Job(
 jobs = [HELLO_WORLD]
 """
 
-  TEST_ROLE = 'mchucarroll'
+  TEST_ROLE = 'bozo'
 
   TEST_ENV = 'test'
 
@@ -127,7 +156,7 @@ jobs = [HELLO_WORLD]
 
   TEST_CLUSTER = 'west'
 
-  TEST_JOBSPEC = 'west/mchucarroll/test/hello'
+  TEST_JOBSPEC = 'west/bozo/test/hello'
 
   TEST_CLUSTERS = Clusters([Cluster(
       name='west',

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/twitter/aurora/client/cli/BUILD
----------------------------------------------------------------------
diff --git a/src/test/python/twitter/aurora/client/cli/BUILD b/src/test/python/twitter/aurora/client/cli/BUILD
new file mode 100644
index 0000000..e4ea5da
--- /dev/null
+++ b/src/test/python/twitter/aurora/client/cli/BUILD
@@ -0,0 +1,30 @@
+python_test_suite(
+  name = 'all',
+  dependencies = [ pants(':job') ]
+)
+
+python_library(
+  name = 'util',
+  sources = [ 'util.py' ],
+  dependencies = [
+    pants('src/main/python/apache/aurora/BUILD.thirdparty:mock'),
+    pants('src/main/python/apache/aurora/client/cli'),
+  ]
+)
+
+python_tests(
+  name = 'job',
+  sources = [ 'test_create.py', 'test_kill.py', 'test_status.py', 'test_diff.py' ],
+  dependencies = [
+    pants(':util'),
+    pants('src/main/python/apache/aurora/BUILD.thirdparty:mock'),
+    pants('aurora/twitterdeps/src/python/twitter/common/contextutil'),
+    pants('src/main/python/apache/aurora/client/cli'),
+    pants('src/test/python/apache/aurora/client/commands:util')
+  ]
+)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/e1aee67b/src/test/python/twitter/aurora/client/cli/test_diff.py
----------------------------------------------------------------------
diff --git a/src/test/python/twitter/aurora/client/cli/test_diff.py b/src/test/python/twitter/aurora/client/cli/test_diff.py
new file mode 100644
index 0000000..363352f
--- /dev/null
+++ b/src/test/python/twitter/aurora/client/cli/test_diff.py
@@ -0,0 +1,182 @@
+import contextlib
+
+from twitter.aurora.client.cli import (
+    AuroraCommandLine,
+    EXIT_INVALID_CONFIGURATION,
+    EXIT_INVALID_PARAMETER
+)
+from twitter.aurora.client.cli.util import AuroraClientCommandTest
+from twitter.common.contextutil import temporary_file
+
+from gen.twitter.aurora.ttypes import (
+    AssignedTask,
+    ExecutorConfig,
+    Identity,
+    JobConfiguration,
+    JobKey,
+    PopulateJobResult,
+    ResponseCode,
+    ScheduleStatus,
+    ScheduleStatusResult,
+    TaskConfig,
+    TaskEvent,
+    TaskQuery,
+)
+
+from mock import Mock, patch
+
+
+class TestDiffCommand(AuroraClientCommandTest):
+  @classmethod
+  def setup_mock_options(cls):
+    """set up to get a mock options object."""
+    mock_options = Mock()
+    mock_options.env = None
+    mock_options.json = False
+    mock_options.bindings = {}
+    mock_options.open_browser = False
+    mock_options.rename_from = None
+    mock_options.cluster = None
+    return mock_options
+
+  @classmethod
+  def create_mock_scheduled_tasks(cls):
+    jobs = []
+    for name in ['foo', 'bar', 'baz']:
+      job = Mock()
+      job.key = JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=name)
+      job.failure_count = 0
+      job.assignedTask = Mock(spec=AssignedTask)
+      job.assignedTask.slaveHost = 'slavehost'
+      job.assignedTask.task = Mock(spec=TaskConfig)
+      job.assignedTask.task.maxTaskFailures = 1
+      job.assignedTask.task.executorConfig = Mock(spec=ExecutorConfig)
+      job.assignedTask.task.executorConfig.data = Mock()
+      job.assignedTask.task.packages = []
+      job.assignedTask.task.owner = Identity(role='mchucarroll')
+      job.assignedTask.task.environment = 'test'
+      job.assignedTask.task.jobName = 'woops'
+      job.assignedTask.task.numCpus = 2
+      job.assignedTask.task.ramMb = 2
+      job.assignedTask.task.diskMb = 2
+      job.assignedTask.instanceId = 4237894
+      job.assignedTask.assignedPorts = None
+      job.status = ScheduleStatus.RUNNING
+      mockEvent = Mock(spec=TaskEvent)
+      mockEvent.timestamp = 28234726395
+      mockEvent.status = ScheduleStatus.RUNNING
+      mockEvent.message = "Hi there"
+      job.taskEvents = [mockEvent]
+      jobs.append(job)
+    return jobs
+
+  @classmethod
+  def create_status_response(cls):
+    resp = cls.create_simple_success_response()
+    resp.result.scheduleStatusResult = Mock(spec=ScheduleStatusResult)
+    resp.result.scheduleStatusResult.tasks = set(cls.create_mock_scheduled_tasks())
+    return resp
+
+  @classmethod
+  def create_failed_status_response(cls):
+    return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
+
+  @classmethod
+  def setup_populate_job_config(cls, api):
+    populate = cls.create_simple_success_response()
+    populate.result.populateJobResult = Mock(spec=PopulateJobResult)
+    api.populateJobConfig.return_value = populate
+    populate.result.populateJobResult.populated = cls.create_mock_scheduled_tasks()
+    return populate
+
+  def test_successful_diff(self):
+    """Test the diff command."""
+    (mock_api, mock_scheduler) = self.setup_mock_api()
+    with contextlib.nested(
+        patch('twitter.aurora.client.api.SchedulerProxy', return_value=mock_scheduler),
+        patch('twitter.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
+        patch('subprocess.call', return_value=0),
+        patch('json.loads', return_value=Mock())) as (_, _, subprocess_patch, _):
+      mock_scheduler.getTasksStatus.return_value = self.create_status_response()
+      self.setup_populate_job_config(mock_scheduler)
+      with temporary_file() as fp:
+        fp.write(self.get_valid_config())
+        fp.flush()
+        cmd = AuroraCommandLine()
+        cmd.execute(['job', 'diff', 'west/mchucarroll/test/hello', fp.name])
+
+        # Diff should get the task status, populate a config, and run diff.
+        mock_scheduler.getTasksStatus.assert_called_with(
+            TaskQuery(jobName='hello', environment='test', owner=Identity(role='mchucarroll'),
+                statuses=set([ScheduleStatus.PENDING, ScheduleStatus.STARTING,
+                    ScheduleStatus.RUNNING, ScheduleStatus.KILLING, ScheduleStatus.ASSIGNED,
+                    ScheduleStatus.RESTARTING, ScheduleStatus.PREEMPTING])))
+        assert mock_scheduler.populateJobConfig.call_count == 1
+        assert isinstance(mock_scheduler.populateJobConfig.call_args[0][0], JobConfiguration)
+        assert (mock_scheduler.populateJobConfig.call_args[0][0].key ==
+            JobKey(environment=u'test', role=u'mchucarroll', name=u'hello'))
+        # Subprocess should have been used to invoke diff with two parameters.
+        assert subprocess_patch.call_count == 1
+        assert len(subprocess_patch.call_args[0][0]) == 3
+        assert subprocess_patch.call_args[0][0][0] == 'diff'
+
+  def test_diff_invalid_config(self):
+    """Test the diff command if the user passes a config with an error in it."""
+    mock_options = self.setup_mock_options()
+    (mock_api, mock_scheduler) = self.create_mock_api()
+    mock_scheduler.getTasksStatus.return_value = self.create_status_response()
+    self.setup_populate_job_config(mock_scheduler)
+    with contextlib.nested(
+        patch('twitter.aurora.client.api.SchedulerProxy', return_value=mock_scheduler),
+        patch('twitter.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
+        patch('twitter.common.app.get_options', return_value=mock_options),
+        patch('subprocess.call', return_value=0),
+        patch('json.loads', return_value=Mock())) as (
+            mock_scheduler_proxy_class,
+            mock_clusters,
+            options,
+            subprocess_patch,
+            json_patch):
+      with temporary_file() as fp:
+        fp.write(self.get_invalid_config('stupid="me"',))
+        fp.flush()
+        cmd = AuroraCommandLine()
+        result = cmd.execute(['job', 'diff', 'west/mchucarroll/test/hello', fp.name])
+        assert result == EXIT_INVALID_CONFIGURATION
+        assert mock_scheduler.getTasksStatus.call_count == 0
+        assert mock_scheduler.populateJobConfig.call_count == 0
+        assert subprocess_patch.call_count == 0
+
+  def test_diff_server_error(self):
+    """Test the diff command if the user passes a config with an error in it."""
+    mock_options = self.setup_mock_options()
+    (mock_api, mock_scheduler) = self.create_mock_api()
+    mock_scheduler.getTasksStatus.return_value = self.create_failed_status_response()
+    self.setup_populate_job_config(mock_scheduler)
+    with contextlib.nested(
+        patch('twitter.aurora.client.api.SchedulerProxy', return_value=mock_scheduler),
+        patch('twitter.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
+        patch('twitter.common.app.get_options', return_value=mock_options),
+        patch('subprocess.call', return_value=0),
+        patch('json.loads', return_value=Mock())) as (
+            mock_scheduler_proxy_class,
+            mock_clusters,
+            options,
+            subprocess_patch,
+            json_patch):
+      with temporary_file() as fp:
+        fp.write(self.get_valid_config())
+        fp.flush()
+        cmd = AuroraCommandLine()
+        result = cmd.execute(['job', 'diff', 'west/mchucarroll/test/hello', fp.name])
+        assert result == EXIT_INVALID_PARAMETER
+        # In this error case, we should have called the server getTasksStatus;
+        # but since it fails, we shouldn't call populateJobConfig or subprocess.
+        mock_scheduler.getTasksStatus.assert_called_with(
+            TaskQuery(jobName='hello', environment='test', owner=Identity(role='mchucarroll'),
+                statuses=set([ScheduleStatus.PENDING, ScheduleStatus.STARTING,
+                    ScheduleStatus.RUNNING, ScheduleStatus.KILLING, ScheduleStatus.ASSIGNED,
+                    ScheduleStatus.RESTARTING, ScheduleStatus.PREEMPTING])))
+        assert mock_scheduler.populateJobConfig.call_count == 0
+        assert subprocess_patch.call_count == 0


Mime
View raw message