aurora-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From kevi...@apache.org
Subject [3/3] incubator-aurora git commit: Extract thrift into an API subproject.
Date Tue, 25 Nov 2014 22:25:34 GMT
Extract thrift into an API subproject.

Testing Done:
./gradlew -Pq build
./pants src/test/python:all

Bugs closed: AURORA-925

Reviewed at https://reviews.apache.org/r/28361/


Project: http://git-wip-us.apache.org/repos/asf/incubator-aurora/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-aurora/commit/91b8d193
Tree: http://git-wip-us.apache.org/repos/asf/incubator-aurora/tree/91b8d193
Diff: http://git-wip-us.apache.org/repos/asf/incubator-aurora/diff/91b8d193

Branch: refs/heads/master
Commit: 91b8d19342b3e2cc16f814a0d51a939111ecc5c9
Parents: 5e80581
Author: Kevin Sweeney <kevints@apache.org>
Authored: Tue Nov 25 14:25:08 2014 -0800
Committer: Kevin Sweeney <kevints@apache.org>
Committed: Tue Nov 25 14:25:08 2014 -0800

----------------------------------------------------------------------
 .gitignore                                      |    3 +
 BUILD                                           |    2 +
 api/src/main/thrift/org/apache/aurora/gen/BUILD |   52 +
 .../thrift/org/apache/aurora/gen/api.thrift     | 1117 ++++++++++++++++++
 .../org/apache/aurora/gen/internal_rpc.thrift   |   28 +
 .../thrift/org/apache/aurora/gen/storage.thrift |  250 ++++
 .../thrift/org/apache/aurora/gen/test.thrift    |  259 ++++
 api/src/main/thrift/org/apache/thermos/BUILD    |   25 +
 .../org/apache/thermos/thermos_internal.thrift  |   99 ++
 build-support/python/make-pycharm-virtualenv    |    4 +-
 build-support/release/make-python-sdists        |    4 +-
 build-support/thrift/thriftw                    |   40 +
 build.gradle                                    |  411 +++----
 .../apache/aurora/CoverageReportCheck.groovy    |  126 --
 .../aurora/build/CoverageReportCheck.groovy     |  128 ++
 .../aurora/build/ThriftEntitiesPlugin.groovy    |  113 ++
 .../org/apache/aurora/build/ThriftPlugin.groovy |  127 ++
 gradle/wrapper/gradle-wrapper.properties        |    4 +-
 settings.gradle                                 |   16 +-
 .../scheduler/http/JettyServerModule.java       |   38 +-
 .../aurora/scheduler/http/api/ApiBeta.java      |    2 +-
 src/main/python/apache/aurora/admin/BUILD       |    2 +-
 src/main/python/apache/aurora/client/BUILD      |    4 +-
 src/main/python/apache/aurora/client/api/BUILD  |   20 +-
 src/main/python/apache/aurora/client/cli/BUILD  |    2 +-
 .../python/apache/aurora/client/commands/BUILD  |    4 +-
 .../python/apache/aurora/client/hooks/BUILD     |    2 +-
 src/main/python/apache/aurora/common/BUILD      |    4 +-
 src/main/python/apache/aurora/common/auth/BUILD |    2 +-
 src/main/python/apache/aurora/config/BUILD      |    2 +-
 .../python/apache/aurora/config/schema/BUILD    |    2 +-
 src/main/python/apache/aurora/executor/BUILD    |    4 +-
 .../python/apache/aurora/executor/common/BUILD  |    2 +-
 .../aurora/tools/java/thrift_wrapper_codegen.py |   11 +-
 src/main/python/apache/thermos/bin/BUILD        |    4 +-
 src/main/python/apache/thermos/common/BUILD     |    4 +-
 src/main/python/apache/thermos/core/BUILD       |   12 +-
 src/main/python/apache/thermos/monitoring/BUILD |    2 +-
 src/main/python/apache/thermos/observer/BUILD   |    2 +-
 src/main/python/apache/thermos/testing/BUILD    |    2 +-
 .../scheduler/assets/scheduler/index.html       |    4 +-
 src/main/thrift/org/apache/aurora/gen/BUILD     |   52 -
 .../thrift/org/apache/aurora/gen/api.thrift     | 1117 ------------------
 .../org/apache/aurora/gen/internal_rpc.thrift   |   28 -
 .../thrift/org/apache/aurora/gen/storage.thrift |  250 ----
 .../thrift/org/apache/aurora/gen/test.thrift    |  259 ----
 src/main/thrift/org/apache/thermos/BUILD        |   25 -
 .../org/apache/thermos/thermos_internal.thrift  |   99 --
 src/test/python/apache/aurora/admin/BUILD       |    2 +-
 src/test/python/apache/aurora/client/api/BUILD  |   20 +-
 .../python/apache/aurora/client/commands/BUILD  |   12 +-
 src/test/python/apache/aurora/common/BUILD      |    2 +-
 src/test/python/apache/aurora/config/BUILD      |    2 +-
 src/test/python/apache/aurora/executor/BUILD    |    6 +-
 .../python/apache/aurora/executor/common/BUILD  |    6 +-
 55 files changed, 2538 insertions(+), 2281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index ec072d2..8684097 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,12 +9,15 @@
 /.idea/*
 /.pants.*
 /.vagrant/*
+/api/dist/*
+/atlassian-ide-plugin.xml
 /build/*
 /build-support/*.pex
 /build-support/*.venv
 /build-support/python/*.venv
 /build-support/virtualenv-*
 /buildSrc/build/*
+/buildSrc/dist/*
 /dist/*
 /gradle-app.setting
 /out/*

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/BUILD
----------------------------------------------------------------------
diff --git a/BUILD b/BUILD
index b6d7ce9..992f675 100644
--- a/BUILD
+++ b/BUILD
@@ -12,6 +12,8 @@
 # limitations under the License.
 #
 
+source_root('api/src/main/thrift', python_library, python_thrift_library)
+
 source_root('src/main/python', page, python_binary, python_library, resources)
 source_root('src/main/thrift', python_library, python_thrift_library)
 

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/aurora/gen/BUILD
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/aurora/gen/BUILD b/api/src/main/thrift/org/apache/aurora/gen/BUILD
new file mode 100644
index 0000000..fe3f83b
--- /dev/null
+++ b/api/src/main/thrift/org/apache/aurora/gen/BUILD
@@ -0,0 +1,52 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+
+TEST_DATA = globs('*test.thrift')
+
+python_thrift_library(
+  name = 'py-thrift',
+  sources = [
+    'api.thrift',
+    'internal_rpc.thrift',
+  ],
+)
+
+python_thrift_library(
+  name = 'py-thrift-test',
+  sources = TEST_DATA,
+)
+
+python_thrift_library(
+  name = 'py-thrift-storage',
+  sources = ['storage.thrift'],
+  dependencies = [
+    ':py-thrift',
+  ],
+)
+
+python_library(
+  name = 'py-thrift-packaged',
+  dependencies = [
+    ':py-thrift',
+    ':py-thrift-test',
+    ':py-thrift-storage',
+  ],
+  provides = setup_py(
+    name = 'apache.gen.aurora',
+    version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+    description = 'Autogenerated Aurora thrift schemas.',
+  )
+)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/aurora/gen/api.thrift
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/aurora/gen/api.thrift b/api/src/main/thrift/org/apache/aurora/gen/api.thrift
new file mode 100644
index 0000000..b91fca9
--- /dev/null
+++ b/api/src/main/thrift/org/apache/aurora/gen/api.thrift
@@ -0,0 +1,1117 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace java org.apache.aurora.gen
+namespace py gen.apache.aurora.api
+
+// Thrift interface definition for the aurora scheduler.
+
+/*
+ * TODO(wfarner): It would be nice if we could put some HTML tags here, regex doesn't handle it though.
+ * The result of an API operation.  A result may only be specified when this is OK.
+ */
+enum ResponseCode {
+  INVALID_REQUEST = 0,
+  OK              = 1,
+  ERROR           = 2,
+  WARNING         = 3,
+  AUTH_FAILED     = 4,
+  /** Raised when a Lock-protected operation failed due to lock validation. */
+  LOCK_ERROR      = 5,
+  /** Raised when a scheduler is transiently unavailable and later retry is recommended. */
+  ERROR_TRANSIENT = 6
+}
+
+const i32 THRIFT_API_VERSION = 3
+
+struct APIVersion {
+  1: required i32 major
+}
+
+// Scheduler Thrift API Version. Increment this when breaking backwards compatibility.
+const APIVersion CURRENT_API_VERSION = {'major': THRIFT_API_VERSION}
+
+// Aurora executor framework name.
+const string AURORA_EXECUTOR_NAME = 'AuroraExecutor'
+
+// TODO(maxim): Remove in 0.7.0. (AURORA-749)
+struct Identity {
+  1: string role
+  2: string user
+}
+
+struct SessionKey {
+  /**
+   * The name of the authentication mechanism, which instructs the server how to interpret the data
+   * field.
+   */
+  4: optional string mechanism
+  /** A blob of data that the server may use for authentication. */
+  5: optional binary data
+}
+
+struct ResourceAggregate {
+  /** Number of CPU cores allotted. */
+  1: double numCpus
+  /** Megabytes of RAM allotted. */
+  2: i64 ramMb
+  /** Megabytes of disk space allotted. */
+  3: i64 diskMb
+}
+
+/** A single host attribute. */
+struct Attribute {
+  1: string name
+  2: set<string> values
+}
+
+enum MaintenanceMode {
+  NONE      = 1,
+  SCHEDULED = 2,
+  DRAINING  = 3,
+  DRAINED   = 4
+}
+
+/** The attributes assigned to a host. */
+struct HostAttributes {
+  1: string          host
+  2: set<Attribute>  attributes
+  3: optional MaintenanceMode mode
+  4: optional string slaveId
+}
+
+/**
+ * A constraint that specifies an explicit set of values, at least one of which must be present
+ * on a host for a task to be scheduled there.
+ */
+struct ValueConstraint {
+  /** If true, treat this as a 'not' - to avoid specific values. */
+  1: bool negated
+  2: set<string> values
+}
+
+/**
+ * A constraint that specifies the maximum number of active tasks on a host with a matching
+ * attribute that may be scheduled simultaneously.
+ */
+struct LimitConstraint {
+  1: i32 limit
+}
+
+/** Types of constraints that may be applied to a task. */
+union TaskConstraint {
+  1: ValueConstraint value
+  2: LimitConstraint limit
+}
+
+/** A constraint that defines whether a task may be scheduled on a host. */
+struct Constraint {
+  /** Mesos slave attribute that the constraint is matched against. */
+  1: string name
+  2: TaskConstraint constraint
+}
+
+struct Package {
+  1: string role
+  2: string name
+  3: i32 version
+}
+
+/** Arbitrary key-value metadata to be included into TaskConfig. */
+struct Metadata {
+  1: string key
+  2: string value
+}
+
+/** A unique identifier for a Job. */
+struct JobKey {
+  /** User role (Unix service account), for example "mesos" */
+  1: string role
+  /** Environment, for example "devel" */
+  2: string environment
+  /** Name, for example "labrat" */
+  3: string name
+}
+
+/** A unique lock key. */
+union LockKey {
+  1: JobKey job
+}
+
+/** A generic lock struct to facilitate context specific resource/operation serialization. */
+struct Lock {
+  /** ID of the lock - unique per storage */
+  1: LockKey key
+  /** UUID - facilitating soft lock authorization */
+  2: string token
+  /** Lock creator */
+  3: string user
+  /** Lock creation timestamp in milliseconds */
+  4: i64 timestampMs
+  /** Optional message to record with the lock */
+  5: optional string message
+}
+
+/** Defines the required lock validation level. */
+enum LockValidation {
+  /** The lock must be valid in order to be released. */
+  CHECKED   = 0
+  /** The lock will be released without validation (aka "force release"). */
+  UNCHECKED = 1
+}
+
+/** A unique identifier for the active task within a job. */
+struct InstanceKey {
+  /** Key identifying the job. */
+  1: JobKey jobKey
+  /** Unique instance ID for the active task in a job. */
+  2: i32 instanceId
+}
+
+struct ExecutorConfig {
+  /** Name identifying the Executor. */
+  1: string name
+  /** Executor configuration data. */
+  2: string data
+}
+
+/** Description of the tasks contained within a job. */
+struct TaskConfig {
+ /** Job task belongs to. */
+ 28: JobKey job
+ // TODO(maxim): Remove in 0.7.0. (AURORA-749)
+ /** contains the role component of JobKey */
+ 17: Identity owner
+ // TODO(maxim): Remove in 0.7.0. (AURORA-749)
+ /** contains the environment component of JobKey */
+ 26: string environment
+ // TODO(maxim): Remove in 0.7.0. (AURORA-749)
+ /** contains the name component of JobKey */
+  3: string jobName
+  7: bool isService
+  8: double numCpus
+  9: i64 ramMb
+ 10: i64 diskMb
+ 11: i32 priority
+ 13: i32 maxTaskFailures
+ /** Whether this is a production task, which can preempt. */
+ 18: optional bool production
+
+ 20: set<Constraint> constraints
+ /** a list of named ports this task requests */
+ 21: set<string> requestedPorts
+
+ /**
+  * Custom links to include when displaying this task on the scheduler dashboard. Keys are anchor
+  * text, values are URLs. Wildcards are supported for dynamic link crafting based on host, ports,
+  * instance, etc.
+  */
+ 22: optional map<string, string> taskLinks
+ 23: optional string contactEmail
+ /** Executor configuration */
+ 25: optional ExecutorConfig executorConfig
+ /** Used to display additional details in the UI. */
+ 27: optional set<Metadata> metadata
+}
+
+/** Defines the policy for launching a new cron job when one is already running. */
+enum CronCollisionPolicy {
+  /** Kills the existing job with the colliding name, and runs the new cron job. */
+  KILL_EXISTING = 0,
+  /** Cancels execution of the new job, leaving the running job intact. */
+  CANCEL_NEW    = 1,
+  /**
+   * DEPRECATED. For existing jobs, treated the same as CANCEL_NEW.
+   * createJob will reject jobs with this policy.
+   */
+  RUN_OVERLAP   = 2
+}
+
+/**
+ * Description of an Aurora job. One task will be scheduled for each instance within the job.
+ */
+struct JobConfiguration {
+  /**
+   * Key for this job. If not specified, name, owner.role, and a reasonable default environment are
+   * used to construct it server-side.
+   */
+  9: JobKey key
+  // TODO(maxim): Remove in 0.7.0. (AURORA-749)
+  /** Owner of this job. */
+  7: Identity owner
+  /**
+   * If present, the job will be handled as a cron job with this crontab-syntax schedule.
+   */
+  4: string cronSchedule
+  /** Collision policy to use when handling overlapping cron runs.  Default is KILL_EXISTING. */
+  5: CronCollisionPolicy cronCollisionPolicy
+  /** Task configuration for this job. */
+  6: TaskConfig taskConfig
+  /**
+   * The number of instances in the job. Generated instance IDs for tasks will be in the range
+   * [0, instances).
+   */
+  8: i32 instanceCount
+}
+
+struct JobStats {
+  /** Number of tasks in active state for this job. */
+  1: i32 activeTaskCount
+  /** Number of tasks in finished state for this job. */
+  2: i32 finishedTaskCount
+  /** Number of failed tasks for this job. */
+  3: i32 failedTaskCount
+  /** Number of tasks in pending state for this job. */
+  4: i32 pendingTaskCount
+}
+
+struct JobSummary {
+  1: JobConfiguration job
+  2: JobStats stats
+  /** Timestamp of next cron run in ms since epoch, for a cron job */
+  3: optional i64 nextCronRunMs
+}
+
+/** A request to add the following instances to an existing job. Used by addInstances. */
+struct AddInstancesConfig {
+  1: JobKey key
+  2: TaskConfig taskConfig
+  3: set<i32> instanceIds
+}
+
+/** Closed range of integers. */
+struct Range {
+  1: i32 first
+  2: i32 last
+}
+
+struct ConfigGroup {
+  1: TaskConfig config
+  2: set<i32> instanceIds   // TODO(maxim): change it to use list<Range> instead.
+}
+
+struct ConfigSummary {
+  1: JobKey key
+  2: set<ConfigGroup> groups
+}
+
+struct PopulateJobResult {
+  // TODO(maxim): Remove populated field in 0.7.0. (AURORA-691)
+  1: set<TaskConfig> populatedDEPRECATED
+  2: TaskConfig taskConfig
+}
+
+struct GetQuotaResult {
+  /** Total allocated resource quota. */
+  1: ResourceAggregate quota
+  /** Resources consumed by production jobs. */
+  2: optional ResourceAggregate prodConsumption
+  /** Resources consumed by non-production jobs. */
+  3: optional ResourceAggregate nonProdConsumption
+}
+
+/** Wraps return results for the acquireLock API. */
+struct AcquireLockResult {
+  /** Acquired Lock instance. */
+  1: Lock lock
+}
+
+/** States that a task may be in. */
+enum ScheduleStatus {
+  // TODO(maxim): This state does not add much value. Consider dropping it completely.
+  /* Initial state for a task.  A task will remain in this state until it has been persisted. */
+  INIT             = 11,
+  /** The task will be rescheduled, but is being throttled for restarting too frequently. */
+  THROTTLED        = 16,
+  /** Task is awaiting assignment to a slave. */
+  PENDING          = 0,
+  /** Task has been assigned to a slave. */
+  ASSIGNED         = 9,
+  /** Slave has acknowledged receipt of task and is bootstrapping the task. */
+  STARTING         = 1,
+  /** The task is running on the slave. */
+  RUNNING          = 2,
+  /** The task terminated with an exit code of zero. */
+  FINISHED         = 3,
+  /** The task is being preempted by another task. */
+  PREEMPTING       = 13,
+  /** The task is being restarted in response to a user request. */
+  RESTARTING       = 12,
+  /** The task is being restarted in response to a host maintenance request. */
+  DRAINING         = 17,
+  /** The task terminated with a non-zero exit code. */
+  FAILED           = 4,
+  /** Execution of the task was terminated by the system. */
+  KILLED           = 5,
+  /** The task is being forcibly killed. */
+  KILLING          = 6,
+  /** A fault in the task environment has caused the system to believe the task no longer exists.
+   * This can happen, for example, when a slave process disappears.
+   */
+  LOST             = 7,
+  // TODO(maxim): Remove SANDBOX_DELETED in 0.7.0. (AURORA-832)
+  /** The task sandbox has been deleted by the executor. */
+  SANDBOX_DELETED  = 10
+}
+
+// States that a task may be in while still considered active.
+const set<ScheduleStatus> ACTIVE_STATES = [ScheduleStatus.ASSIGNED,
+                                           ScheduleStatus.DRAINING,
+                                           ScheduleStatus.KILLING,
+                                           ScheduleStatus.PENDING,
+                                           ScheduleStatus.PREEMPTING,
+                                           ScheduleStatus.RESTARTING,
+                                           ScheduleStatus.RUNNING,
+                                           ScheduleStatus.STARTING,
+                                           ScheduleStatus.THROTTLED]
+
+// States that a task may be in while associated with a slave machine and non-terminal.
+const set<ScheduleStatus> SLAVE_ASSIGNED_STATES = [ScheduleStatus.ASSIGNED,
+                                                   ScheduleStatus.DRAINING,
+                                                   ScheduleStatus.KILLING,
+                                                   ScheduleStatus.PREEMPTING,
+                                                   ScheduleStatus.RESTARTING,
+                                                   ScheduleStatus.RUNNING,
+                                                   ScheduleStatus.STARTING]
+
+// States that a task may be in while in an active sandbox.
+const set<ScheduleStatus> LIVE_STATES = [ScheduleStatus.KILLING,
+                                         ScheduleStatus.PREEMPTING,
+                                         ScheduleStatus.RESTARTING,
+                                         ScheduleStatus.DRAINING,
+                                         ScheduleStatus.RUNNING]
+
+// States a completed task may be in.
+const set<ScheduleStatus> TERMINAL_STATES = [ScheduleStatus.FAILED,
+                                             ScheduleStatus.FINISHED,
+                                             ScheduleStatus.KILLED,
+                                             ScheduleStatus.LOST,
+                                             ScheduleStatus.SANDBOX_DELETED]
+
+// Regular expressions for matching valid identifiers for job path components. All expressions
+// below should accept and reject the same set of inputs.
+const string GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
+// JVM: Use with java.util.regex.Pattern#compile
+const string GOOD_IDENTIFIER_PATTERN_JVM = GOOD_IDENTIFIER_PATTERN
+// Python: Use with re.compile
+const string GOOD_IDENTIFIER_PATTERN_PYTHON = GOOD_IDENTIFIER_PATTERN
+
+/** Event marking a state transition within a task's lifecycle. */
+struct TaskEvent {
+  /** Epoch timestamp in milliseconds. */
+  1: i64 timestamp
+  /** New status of the task. */
+  2: ScheduleStatus status
+  /** Audit message that explains why a transition occurred. */
+  3: optional string message
+  /** Hostname of the scheduler machine that performed the event. */
+  4: optional string scheduler
+}
+
+/** A task assignment that is provided to an executor. */
+struct AssignedTask {
+  /** The mesos task ID for this task.  Guaranteed to be globally unique */
+  1: string taskId
+
+  /**
+   * The mesos slave ID that this task has been assigned to.
+   * This will not be populated for a PENDING task.
+   */
+  2: string slaveId
+
+  /**
+   * The name of the machine that this task has been assigned to.
+   * This will not be populated for a PENDING task.
+   */
+  3: string slaveHost
+
+  /** Information about how to run this task. */
+  4: TaskConfig task
+  /** Ports reserved on the machine while this task is running. */
+  5: map<string, i32> assignedPorts
+
+  /**
+   * The instance ID assigned to this task. Instance IDs must be unique and contiguous within a
+   * job, and will be in the range [0, N-1] (inclusive) for a job that has N instances.
+   */
+  6: i32 instanceId
+}
+
+/** A task that has been scheduled. */
+struct ScheduledTask {
+  /** The task that was scheduled. */
+  1: AssignedTask assignedTask
+  /** The current status of this task. */
+  2: ScheduleStatus status
+  /**
+   * The number of failures that this task has accumulated over the multi-generational history of
+   * this task.
+   */
+  3: i32 failureCount
+  /** State change history for this task. */
+  4: list<TaskEvent> taskEvents
+  /**
+   * The task ID of the previous generation of this task.  When a task is automatically rescheduled,
+   * a copy of the task is created, and its ancestor ID is set to the previous task's task ID.
+   */
+  5: string ancestorId
+}
+
+struct ScheduleStatusResult {
+  1: list<ScheduledTask> tasks
+}
+
+struct GetJobsResult {
+  1: set<JobConfiguration> configs
+}
+
+/**
+ * Contains a set of restrictions on matching tasks where all restrictions must be met
+ * (terms are AND'ed together).
+ */
+struct TaskQuery {
+  // TODO(maxim): Remove in 0.7.0. (AURORA-749)
+  8: Identity owner
+  14: string role
+  9: string environment
+  2: string jobName
+  4: set<string> taskIds
+  5: set<ScheduleStatus> statuses
+  7: set<i32> instanceIds
+  10: set<string> slaveHosts
+  11: set<JobKey> jobKeys
+  12: i32 offset
+  13: i32 limit
+}
+
+struct HostStatus {
+  1: string host
+  2: MaintenanceMode mode
+}
+
+struct RoleSummary {
+  1: string role
+  2: i32 jobCount
+  3: i32 cronJobCount
+}
+
+struct Hosts {
+  1: set<string> hostNames
+}
+
+struct PendingReason {
+  1: string taskId
+  2: string reason
+}
+
+/** States that a job update may be in. */
+enum JobUpdateStatus {
+  /** Update is in progress. */
+  ROLLING_FORWARD = 0,
+
+  /** Update has failed and is being rolled back. */
+  ROLLING_BACK = 1,
+
+  /** Update has been paused while in progress. */
+  ROLL_FORWARD_PAUSED = 2,
+
+  /** Update has been paused during rollback. */
+  ROLL_BACK_PAUSED = 3,
+
+  /** Update has completed successfully. */
+  ROLLED_FORWARD = 4,
+
+  /** Update has failed and rolled back. */
+  ROLLED_BACK = 5,
+
+  /** Update was aborted. */
+  ABORTED = 6,
+
+  /** Unknown error during update. */
+  ERROR = 7,
+
+  /**
+   * Update failed to complete.
+   * This can happen if failure thresholds are met while rolling forward, but rollback is disabled,
+   * or if failure thresholds are met when rolling back.
+   */
+  FAILED = 8
+}
+
+/** States the job update can be in while still considered active. */
+const set<JobUpdateStatus> ACTIVE_JOB_UPDATE_STATES = [JobUpdateStatus.ROLLING_FORWARD,
+                                                       JobUpdateStatus.ROLLING_BACK,
+                                                       JobUpdateStatus.ROLL_FORWARD_PAUSED,
+                                                       JobUpdateStatus.ROLL_BACK_PAUSED]
+
+/** Job update actions that can be applied to job instances. */
+enum JobUpdateAction {
+  /**
+   * An instance was moved to the target state successfully, and declared healthy if the desired
+   * state did not involve deleting the instance.
+   */
+  INSTANCE_UPDATED = 1,
+
+  /**
+   * An instance was rolled back because the job update did not succeed.  The instance was reverted
+   * to the original state prior to the job update, which means that the instance was removed if
+   * the update added instances to the job.
+   */
+  INSTANCE_ROLLED_BACK = 2,
+
+  /**
+   * An instance is being moved from the original state to the desired state.
+   */
+  INSTANCE_UPDATING = 3,
+
+  /**
+   * An instance is being moved from the desired state back to the original state, because the job
+   * update failed.
+   */
+  INSTANCE_ROLLING_BACK = 4,
+
+  /** An instance update was attempted but failed and was not rolled back. */
+  INSTANCE_UPDATE_FAILED = 5,
+
+  /** An instance rollback was attempted but failed. */
+  INSTANCE_ROLLBACK_FAILED = 6
+}
+
+/** Status of the coordinated update. Intended as a response to pulseJobUpdate RPC. */
+enum JobUpdatePulseStatus {
+  /**
+   *  Update is active (ACK).
+   */
+  OK = 1,
+
+  /**
+   * Update is paused and will not progress unless explicitly resumed (NACK).
+   */
+  PAUSED = 2,
+
+  /**
+   * Update has reached terminal state.
+   */
+  FINISHED = 3
+}
+
+/** Job update thresholds and limits. */
+struct JobUpdateSettings {
+  /** Max number of instances being updated at any given moment. */
+  1: i32 updateGroupSize
+
+  /** Max number of instance failures to tolerate before marking instance as FAILED. */
+  2: i32 maxPerInstanceFailures
+
+  /** Max number of FAILED instances to tolerate before terminating the update. */
+  3: i32 maxFailedInstances
+
+  /** Max time to wait until an instance reaches RUNNING state. */
+  4: i32 maxWaitToInstanceRunningMs
+
+  /** Min time to watch a RUNNING instance. */
+  5: i32 minWaitInInstanceRunningMs
+
+  /** If true, enables failed update rollback. */
+  6: bool rollbackOnFailure
+
+  /** Instance IDs to act on. All instances will be affected if this is not set. */
+  7: set<Range> updateOnlyTheseInstances
+
+  /**
+   * If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
+   * batch until the preceding batch finishes updating.
+   */
+  8: bool waitForBatchCompletion
+
+ /**
+  * If set, requires external calls to pulseJobUpdate RPC within the specified rate for the
+  * update to make progress. If no pulses received within specified interval the update will
+  * block. A blocked update is unable to continue but retains its current status. It may only get
+  * unblocked by a fresh pulseJobUpdate call.
+  */
+  9: i32 blockIfNoPulsesAfterMs
+}
+
+/** Event marking a state transition in job update lifecycle. */
+struct JobUpdateEvent {
+  /** Update status. */
+  1: JobUpdateStatus status
+
+  /** Epoch timestamp in milliseconds. */
+  2: i64 timestampMs
+
+  /** User who performed this event (if user-initiated). */
+  3: optional string user
+}
+
+/** Event marking a state transition in job instance update lifecycle. */
+struct JobInstanceUpdateEvent {
+  /** Job instance ID. */
+  1: i32 instanceId
+
+  /** Epoch timestamp in milliseconds. */
+  2: i64 timestampMs
+
+  /** Job update action taken on the instance. */
+  3: JobUpdateAction action
+}
+
+/** Maps instance IDs to the TaskConfig associated with them. */
+struct InstanceTaskConfig {
+  /** A TaskConfig associated with instances. */
+  1: TaskConfig task
+
+  /** Instances associated with the TaskConfig. */
+  2: set<Range> instances
+}
+
+/** Current job update state including status and created/modified timestamps. */
+struct JobUpdateState {
+  /** Current status of the update. */
+  1: JobUpdateStatus status
+
+  /** Created timestamp in milliseconds. */
+  2: i64 createdTimestampMs
+
+  /** Last modified timestamp in milliseconds. */
+  3: i64 lastModifiedTimestampMs
+}
+
+/** Summary of the job update including job key, user and current state. */
+struct JobUpdateSummary {
+  /** Update ID. */
+  1: string updateId
+
+  /** Job key. */
+  2: JobKey jobKey
+
+  /** User initiated an update. */
+  3: string user
+
+  /** Current job update state. */
+  4: JobUpdateState state
+}
+
+/** Update configuration and setting details. */
+struct JobUpdateInstructions {
+  /** Actual InstanceId -> TaskConfig mapping when the update was requested. */
+  1: set<InstanceTaskConfig> initialState
+
+  /** Desired configuration when the update completes. */
+  2: InstanceTaskConfig desiredState
+
+  /** Update specific settings. */
+  3: JobUpdateSettings settings
+}
+
+/** Full definition of the job update. */
+struct JobUpdate {
+  /** Update summary. */
+  1: JobUpdateSummary summary
+
+  /** Update configuration. */
+  2: JobUpdateInstructions instructions
+}
+
+struct JobUpdateDetails {
+  /** Update definition. */
+  1: JobUpdate update
+
+  /** History for this update. */
+  2: list<JobUpdateEvent> updateEvents
+
+  /** History for the individual instances updated. */
+  3: list<JobInstanceUpdateEvent> instanceEvents
+}
+
+/** A request to update the following instances of an existing job. Used by startUpdate. */
+struct JobUpdateRequest {
+  /** Desired TaskConfig to apply. */
+  1: TaskConfig taskConfig
+
+  /** Desired number of instances of the task config. */
+  2: i32 instanceCount
+
+  /** Update settings and limits. */
+  3: JobUpdateSettings settings
+}
+
+/**
+ * Contains a set of restrictions on matching job updates where all restrictions must be met
+ * (terms are AND'ed together).
+ */
+struct JobUpdateQuery {
+  /** Update ID. */
+  1: string updateId
+
+  /** Job role. */
+  2: string role
+
+  /** Job key. */
+  3: JobKey jobKey
+
+  /** User who created the update. */
+  4: string user
+
+  /** Set of update statuses. */
+  5: set<JobUpdateStatus> updateStatuses
+
+  /** Offset to serve data from. Used by pagination. */
+  6: i32 offset
+
+  /** Number or records to serve. Used by pagination. */
+  7: i32 limit
+}
+
+/** Result of the listBackups call. */
+struct ListBackupsResult {
+  1: set<string> backups
+}
+
+/** Result of the startMaintenance call. */
+struct StartMaintenanceResult {
+  1: set<HostStatus> statuses
+}
+
+/** Result of the drainHosts call. */
+struct DrainHostsResult {
+  1: set<HostStatus> statuses
+}
+
+/** Result of the queryRecovery call. */
+struct QueryRecoveryResult {
+  1: set<ScheduledTask> tasks
+}
+
+/** Result of the maintenanceStatus call. */
+struct MaintenanceStatusResult {
+  1: set<HostStatus> statuses
+}
+
+/** Result of the endMaintenance call. */
+struct EndMaintenanceResult {
+  1: set<HostStatus> statuses
+}
+
+/** Result of the getRoleSummary call. */
+struct RoleSummaryResult {
+  1: set<RoleSummary> summaries
+}
+
+/** Result of the getJobSummary call. */
+struct JobSummaryResult {
+  1: set<JobSummary> summaries
+}
+
+/** Result of the getLocks call. */
+struct GetLocksResult {
+  1: set<Lock> locks
+}
+
+/** Result of the getConfigSummary call. */
+struct ConfigSummaryResult {
+  1: ConfigSummary summary
+}
+
+/** Result of the getPendingReason call. */
+struct GetPendingReasonResult {
+  1: set<PendingReason> reasons
+}
+
+/** Result of the startUpdate call. */
+struct StartJobUpdateResult {
+  /** Job update ID. */
+  1: string updateId
+}
+
+/** Result of the getJobUpdateSummaries call. */
+struct GetJobUpdateSummariesResult {
+  1: list<JobUpdateSummary> updateSummaries
+}
+
+/** Result of the getJobUpdateDetails call. */
+struct GetJobUpdateDetailsResult {
+  1: JobUpdateDetails details
+}
+
+/** Result of the pulseJobUpdate call. */
+struct PulseJobUpdateResult {
+  1: JobUpdatePulseStatus status
+}
+
+/** Information about the scheduler. */
+struct ServerInfo {
+  /** Name of the cluster. */
+  1: string clusterName
+  /** Version of the thrift API served by this scheduler. */
+  2: i32 thriftAPIVersion
+  /** A url prefix for job container stats. */
+  3: string statsUrlPrefix
+}
+
+/**
+ * Payload of a successful RPC response; as a thrift union, exactly one field is set.
+ * NOTE(review): field IDs 2 and 12-14 are skipped — presumably retired result types.
+ * Per the convention stated below for services, IDs should never be reused; verify
+ * before assigning them to new members.
+ */
+union Result {
+  1: PopulateJobResult populateJobResult
+  3: ScheduleStatusResult scheduleStatusResult
+  4: GetJobsResult getJobsResult
+  5: GetQuotaResult getQuotaResult
+  6: ListBackupsResult listBackupsResult
+  7: StartMaintenanceResult startMaintenanceResult
+  8: DrainHostsResult drainHostsResult
+  9: QueryRecoveryResult queryRecoveryResult
+  10: MaintenanceStatusResult maintenanceStatusResult
+  11: EndMaintenanceResult endMaintenanceResult
+  15: APIVersion getVersionResult
+  16: AcquireLockResult acquireLockResult
+  17: RoleSummaryResult roleSummaryResult
+  18: JobSummaryResult jobSummaryResult
+  19: GetLocksResult getLocksResult
+  20: ConfigSummaryResult configSummaryResult
+  21: GetPendingReasonResult getPendingReasonResult
+  22: StartJobUpdateResult startJobUpdateResult
+  23: GetJobUpdateSummariesResult getJobUpdateSummariesResult
+  24: GetJobUpdateDetailsResult getJobUpdateDetailsResult
+  25: PulseJobUpdateResult pulseJobUpdateResult
+}
+
+/** A generic message carried by a Response to convey additional information to the caller. */
+struct ResponseDetail {
+  1: string message
+}
+
+/** Envelope returned by every scheduler RPC. */
+struct Response {
+  /** Machine-readable status of the call. */
+  1: ResponseCode responseCode
+  // TODO(wfarner): Remove the message field in 0.7.0. (AURORA-466)
+  2: optional string messageDEPRECATED
+  // TODO(wfarner): Remove version field in 0.7.0. (AURORA-467)
+  4: APIVersion DEPRECATEDversion
+  /** Information about the scheduler that served this response. */
+  5: ServerInfo serverInfo
+  /** Payload from the invoked RPC. */
+  3: optional Result result
+  /**
+   * Messages from the server relevant to the request, such as warnings or use of deprecated
+   * features.
+   */
+  6: list<ResponseDetail> details
+}
+
+// A service that provides all the read only calls to the Aurora scheduler.
+service ReadOnlyScheduler {
+  /** Returns a summary of the jobs grouped by role. */
+  Response getRoleSummary()
+
+  /** Returns a summary of jobs, optionally only those owned by a specific role. */
+  Response getJobSummary(1: string role)
+
+  /** Fetches the status of tasks. */
+  Response getTasksStatus(1: TaskQuery query)
+
+  /**
+   * Same as getTasksStatus but without the TaskConfig.ExecutorConfig data set.
+   * This is an interim solution until we have a better way to query TaskConfigs (AURORA-541).
+   */
+  Response getTasksWithoutConfigs(1: TaskQuery query)
+
+  /** Returns user-friendly reasons (if available) for tasks retained in PENDING state. */
+  Response getPendingReason(1: TaskQuery query)
+
+  /** Fetches the configuration summary of active tasks for the specified job. */
+  Response getConfigSummary(1: JobKey job)
+
+  /**
+   * Fetches the status of jobs.
+   * ownerRole is optional, in which case all jobs are returned.
+   */
+  Response getJobs(1: string ownerRole)
+
+  /** Fetches the quota allocated for a user. */
+  Response getQuota(1: string ownerRole)
+
+  // TODO(Suman Karumuri): Delete this API once it is no longer used.
+  /**
+   * Returns the current version of the API implementation
+   * NOTE: This method is deprecated.
+   */
+  Response getVersion()
+
+  /**
+   * Populates fields in a job configuration as though it were about to be run.
+   * This can be used to diff a configuration against running tasks.
+   */
+  Response populateJobConfig(1: JobConfiguration description)
+
+  /** Returns all stored context specific resource/operation locks. */
+  Response getLocks()
+
+  /** Gets job update summaries. Not implemented yet. */
+  Response getJobUpdateSummaries(1: JobUpdateQuery jobUpdateQuery)
+
+  /** Gets job update details. Not implemented yet. */
+  Response getJobUpdateDetails(1: string updateId)
+}
+
+// Due to assumptions in the client all authenticated RPCs must have a SessionKey as their
+// last argument. Note that the order in this file is what matters, and message numbers should still
+// never be reused.
+service AuroraSchedulerManager extends ReadOnlyScheduler {
+  /**
+   * Creates a new job.  The request will be denied if a job with the provided name already exists
+   * in the cluster.
+   */
+  Response createJob(1: JobConfiguration description, 3: Lock lock, 2: SessionKey session)
+
+  /**
+   * Enters a job into the cron schedule, without actually starting the job.
+   * If the job is already present in the schedule, this will update the schedule entry with the new
+   * configuration.
+   */
+  Response scheduleCronJob(1: JobConfiguration description, 3: Lock lock, 2: SessionKey session)
+
+  /**
+   * Removes a job from the cron schedule. The request will be denied if the job was not previously
+   * scheduled with scheduleCronJob.
+   */
+  Response descheduleCronJob(4: JobKey job, 3: Lock lock, 2: SessionKey session)
+
+  /**
+   * Starts a cron job immediately.  The request will be denied if the specified job does not
+   * exist for the role account, or the job is not a cron job.
+   */
+  Response startCronJob(4: JobKey job, 3: SessionKey session)
+
+  /** Restarts a batch of shards. */
+  Response restartShards(5: JobKey job, 3: set<i32> shardIds, 6: Lock lock, 4: SessionKey session)
+
+  /** Initiates a kill on tasks. */
+  Response killTasks(1: TaskQuery query, 3: Lock lock, 2: SessionKey session)
+
+  /**
+   * Adds new instances specified by the AddInstancesConfig. A job represented by the JobKey must be
+   * protected by Lock.
+   */
+  Response addInstances(
+      1: AddInstancesConfig config,
+      2: Lock lock,
+      3: SessionKey session)
+
+  /**
+   * Creates and saves a new Lock instance guarding against multiple mutating operations within the
+   * context defined by LockKey.
+   */
+  Response acquireLock(1: LockKey lockKey, 2: SessionKey session)
+
+  /** Releases the lock acquired earlier in acquireLock call. */
+  Response releaseLock(1: Lock lock, 2: LockValidation validation, 3: SessionKey session)
+
+  /**
+   * Replaces the template (configuration) for the existing cron job.
+   * The cron job template (configuration) must exist for the call to succeed.
+   */
+  Response replaceCronTemplate(1: JobConfiguration config, 2: Lock lock, 3: SessionKey session)
+
+  /** Starts update of the existing service job. Not implemented yet. */
+  Response startJobUpdate(1: JobUpdateRequest request, 2: SessionKey session)
+
+  /**
+   * Pauses the update progress for the specified job. Can be resumed by resumeUpdate call.
+   * Not implemented yet.
+   */
+  Response pauseJobUpdate(1: JobKey jobKey, 2: SessionKey session)
+
+  /** Resumes progress of a previously paused job update. Not implemented yet. */
+  Response resumeJobUpdate(1: JobKey jobKey, 2: SessionKey session)
+
+  /** Permanently aborts the job update. Does not remove the update history. Not implemented yet. */
+  Response abortJobUpdate(1: JobKey jobKey, 2: SessionKey session)
+
+  /**
+   * Allows progress of the job update in case blockIfNoPulsesAfterMs is specified in
+   * JobUpdateSettings. Unblocks progress if the update was previously blocked.
+   * Responds with ResponseCode.INVALID_REQUEST in case an unknown updateId is specified.
+   */
+  Response pulseJobUpdate(1: string updateId, 2: SessionKey session)
+}
+
+/** A rewrite of a single instance's stored task configuration. */
+struct InstanceConfigRewrite {
+  /** Key for the task to rewrite. */
+  1: InstanceKey instanceKey
+  /** The original configuration. */
+  2: TaskConfig oldTask
+  /** The rewritten configuration. */
+  3: TaskConfig rewrittenTask
+}
+
+/** A rewrite of a job's stored configuration. */
+struct JobConfigRewrite {
+  /** The original job configuration. */
+  1: JobConfiguration oldJob
+  /** The rewritten job configuration. */
+  2: JobConfiguration rewrittenJob
+}
+
+/** A single configuration rewrite; as a thrift union, exactly one field is set. */
+union ConfigRewrite {
+  1: JobConfigRewrite jobRewrite
+  2: InstanceConfigRewrite instanceRewrite
+}
+
+/** Request for the rewriteConfigs call, carrying one or more rewrite commands. */
+struct RewriteConfigsRequest {
+  1: list<ConfigRewrite> rewriteCommands
+}
+
+// It would be great to compose these services rather than extend, but that won't be possible until
+// https://issues.apache.org/jira/browse/THRIFT-66 is resolved.
+/** Administrative operations: quota management, storage backup/recovery, and host maintenance. */
+service AuroraAdmin extends AuroraSchedulerManager {
+  /** Assign quota to a user.  This will overwrite any pre-existing quota for the user. */
+  Response setQuota(1: string ownerRole, 2: ResourceAggregate quota, 3: SessionKey session)
+
+  /**
+   * Forces a task into a specific state.  This does not guarantee the task will enter the given
+   * state, as the task must still transition within the bounds of the state machine.  However,
+   * it attempts to enter that state via the state machine.
+   */
+  Response forceTaskState(
+      1: string taskId,
+      2: ScheduleStatus status,
+      3: SessionKey session)
+
+  /** Immediately writes a storage snapshot to disk. */
+  Response performBackup(1: SessionKey session)
+
+  /** Lists backups that are available for recovery. */
+  Response listBackups(1: SessionKey session)
+
+  /** Loads a backup to an in-memory storage.  This must precede all other recovery operations. */
+  Response stageRecovery(1: string backupId, 2: SessionKey session)
+
+  /** Queries for tasks in a staged recovery. */
+  Response queryRecovery(1: TaskQuery query, 2: SessionKey session)
+
+  /** Deletes tasks from a staged recovery. */
+  Response deleteRecoveryTasks(1: TaskQuery query, 2: SessionKey session)
+
+  /** Commits a staged recovery, completely replacing the previous storage state. */
+  Response commitRecovery(1: SessionKey session)
+
+  /** Unloads (aborts) a staged recovery. */
+  Response unloadRecovery(1: SessionKey session)
+
+  /** Put the given hosts into maintenance mode. */
+  Response startMaintenance(1: Hosts hosts, 2: SessionKey session)
+
+  /** Ask scheduler to begin moving tasks scheduled on given hosts. */
+  Response drainHosts(1: Hosts hosts, 2: SessionKey session)
+
+  /** Retrieve the current maintenance states for a group of hosts. */
+  Response maintenanceStatus(1: Hosts hosts, 2: SessionKey session)
+
+  /** Set the given hosts back into serving mode. */
+  Response endMaintenance(1: Hosts hosts, 2: SessionKey session)
+
+  /** Start a storage snapshot and block until it completes. */
+  Response snapshot(1: SessionKey session)
+
+  /**
+   * Forcibly rewrites the stored definition of user configurations.  This is intended to be used
+   * in a controlled setting, primarily to migrate pieces of configurations that are opaque to the
+   * scheduler (e.g. executorConfig).
+   * The scheduler may do some validation of the rewritten configurations, but it is important
+   * that the caller take care to provide valid input and alter only necessary fields.
+   */
+  Response rewriteConfigs(1: RewriteConfigsRequest request, 2: SessionKey session)
+}

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/aurora/gen/internal_rpc.thrift
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/aurora/gen/internal_rpc.thrift b/api/src/main/thrift/org/apache/aurora/gen/internal_rpc.thrift
new file mode 100644
index 0000000..a2c230f
--- /dev/null
+++ b/api/src/main/thrift/org/apache/aurora/gen/internal_rpc.thrift
@@ -0,0 +1,28 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace java org.apache.aurora.gen.comm
+namespace py gen.apache.aurora.comm
+
+include "api.thrift"
+
+// Thrift interface to define the communication between the scheduler and executor.
+
+// Message sent from the scheduler to the executor, indicating that some
+// task history associated with the host may have been purged, and the
+// executor should only retain tasks associated with the provided task IDs.
+struct AdjustRetainedTasks {
+  // NOTE(review): field ID 1 is skipped — presumably a removed field; do not reuse.
+  2: map<string, api.ScheduleStatus> retainedTasks  // All tasks that the executor should
+                                                    // retain, and their statuses.
+}

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/aurora/gen/storage.thrift
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/aurora/gen/storage.thrift b/api/src/main/thrift/org/apache/aurora/gen/storage.thrift
new file mode 100644
index 0000000..3798797
--- /dev/null
+++ b/api/src/main/thrift/org/apache/aurora/gen/storage.thrift
@@ -0,0 +1,250 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace java org.apache.aurora.gen.storage
+namespace py gen.apache.aurora.storage
+
+include "api.thrift"
+
+// Thrift object definitions for messages used for mesos storage.
+
+// Ops that are direct representations of the data needed to perform local storage mutations.
+/** Op: persist the Mesos framework ID. */
+struct SaveFrameworkId {
+  1: string id
+}
+
+/** Op: persist a job configuration under the manager that owns it. */
+struct SaveAcceptedJob {
+  1: string managerId
+  2: api.JobConfiguration jobConfig
+}
+
+/** Op: persist a lock. */
+struct SaveLock {
+  1: api.Lock lock
+}
+
+/** Op: delete the lock identified by lockKey. */
+struct RemoveLock {
+  1: api.LockKey lockKey
+}
+
+/** Op: delete the job identified by jobKey. */
+struct RemoveJob {
+  // NOTE(review): field ID 1 is skipped — presumably a removed field; do not reuse.
+  2: api.JobKey jobKey
+}
+
+/** Op: persist (create or overwrite) a set of scheduled tasks. */
+struct SaveTasks {
+  1: set<api.ScheduledTask> tasks
+}
+
+/** Op: replace the stored configuration of the task identified by taskId. */
+struct RewriteTask {
+  1: string taskId
+  2: api.TaskConfig task
+}
+
+/** Op: delete tasks by ID. */
+struct RemoveTasks {
+  1: set<string> taskIds
+}
+
+/** Op: persist the resource quota for a role. */
+struct SaveQuota {
+  1: string role
+  2: api.ResourceAggregate quota
+}
+
+/** Op: delete the resource quota for a role. */
+struct RemoveQuota {
+  1: string role
+}
+
+/** Op: persist the attributes of a host. */
+struct SaveHostAttributes {
+  1: api.HostAttributes hostAttributes
+}
+
+/** Op: persist a job update along with the lock it is associated with. */
+struct SaveJobUpdate {
+  1: api.JobUpdate jobUpdate
+  /** ID of the lock associated with this update. */
+  2: string lockToken
+}
+
+/** A job update's details stored together with its associated lock token. */
+struct StoredJobUpdateDetails {
+  1: api.JobUpdateDetails details
+  /** ID of the lock associated with this update. */
+  2: string lockToken
+}
+
+/** Op: append an event to a job update's history. */
+struct SaveJobUpdateEvent {
+  1: api.JobUpdateEvent event
+  /** ID of the update this event belongs to. */
+  2: string updateId
+}
+
+/** Op: append an instance-level event to a job update's history. */
+struct SaveJobInstanceUpdateEvent {
+  1: api.JobInstanceUpdateEvent event
+  /** ID of the update this event belongs to. */
+  2: string updateId
+}
+
+/** Op: prune old job update history. */
+struct PruneJobUpdateHistory {
+  /** Number of updates to retain per job. */
+  1: i32 perJobRetainCount
+  /** Age threshold for pruning, in milliseconds — presumably epoch time; verify with caller. */
+  2: i64 historyPruneThresholdMs
+}
+
+/**
+ * A single storage mutation; as a thrift union, exactly one field is set.
+ * NOTE(review): field IDs 3 and 4 are skipped — presumably removed ops; do not reuse.
+ */
+union Op {
+  1: SaveFrameworkId saveFrameworkId
+  2: SaveAcceptedJob saveAcceptedJob
+  5: RemoveJob removeJob
+  6: SaveTasks saveTasks
+  7: RemoveTasks removeTasks
+  8: SaveQuota saveQuota
+  9: RemoveQuota removeQuota
+  10: SaveHostAttributes saveHostAttributes
+  11: RewriteTask rewriteTask
+  12: SaveLock saveLock
+  13: RemoveLock removeLock
+  14: SaveJobUpdate saveJobUpdate
+  15: SaveJobUpdateEvent saveJobUpdateEvent
+  16: SaveJobInstanceUpdateEvent saveJobInstanceUpdateEvent
+  17: PruneJobUpdateHistory pruneJobUpdateHistory
+}
+
+// The current schema version ID.  This should be incremented each time the
+// schema is changed, and support code for schema migrations should be added.
+const i32 CURRENT_SCHEMA_VERSION = 1
+
+// Represents a series of local storage mutations that should be applied in a single atomic
+// transaction.
+struct Transaction {
+  // Mutations to apply, in order.
+  1: list<Op> ops
+  // Schema version the ops were written against; see CURRENT_SCHEMA_VERSION.
+  2: i32 schemaVersion
+}
+
+/** A job configuration stored with the ID of the manager that owns it. */
+struct StoredJob {
+  1: string jobManagerId
+  // NOTE(review): field ID 2 is skipped — presumably a removed field; do not reuse.
+  3: api.JobConfiguration jobConfiguration
+}
+
+/** Metadata about the scheduler build and its framework registration. */
+struct SchedulerMetadata {
+  1: string frameworkId
+  // The SHA of the repo.
+  2: string revision
+  // The tag of the repo.
+  3: string tag
+  // The timestamp of the build.
+  4: string timestamp
+  // The user who built the scheduler.
+  5: string user
+  // The machine that built the scheduler.
+  6: string machine
+  7: api.APIVersion version
+}
+
+/** A role's resource quota, as stored in a Snapshot. */
+struct QuotaConfiguration {
+  1: string role
+  2: api.ResourceAggregate quota
+}
+
+// Represents a complete snapshot of local storage data suitable for restoring the local storage
+// system to its state at the time the snapshot was taken.
+struct Snapshot {
+
+  // The timestamp when the snapshot was made in milliseconds since the epoch.
+  1: i64 timestamp
+
+  // NOTE(review): field IDs 2 and 7 are skipped — presumably removed fields; do not reuse.
+  3: set<api.HostAttributes> hostAttributes
+  4: set<api.ScheduledTask> tasks
+  5: set<StoredJob> jobs
+  6: SchedulerMetadata schedulerMetadata
+  8: set<QuotaConfiguration> quotaConfigurations
+  9: set<api.Lock> locks
+  10: set<StoredJobUpdateDetails> jobUpdateDetails
+}
+
+// A message header that calls out the number of expected FrameChunks to follow to form a complete
+// message.
+struct FrameHeader {
+
+  // The number of FrameChunks following this FrameHeader required to reconstitute its message.
+  1: i32 chunkCount
+
+  // The MD5 checksum over the binary blob that was chunked across chunkCount chunks to decompose
+  // the message.
+  2: binary checksum
+}
+
+// A chunk of binary data that can be assembled with others to reconstitute a fully framed message.
+struct FrameChunk {
+  // NOTE(review): field ID 1 is skipped — presumably a removed field; do not reuse.
+  2: binary data
+}
+
+// Frames form a series of LogEntries that can be re-assembled into a basic log entry type like a
+// Snapshot.  The Frame protocol is that a single FrameHeader is followed by one or more FrameChunks
+// that can be re-assembled to obtain the binary content of a basic log entry type.
+//
+// In the process of reading a Frame, invalid data should always be logged and skipped as it may
+// represent a failed higher level transaction where a FrameHeader successfully appends but not all
+// the chunks required to complete the full message frame successfully commit.  For example: if a
+// Snapshot is framed, it might break down into 1 FrameHeader followed by 5 FrameChunks.  It could
+// be that the FrameHeader and 2 chunks get written successfully, but the 3rd and subsequent chunks
+// fail to append.  In this case, the storage mechanism would throw to indicate a failed transaction
+// at write-time leaving a partially framed message in the log stream that should be skipped over at
+// read-time.
+union Frame {
+  1: FrameHeader header
+  2: FrameChunk chunk
+}
+
+// A ScheduledTask with its assignedTask.task field set to null. Deserializers must fill in
+// assignedTask.task with the TaskConfig identified by taskConfigId (which is an index into the
+// DeduplicatedSnapshot's taskConfigs list).
+struct DeduplicatedScheduledTask {
+  // The task, with assignedTask.task set to null.
+  1: api.ScheduledTask partialScheduledTask
+  // Index into DeduplicatedSnapshot.taskConfigs identifying this task's TaskConfig.
+  2: i32 taskConfigId
+}
+
+// A Snapshot that has had duplicate TaskConfig structs removed to save space. The
+// partialSnapshot field is a normal Snapshot with the tasks field set to null. To create the
+// full Snapshot deserializers must fill in this field with the result of recreating each
+// partial task using the referenced entry in taskConfigs.
+struct DeduplicatedSnapshot {
+   // Snapshot with its tasks field unset.
+   1: Snapshot partialSnapshot
+   // ScheduledTasks that have had their assignedTask.task field replaced with an ID to save space.
+   2: list<DeduplicatedScheduledTask> partialTasks
+   // Ordered list of taskConfigs. The taskConfigId field of DeduplicatedScheduledTask is an index
+   // into this.
+   3: list<api.TaskConfig> taskConfigs
+}
+
+// A scheduler storage write-ahead log entry consisting of no-ops to skip over or else snapshots or
+// transactions to apply.  Any entry type can also be chopped up into frames if the entry is too big
+// for whatever reason.
+union LogEntry {
+  // The full state of the scheduler at some point-in-time. Transactions appearing before this
+  // entry in the log can be ignored.
+  1: Snapshot snapshot
+
+  // An incremental update to apply to the scheduler storage.
+  2: Transaction transaction
+
+  // The value should be ignored - both true and false signal an equivalent no operation marker.
+  3: bool noop
+
+  // A frame that can be reassembled with others to form a complete LogEntry.
+  4: Frame frame
+
+  // A LogEntry that is first serialized in the thrift binary format,
+  // then compressed using the "deflate" compression format.
+  // Deflated entries are expected to be un-framed.  They may be pieced together by multiple frames,
+  // but the contents of the deflated entry should not be a Frame.
+  5: binary deflatedEntry
+
+  // The full state of the scheduler at some point-in-time, in a compact layout. Transactions
+  // appearing before this entry in the log can be ignored.
+  6: DeduplicatedSnapshot deduplicatedSnapshot
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/aurora/gen/test.thrift
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/aurora/gen/test.thrift b/api/src/main/thrift/org/apache/aurora/gen/test.thrift
new file mode 100644
index 0000000..cd08f28
--- /dev/null
+++ b/api/src/main/thrift/org/apache/aurora/gen/test.thrift
@@ -0,0 +1,259 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace java org.apache.aurora.gen.test
+namespace py gen.apache.aurora.test
+
+// Test data for Thrift interface definition for the Twitter Mesos Scheduler.
+
+// Job path identifiers expected to pass validation.
+const set<string> VALID_IDENTIFIERS = ["devel",
+                                       "dev-prod",
+                                       "Dev_prod-",
+                                       "deV.prod",
+                                       ".devprod.."]
+
+// Job path identifiers expected to fail validation (contain slashes or whitespace).
+const set<string> INVALID_IDENTIFIERS = ["dev/prod",
+                                         "dev prod",
+                                         "/dev/prod",
+                                         "///",
+                                         "new\nline",
+                                         "    hello world."]
+
+
+// Valid cron schedules (taken from a list of cron schedules running at Twitter).
+// Each entry is a 5-field crontab-style expression:
+// minute, hour, day-of-month, month, day-of-week.
+const set<string> VALID_CRON_SCHEDULES = [
+    "25 */2 * * *",
+    "05 */1 * * *",
+    "5 */4 * * *",
+    "42 * * * *",
+    "30 07 * * *",
+    "28 23 * * 3",
+    "11 * * * *",
+    "23 */2 * * *",
+    "10,40 8,9,10,11 * * *",
+    "22 * * * *",
+    "15 8,13,18 * * *",
+    "15,45 * * * *",
+    "0 */1 * * *",
+    "02 6 * * *",
+    "1 * * * *",
+    "37 */1 * * *",
+    "0 0/3 * * *",
+    "19 12,23 * * *",
+    "41 4,12,20 * * *",
+    "16 09,16 * * *",
+    "17 1,9,17 * * 0",
+    "*/1 * * * *",
+    "0 17,23 * * *",
+    "42 18,21,23,3,7 * * *",
+    "20 12 * * *",
+    "3,13,23,33,43,53 * * * *",
+    "50 07 * * *",
+    "00 22 * * 0,2,5",
+    "6 2,10,18 * * *",
+    "59 */4 * * *",
+    "17 00 * * *",
+    "10 06 * * *",
+    "13,28,43,58 * * * *",
+    "0 */3 * * *",
+    "0 2 * * *",
+    "30 * * * *",
+    "0 8,11,14,17,20 * * *",
+    "10 * * * *",
+    "30 06 * * *",
+    "23 * * * *",
+    "30 9 * * *",
+    "02 6,10,14 * * *",
+    "19 * * * *",
+    "7 12 * * *",
+    "0 * * * *",
+    "*/10 * * * *",
+    "30 14,16,18,20,22 * * *",
+    "0,10,20,30,40,50 * * * *",
+    "30 0,12 * * *",
+    "45 22 * * *",
+    "20 */3 * * *",
+    "14 9,21 * * *",
+    "*/20 * * * *",
+    "1 7,15,23 * * *",
+    "*/2 * * * *",
+    "25 14 * * *",
+    "0 */6 * * *",
+    "30 6 * * *",
+    "0 16,20,0,3,6 * * *",
+    "5,15,25,35,45,55 * * * *",
+    "22 3,15 * * *",
+    "0 1,3,5,7,9,11,13 * * *",
+    "14 8,17 * * *",
+    "30 21 * * 0,2,5",
+    "44 23 * * *",
+    "35,50 * * * *",
+    "10 01,07,13,19 * * *",
+    "1 14 * * *",
+    "29 9,16,22 * * *",
+    "12 2 * * *",
+    "0 22 * * *",
+    "1 */6 * * *",
+    "0 20 * * 4",
+    "0 9,12,15,18,21 * * *",
+    "30 2 * * *",
+    "15 * * * *",
+    "0 20 * * 1",
+    "0 */12 * * *",
+    "17 0,5,17,20 * * *",
+    "0 20 * * *",
+    "30 18,23 * * MON-FRI",
+    "0 22 * * 1",
+    "35 23 * * *",
+    "55 03 * * *",
+    "23 3,15,22 * * *",
+    "20 13 * * *",
+    "30 11 * * *",
+    "*/5 * * * *",
+    "*/6 * * * *",
+    "0 2,4,6,8,10,12 * * *",
+    "10 02,08,12 * * *",
+    "0 11 * * *",
+    "05 02,08,12 * * *",
+    "10 11 * * *",
+    "22 */6 * * *",
+    "00 08 * * *",
+    "0 2 1 * *",
+    "30 19 * * 1,4",
+    "0 */4 * * *",
+    "10,30,50 * * * *",
+    "22 22 * * *",
+    "00 11 * * *",
+    "29 16,17,18,22 * * *",
+    "30 22 * * *",
+    "05 * * * *",
+    "0 23 * * 3",
+    "15 */12 * * *",
+    "30 19 * * 2,5",
+    "*/30 * * * *",
+    "22 6,18 * * *",
+    "0 5 * * *",
+    "15 8 * * 1,3,5",
+    "0 8 * * *",
+    "10 0 * * *",
+    "40 11 * * *",
+    "0 0 * * 1",
+    "17 2,4,11,18 * * *",
+    "30 0,8,16 * * *",
+    "27 8,20 * * *",
+    "0 0 * * *",
+    "20 18 * * 2,4,6",
+    "15 11 * * 2,6",
+    "0,15,30,45 * * * *",
+    "45 * * * *",
+    "36 * * * *",
+    "45 17 * * 0,2,4",
+    "0 */2 * * *",
+    "12 3,15,22 * * *",
+    "2,7,12,17,22,27,32,37,42,47,52,57 * * * *",
+    "0 23 2-31 * *",
+    "10 16 * * *",
+    "3 * * * *",
+    "42 8,20 * * *",
+    "0 15,19,23,2,5 * * *",
+    "30 10 * * *",
+    "2 2 * * *",
+    "0 12 * * *",
+    "15 17 * * *",
+    "0 7 * * *",
+    "1 2 * * *",
+    "30 19 * * 0,3",
+    "15 10 * * 2,6",
+    "55 */1 * * *",
+    "15 */4 * * *",
+    "0 3,6,9,12,15,18,21 * * *",
+    "11 0 * * *",
+    "0 6 * * *",
+    "0 3 * * *",
+    "20 7,12,17 * * *",
+    "21 * * * *",
+    "58 * * * *",
+    "10 02,08,14,20 * * *",
+    "0 1,2,3,4,5,6,7,8,9,10,11,12,13 * * *",
+    "50,59 * * * *",
+    "30,45 18 * * 1",
+    "0 1 * * *",
+    "7 * * * *",
+    "0 14 * * *",
+    "09 22 * * *",
+    "00 17 1-3,5-31 * *",
+    "17 1,9,17 * * 1-6",
+    "45 */4 * * *",
+    "20 2,12,22 * * *",
+    "15 3 * * *",
+    "0 3 1 * *",
+    "42 10 * * *",
+    "12 7 * * *",
+    "1,31 * * * *",
+    "00 01 * * *",
+    "0 9 1 * *",
+    "50 */4 * * *",
+    "43 * * * *",
+    "10,40 9,10,11,12 * * *",
+    "35 * * * *",
+    "8 1,9,17 * * *",
+    "45,54 * * * *",
+    "49 6,18 * * *",
+    "25 12,18,23 * * *",
+    "20 18 * * 1,3,5",
+    "45 17 * * 2,4",
+    "0 17 * * *",
+    "30 */6 * * *",
+    "52 0,6,12,18 * * *",
+    "*/15 * * * *",
+    "1-56/5 * * * *",
+    "0 2,3,4,5,6,7,8,9,10,11,12,13 * * *",
+    "12 * * * *",
+    "*/3 * * * *",
+    "*/4 * * * *",
+    "3 1,9,17 * * *",
+    "00 14 * * *",
+    "10 05 * * *",
+    "8 21 * * *",
+    "0 13 * * 2",
+    "0 13 * * 3",
+    "0 19 * * *",
+    "0 21 * * *",
+    "23 */4 * * *",
+    "10 3,11,19 * * *",
+    "* * * * *",
+    "30 14 * * *",
+    "03,18,33,48 * * * *",
+    "0 11,23 * * *",
+    "30 20 * * 0,2,5",
+    "30 02,08,12 * * *",
+    "45 1,3,5,7,9,11,13,15,17,19,21,23 * * *",
+    "7 */2 * * *",
+    "30 16 * * *",
+    "5 * * * *",
+    "04 06,12,18,23 * * *",
+    "00 */2 * * *",
+    "00 06,15 * * *",
+    "35 */2 * * *",
+    "1 5,13,21 * * *",
+    "47 */2 * * *",
+    "10 21 * * *",
+    "00 21 * * *",
+    "26 2,6,10,14,18,22 * * *",
+    "00 11 * * 0,2,5",
+    "0 18,22 * * MON-FRI",
+    "00 21 * * 0,2,5",
+    "0 17-19 * * 1",
+    "15 9 * * 1,3,5"]

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/thermos/BUILD
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/thermos/BUILD b/api/src/main/thrift/org/apache/thermos/BUILD
new file mode 100644
index 0000000..d0d789a
--- /dev/null
+++ b/api/src/main/thrift/org/apache/thermos/BUILD
@@ -0,0 +1,25 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+
+python_thrift_library(
+  name = 'py-thrift',
+  sources = ['thermos_internal.thrift'],
+  provides = setup_py(
+    name = 'apache.gen.thermos',
+    # Version string is read from the .auroraversion file at the build root.
+    version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+    description = 'Autogenerated Thermos thrift schemas.',
+  )
+)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/api/src/main/thrift/org/apache/thermos/thermos_internal.thrift
----------------------------------------------------------------------
diff --git a/api/src/main/thrift/org/apache/thermos/thermos_internal.thrift b/api/src/main/thrift/org/apache/thermos/thermos_internal.thrift
new file mode 100644
index 0000000..2c449a4
--- /dev/null
+++ b/api/src/main/thrift/org/apache/thermos/thermos_internal.thrift
@@ -0,0 +1,99 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace py gen.apache.thermos
+
+enum ProcessState {
+  // normal state
+  WAITING   = 0   // blocked on execution dependencies or footprint restrictions
+  FORKED    = 1   // starting, need to wait for signal from Process that it's running
+  RUNNING   = 2   // currently running
+  SUCCESS   = 3   // returncode == 0
+  KILLED    = 4   // Killed by user action or task failure, runner teardown.
+
+  // abnormal states
+  FAILED    = 5   // returncode != 0
+  LOST      = 6   // the coordinator either died or some condition caused us to lose it
+                  // e.g. reboot.
+}
+
+struct ProcessStatus {
+  // Sequence number, must be monotonically increasing for all
+  // ProcessState messages for a particular process across all runs.
+  1: i64             seq
+
+  // Process name
+  3: string          process
+
+  5: ProcessState    state
+
+  // WAITING -> FORKED
+ 10: i32             coordinator_pid
+ 11: double          fork_time
+
+  // FORKED -> RUNNING
+  6: double          start_time
+  7: i32             pid
+
+  // RUNNING -> {FINISHED, FAILED, KILLED}
+  8: double          stop_time
+  9: i32             return_code
+
+  // {FORKED, RUNNING} -> LOST nothing happens.  this ProcessState ceases to exist.
+  // Doesn't count against the run total.
+}
+
+enum TaskState {
+  ACTIVE     = 0  // Regular plan is being executed
+  CLEANING   = 5  // Regular plan has failed/finished and is being cleaned up
+                  // Existing processes get SIGTERMs.
+                  // Once all processes are finished, => FINALIZING
+                  // If finalization wait overflows, SIGKILL and transition to terminal.
+  FINALIZING = 6  // Finalizing plan is being executed
+  SUCCESS    = 1  // Task has succeeded
+  FAILED     = 2  // Task has failed
+  KILLED     = 3  // Task has been killed
+  LOST       = 4  // Task is lost (special state reserved for garbage collection.)
+}
+
+struct TaskStatus {
+  1: TaskState state
+  2: i64       timestamp_ms
+  3: i32       runner_pid
+  4: i32       runner_uid
+}
+
+// The first framed message in the Ckpt stream.
+struct RunnerHeader {
+  1: string task_id
+  2: i64    launch_time_ms  // kill this
+  3: string sandbox
+  7: string log_dir
+  4: string hostname        // kill this
+  5: string user
+  8: i32    uid             // added as a check in case user disappears
+  6: map<string, i64> ports
+}
+
+union RunnerCkpt {
+  1: RunnerHeader       runner_header
+  2: ProcessStatus      process_status
+  3: TaskStatus         task_status
+}
+
+struct RunnerState {
+  1: RunnerHeader header
+  2: list<TaskStatus> statuses
+  3: map<string, list<ProcessStatus>> processes
+}

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/build-support/python/make-pycharm-virtualenv
----------------------------------------------------------------------
diff --git a/build-support/python/make-pycharm-virtualenv b/build-support/python/make-pycharm-virtualenv
index 8f58d4d..85e8137 100755
--- a/build-support/python/make-pycharm-virtualenv
+++ b/build-support/python/make-pycharm-virtualenv
@@ -22,8 +22,8 @@ PROJECT_NAME=${PWD##*/}
 
 pushd "$BUILDROOT"
   ./pants goal dependencies --dependencies-external src/test/python:all > requirements.txt
-  ./pants setup_py src/main/thrift/org/apache/aurora/gen:py-thrift-packaged
-  ./pants setup_py src/main/thrift/org/apache/thermos:py-thrift
+  ./pants setup_py api/src/main/thrift/org/apache/aurora/gen:py-thrift-packaged
+  ./pants setup_py api/src/main/thrift/org/apache/thermos:py-thrift
   source build-support/pants.venv/bin/activate
     pytest_requirement=$(pip freeze | grep pytest==)
     pytest_cov_requirement=$(pip freeze | grep pytest-cov==)

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/build-support/release/make-python-sdists
----------------------------------------------------------------------
diff --git a/build-support/release/make-python-sdists b/build-support/release/make-python-sdists
index e0d20a1..a7a627f 100755
--- a/build-support/release/make-python-sdists
+++ b/build-support/release/make-python-sdists
@@ -38,8 +38,8 @@ TARGETS=(
   src/main/python/apache/thermos/core
   src/main/python/apache/thermos/monitoring
   src/main/python/apache/thermos/observer
-  src/main/thrift/org/apache/aurora/gen:py-thrift-packaged
-  src/main/thrift/org/apache/thermos:py-thrift
+  api/src/main/thrift/org/apache/aurora/gen:py-thrift-packaged
+  api/src/main/thrift/org/apache/thermos:py-thrift
 )
 
 cd "`git rev-parse --show-toplevel`"

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/build-support/thrift/thriftw
----------------------------------------------------------------------
diff --git a/build-support/thrift/thriftw b/build-support/thrift/thriftw
new file mode 100755
index 0000000..50d6dfd
--- /dev/null
+++ b/build-support/thrift/thriftw
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Wrapper for thrift that attempts to use the system thrift if it's of the expected version,
+# otherwise it bootstraps a new one.
+set -e -u
+
+if [[ $# -lt 1 ]]; then
+  cat <<EOF
+Usage: thriftw EXPECTED_THRIFT_VERSION THRIFT_ARGS...
+
+Run the thrift compiler at EXPECTED_THRIFT_VERSION with THRIFT_ARGS, bootstrapping if necessary.
+EOF
+fi
+expected_version=$1
+shift
+
+HERE=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
+
+if which thrift >/dev/null 2>&1; then
+  if [[ $(thrift --version) = "Thrift version $expected_version" ]]; then
+    exec thrift "$@"
+  fi
+fi
+
+thrift="$HERE"/thrift-$expected_version/compiler/cpp/thrift
+if [[ ! -x "$thrift" ]]; then
+  make -C "$HERE"
+fi
+exec "$thrift" "$@"

http://git-wip-us.apache.org/repos/asf/incubator-aurora/blob/91b8d193/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 9a71d00..f1a3171 100644
--- a/build.gradle
+++ b/build.gradle
@@ -11,6 +11,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import org.apache.aurora.build.CoverageReportCheck
 
 plugins {
   id 'com.eriwen.gradle.js' version '1.12.1'
@@ -21,28 +22,158 @@ plugins {
 apply plugin: 'application'
 apply plugin: 'checkstyle'
 apply plugin: 'findbugs'
-apply plugin: 'idea'
 apply plugin: 'jacoco'
-apply plugin: 'java'
-apply plugin: 'maven-publish'
 apply plugin: 'pmd'
-apply plugin: 'project-report'
 
-buildDir = 'dist'
-def generatedDir = "$buildDir/generated-src"
-def generatedJavaDir = "$generatedDir/gen-java"
-def generatedJSDir = "$generatedDir/gen-js"
-def generatedResourceDir = "$generatedDir/resources"
-def httpAssetsPath = 'scheduler/assets'
+allprojects {
+  apply plugin: 'java'
+  apply plugin: 'idea'
+  apply plugin: 'maven-publish'
+  apply plugin: 'project-report'
 
-def thriftVersion = '0.9.1'
+  buildDir = 'dist'
 
-compileJava {
-  sourceCompatibility = 1.7
-  targetCompatibility = 1.7
+  repositories {
+    mavenCentral()
+    maven {
+      url 'http://maven.twttr.com/'
+    }
+  }
+
+  compileJava {
+    sourceCompatibility = 1.7
+    targetCompatibility = 1.7
+  }
+
+  group 'org.apache.aurora'
+  version = file("${rootDir}/.auroraversion").text.trim().toUpperCase()
+
+  task sourceJar(type: Jar) {
+    from sourceSets.main.allJava
+  }
+
+  if (project.hasProperty('internalMavenUrl')) {
+    publishing {
+      repositories {
+        maven {
+          credentials {
+            username = internalMavenUser
+            password = internalMavenPass
+          }
+          url internalMavenUrl
+        }
+      }
+    }
+  }
+
+  ext.slf4jRev = '1.6.6'
+  def slf4jApiDep = "org.slf4j:slf4j-api:${slf4jRev}"
+  dependencies {
+    compile slf4jApiDep
+  }
+  ext.gsonRev = '2.2.4'
+  ext.guavaRev = '16.0'
+  ext.thriftRev = '0.9.1'
+
+  configurations {
+    compile {
+      resolutionStrategy {
+        failOnVersionConflict()
+        force slf4jApiDep
+      }
+    }
+  }
 }
 
-tasks.withType(JavaCompile).matching { it.name != 'compileGeneratedJava' }.all {
+project(':api') {
+  apply plugin: org.apache.aurora.build.ThriftPlugin
+  apply plugin: org.apache.aurora.build.ThriftEntitiesPlugin
+
+  task checkPython << {
+    def python27Executable = ['python2.7', 'python'].find { python ->
+      try {
+        def check = "import sys; sys.exit(0 if sys.version_info >= (2,7) and sys.version_info < (3,) else 1)"
+        return [python, "-c", check].execute().waitFor() == 0
+      } catch (IOException e) {
+        return false
+      }
+    }
+
+    if (python27Executable == null) {
+      throw new GradleException('Build requires Python 2.7.')
+    } else {
+      thriftEntities.python = python27Executable
+    }
+  }
+  generateThriftEntitiesJava.dependsOn checkPython
+
+  tasks.withType(Jar) {
+    baseName "aurora-api"
+  }
+
+  publishing {
+    publications {
+      mavenJava(MavenPublication) {
+        from components.java
+
+        artifactId "aurora-api"
+
+        artifact sourceJar {
+          classifier "sources"
+        }
+      }
+    }
+  }
+
+  thrift {
+    version = thriftRev
+    resourcePrefix = 'org/apache/aurora/scheduler/gen/client'
+  }
+
+  thriftEntities {
+    gsonRev = project.gsonRev
+    guavaRev = project.guavaRev
+  }
+
+  idea {
+    module {
+      [thrift.genJavaDir, thriftEntities.genJavaDir].each {
+        sourceDirs += it
+        generatedSourceDirs += it
+      }
+      // These directories must exist, else the plugin omits them from the
+      // generated project. Since this is executed during the configuration
+      // lifecycle phase, dependency tasks have not yet run and created
+      // the directories themselves.
+      // By default, the idea module [1] excludes are set to
+      // [project.buildDir, project.file('.gradle')]
+      // This has the side-effect of also excluding our generated sources [2].  Due to the way
+      // directory exclusion works in idea, you can't exclude a directory and include a child of that
+      // directory. Clearing the excludes seems to have no ill side-effects, making it preferable to
+      // other possible approaches.
+      //
+      // [1] http://www.gradle.org/docs/current/dsl/org.gradle.plugins.ide.idea.model.IdeaModule.html
+      // [2] http://issues.gradle.org/browse/GRADLE-1174
+      excludeDirs = [file(".gradle")]
+      [
+          "classes",
+          "dependency-cache",
+          "docs",
+          "jacoco",
+          "reports",
+          "test-results",
+          "tmp"
+      ].each {
+        excludeDirs << file("$buildDir/$it")
+      }
+    }
+  }
+}
+
+def generatedDir = "$buildDir/generated-src"
+def httpAssetsPath = 'scheduler/assets'
+
+compileJava {
   options.compilerArgs << '-Werror'
   options.compilerArgs << '-Xlint:all'
   // Don't fail for annotations not claimed by annotation processors.
@@ -55,102 +186,46 @@ task wrapper(type: Wrapper) {
   gradleVersion = project(':buildSrc').GRADLE_VERSION
 }
 
-task sourceJar(type: Jar) {
-  from sourceSets.main.allJava
-}
-
-group 'org.apache.aurora'
-version = file('.auroraversion').text.trim().toUpperCase()
-
+// TODO(ksweeney): Consider pushing this down to API - the scheduler implementation itself should
+// only be consumed as an application.
 publishing {
   publications {
-    maven(MavenPublication) {
-      artifactId 'aurora-scheduler'
+    mavenJava(MavenPublication) {
       from components.java
 
+      artifactId 'aurora-scheduler'
+
       artifact sourceJar {
         classifier "sources"
       }
     }
   }
-  if (project.hasProperty('internalMavenUrl')) {
-    repositories {
-      maven {
-        credentials {
-          username = internalMavenUser
-          password = internalMavenPass
-        }
-        url internalMavenUrl
-      }
-    }
-  }
-}
-
-repositories {
-  mavenCentral()
-  maven {
-    url 'http://maven.twttr.com/'
-  }
 }
 
 sourceSets {
-  test {
-    resources {
-      srcDir 'src/main/resources'
-      srcDir 'src/test/resources'
-      srcDir '3rdparty/javascript'
-      srcDir generatedJSDir
-      srcDir generatedResourceDir
-    }
-  }
-  generated {
-    java.srcDirs = [generatedDir]
-  }
   main {
-    compileClasspath += sourceSets.generated.output
     resources {
       srcDir '3rdparty/javascript'
-      srcDir generatedJSDir
-      srcDir generatedResourceDir
     }
   }
-  test {
-    compileClasspath += sourceSets.generated.output
-    runtimeClasspath += sourceSets.generated.output
-  }
-}
-/*  A note on libthrift: All of com.twitter.common depends on libthrift 0.5.x. We depend on
-    libthrift 0.9.x. There are binary incompatibilities between the two versions and resolving
-    them involved forking com.twitter.common classes into Aurora to use the new libthrift API. Be
-    very cautious when either upgrading libthrift or com.twitter.common dependencies!!!
- */
-
-jar {
-  from sourceSets.generated.output
-  manifest {
-    attributes('Created-By': 'Gradle')
-    attributes('Main-Class': 'org.apache.aurora.scheduler.app.SchedulerMain')
-  }
 }
 
 dependencies {
   def guiceRev = '3.0'
   def jerseyRev = '1.18.1'
   def log4jRev = '1.2.17'
-  def slf4jRev = '1.6.1'
   def junitRev = '4.11'
 
-  def gsonDep = 'com.google.code.gson:gson:2.2.4'
-  def guavaDep = 'com.google.guava:guava:16.0'
+  def gsonDep = "com.google.code.gson:gson:${gsonRev}"
+  def guavaDep = "com.google.guava:guava:${guavaRev}"
   // NOTE: We are using the jetty 7.x series due to a large number of dependencies impacted
   // by 8.x and later resulting from using newer javax.servlet servlet-api.
   def jettyDep = '7.6.15.v20140411'
-  def thriftLib = "org.apache.thrift:libthrift:${thriftVersion}"
+
+  compile project(':api')
 
   compile 'aopalliance:aopalliance:1.0'
   compile 'com.google.code.findbugs:jsr305:2.0.1'
-  compile gsonDep
-  compile guavaDep
   compile "com.google.inject:guice:${guiceRev}"
   compile "com.google.inject.extensions:guice-assistedinject:${guiceRev}"
   compile 'com.google.protobuf:protobuf-java:2.5.0'
@@ -165,7 +240,6 @@ dependencies {
   compile "log4j:log4j:${log4jRev}"
   compile 'org.antlr:stringtemplate:3.2.1'
   compile 'org.apache.mesos:mesos:0.20.1'
-  compile thriftLib
   compile 'org.apache.zookeeper:zookeeper:3.3.4'
   compile "org.eclipse.jetty:jetty-rewrite:${jettyDep}"
   compile "org.eclipse.jetty:jetty-server:${jettyDep}"
@@ -174,7 +248,6 @@ dependencies {
   compile 'org.mybatis:mybatis:3.2.7'
   compile 'org.mybatis:mybatis-guice:3.6'
   compile 'org.quartz-scheduler:quartz:2.2.1'
-  compile "org.slf4j:slf4j-api:${slf4jRev}"
   compile "org.slf4j:slf4j-jdk14:${slf4jRev}"
   compile 'com.twitter.common.logging:log4j:0.0.7'
   compile 'com.twitter.common.zookeeper.guice:client-flagged:0.0.5'
@@ -212,24 +285,18 @@ dependencies {
   testCompile 'com.twitter.common:zookeeper-testing:0.0.45'
   testCompile "junit:junit:${junitRev}"
 
-  generatedCompile gsonDep
-  generatedCompile guavaDep
-  generatedCompile thriftLib
-
   configurations.compile {
     exclude module: 'junit-dep'
     resolutionStrategy {
-      failOnVersionConflict()
-
       def forceDepVersions = [
         // Force versions based on the dependencies we use from above
         'com.google.code.gson:gson': '2.2.4',
-        'org.slf4j:slf4j-api' : slf4jRev,
         'log4j:log4j' : log4jRev,
-        'org.apache.thrift:libthrift' : thriftVersion,
         'junit:junit' : junitRev,
         // Force versions based on inter-dependency collisions
         'org.hamcrest:hamcrest-core' : '1.3',
+        'org.apache.thrift:libthrift': thriftRev,
+        'org.slf4j:slf4j-jdk14': slf4jRev,
       ]
 
       force forceDepVersions.collect { dep, ver -> "$dep:$ver" }
@@ -254,7 +321,7 @@ codeQualityTasks.each {
 }
 
 checkstyle {
-  sourceSets = [ sourceSets.main , sourceSets.test]
+  sourceSets = [sourceSets.main , sourceSets.test]
 }
 
 tasks.withType(FindBugs) {
@@ -288,36 +355,6 @@ pmd {
   consoleOutput = true
 }
 
-task checkPython() {
-  def py_versions = ['python2.7', 'python2.6', 'python']
-
-  project.ext.set('py', '')
-
-  py_versions.each { python_exe ->
-    if (project.py.isEmpty()) {
-      // Look for the first version of python listed in py_versions greater than 2.6.
-      // Execute will throw an exception if that python command does not exist,
-      // and set project.py to be empty
-      try {
-        def check = "import sys; sys.stdout.write(str(sys.version_info > (2,6) and sys.version_info < (3,)))"
-        def cmd = [python_exe, "-c", check].execute()
-        def output = cmd.in.text.trim()
-
-        if(output.toLowerCase() == 'true') {
-          project.py = python_exe
-        }
-      } catch (Exception e) {
-          project.py = ''
-      }
-    }
-  }
-
-  doLast {
-    if (project.py.isEmpty()) {
-      throw new GradleException('Build requires Python 2.6 or Python 2.7')
-    }
-  }
-}
 
 /**
  * There is a jshint target recommended in the README for gradle-js-plugin
@@ -353,144 +390,38 @@ task jsHint(type:com.eriwen.gradle.js.tasks.JsHintTask) {
 }
 tasks.checkstyleMain.dependsOn(jsHint)
 
-/**
- * Check if Apache Thrift is all ready installed and is the same version as we
- * depend on, otherwise compile the version in build-support. project.thrift will
- * contain the path to the thrift executable when finished
- */
-task bootstrapThrift {
-  logging.captureStandardOutput LogLevel.INFO
-
-  project.ext.set('thrift', '')
-
-  try {
-    // Attempt to run thrift and get the version string back. if no version of thrift is available
-    // execute will throw an exception, catch and set project.thrift as empty to build the local version
-    def output = "thrift --version".execute().text.trim()
-    if(output == "Thrift version ${thriftVersion}") {
-      project.thrift = 'thrift'
-    }
-  } catch (IOException e) {
-    project.thrift = ''
-  }
-
-  // If thrift was not found or was the wrong version build our local copy
-  if (project.thrift.isEmpty()) {
-    project.thrift = "build-support/thrift/thrift-${thriftVersion}/compiler/cpp/thrift"
-
-    inputs.file file(project.thrift)
-    outputs.dir file(project.thrift)
-    doLast {
-      exec {
-        executable = 'make'
-        args = ['-C', 'build-support/thrift']
-      }
-    }
-  }
-}
-
-// TODO(wfarner): Extract this into a task under buildSrc/
-task generateSources(dependsOn: ['bootstrapThrift', 'checkPython']) {
-  ext.thriftDir = 'src/main/thrift/org/apache/aurora/gen'
-  def thriftFiles = fileTree(dir: thriftDir).matching { include '**/*.thrift' }
-  def codeGenerator = 'src/main/python/apache/aurora/tools/java/thrift_wrapper_codegen.py';
-  ext.inputFiles = thriftFiles + files(codeGenerator)
-  ext.outputDir = file(generatedDir)
-  ext.jsOutputDir = file("$generatedJSDir/$httpAssetsPath/js")
-  inputs.file inputFiles
-  outputs.dir outputDir
-  doLast {
-    outputDir.exists() || outputDir.mkdirs()
-    thriftFiles.each { File file ->
-      exec {
-        executable = project.thrift
-        args = ['--gen', 'java:hashcode,private-members', '-o', outputDir, file]
-      }
-    }
-    exec {
-      executable = project.py
-      args = [codeGenerator,
-              'src/main/thrift/org/apache/aurora/gen/api.thrift',
-              generatedJavaDir,
-              "$generatedResourceDir/$httpAssetsPath"]
-    }
-
-    // Generate thrift javascript separately since we don't need all IDLs, and we want to place
-    // sources in the layout for assets served by the scheduler.
-    jsOutputDir.exists() || jsOutputDir.mkdirs()
-    exec {
-      executable = project.thrift
-      args = ['--gen', 'js:jquery', '-out', jsOutputDir, "$thriftDir/api.thrift"]
-    }
-  }
-}
-
-compileGeneratedJava {
-  dependsOn generateSources
-}
-
-compileJava.source sourceSets.main.java
-
 tasks.withType(Test) {
   maxParallelForks = Runtime.runtime.availableProcessors()
 }
 
-ideaProject.dependsOn generateSources
-
 idea {
   project {
     vcs = 'Git'
   }
-  module {
-    def codegenDirs = [file(generatedJavaDir)]
-
-    // These directories must exist, else the plugin omits them from the
-    // generated project. Since this is executed during the configuration
-    // lifecycle phase, dependency tasks have not yet run and created
-    // the directories themselves.
-    codegenDirs.each { File codegenDir ->
-      codegenDir.mkdirs()
-    }
-
-    // By default, the idea module [1] excludes are set to
-    // [project.buildDir, project.file('.gradle')]
-    // This has the side-effect of also excluding our generated sources [2].  Due to the way
-    // directory exclusion works in idea, you can't exclude a directory and include a child of that
-    // directory. Clearing the excludes seems to have no ill side-effects, making it preferable to
-    // other possible approaches.
-    //
-    // [1] http://www.gradle.org/docs/current/dsl/org.gradle.plugins.ide.idea.model.IdeaModule.html
-    // [2] http://issues.gradle.org/browse/GRADLE-1174
-    excludeDirs = [file("$buildDir/reports"), file("$buildDir/test-results")]
-    sourceDirs += codegenDirs
-
-    // For some reason src/main/resources is marked as both a production source and a test source
-    // directory. In order to run the local scheduler from IntelliJ the resources directory must not
-    // be marked as test sources, so we make it explicit that it's only a production source.
-    testSourceDirs -= file("src/main/resources")
-  }
 }
 
 // Configuration parameters for the application plugin.
 applicationName = 'aurora-scheduler'
 mainClassName = 'org.apache.aurora.scheduler.app.SchedulerMain'
 
-tasks.withType(nl.javadude.gradle.plugins.license.License).each { licenseTask ->
-  licenseTask.setSource files("$projectDir/src/main/java", "$projectDir/src/test/java")
+// TODO(ksweeney): Configure this to scan resources as well.
+tasks.withType(nl.javadude.gradle.plugins.license.License).each {
+  it.source = files("$projectDir/src/main/java", "$projectDir/src/test/java")
 }
 
 license {
   header rootProject.file('config/checkstyle/apache.header')
   strictCheck true
   skipExistingHeaders true
-  ext.year = Calendar.getInstance().get(Calendar.YEAR)
 }
 
 def reportPath = "$buildDir/reports/jacoco/test"
 jacocoTestReport {
   group = "Reporting"
   description = "Generate Jacoco coverage reports after running tests."
-  additionalSourceDirs = files(sourceSets.main.allJava.srcDirs)
+
+  sourceDirectories = sourceSets.main.java
+  classDirectories = files("$buildDir/classes/main")
   reports {
     xml.enabled true
   }
@@ -508,19 +439,7 @@ task analyzeReport(type: CoverageReportCheck) {
 }
 jacocoTestReport.finalizedBy analyzeReport
 
-task FlagSchemaChanges(type: Test) << {
-  exec {
-    executable = 'bash'
-    args = ['src/test/sh/org/apache/aurora/verify_thrift_checksum.sh']
-  }
-}
-
-javadoc {
-  classpath += sourceSets.generated.output
-}
-
 run {
-  classpath += sourceSets.generated.output
-  classpath += sourceSets.test.output
   main = 'org.apache.aurora.scheduler.app.local.LocalSchedulerMain'
+  classpath += sourceSets.test.output
 }


Mime
View raw message