hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jia...@apache.org
Subject [37/38] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)
Date Thu, 04 Aug 2016 06:53:08 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
new file mode 100644
index 0000000..f8e5e7c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
@@ -0,0 +1,795 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.SliderProviderFactory;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_HOME;
+import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH;
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH;
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM;
+
+/**
+ * Represents a cluster specification; designed to be sendable over the wire
+ * and persisted in JSON by way of Jackson.
+ * 
+ * When used in cluster status operations the <code>info</code>
+ * and <code>statistics</code> maps contain information about the cluster.
+ * 
+ * As a wire format it is less efficient in both xfer and ser/deser than 
+ * a binary format, but by having one unified format for wire and persistence,
+ * the code paths are simplified.
+ *
+ * This was the original single-file specification/model used in the Hoya
+ * precursor to Slider. It's now retained primarily as a way to publish
+ * the current state of the application, or at least a fraction thereof ...
+ * the larger set of information from the REST API is beyond the scope of
+ * this structure.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+
+public class ClusterDescription implements Cloneable {
+  protected static final Logger
+    log = LoggerFactory.getLogger(ClusterDescription.class);
+
+  private static final String UTF_8 = "UTF-8";
+
+  /**
+   * version counter
+   */
+  public String version = "1.0";
+
+  /**
+   * Name of the cluster
+   */
+  public String name;
+
+  /**
+   * Type of cluster
+   */
+  public String type = SliderProviderFactory.DEFAULT_CLUSTER_TYPE;
+
+  /**
+   * State of the cluster
+   */
+  public int state;
+  
+  /*
+   State list for both clusters and nodes in them. Ordered so that destroyed follows
+   stopped.
+   
+   Some of the states are only used for recording
+   the persistent state of the cluster and are not
+   seen in node descriptions
+   */
+
+  /**
+   * Specification is incomplete & cannot
+   * be used: {@value}
+   */
+  public static final int STATE_INCOMPLETE = StateValues.STATE_INCOMPLETE;
+
+  /**
+   * Spec has been submitted: {@value}
+   */
+  public static final int STATE_SUBMITTED = StateValues.STATE_SUBMITTED;
+  /**
+   * Cluster created: {@value}
+   */
+  public static final int STATE_CREATED = StateValues.STATE_CREATED;
+  /**
+   * Live: {@value}
+   */
+  public static final int STATE_LIVE = StateValues.STATE_LIVE;
+  /**
+   * Stopped
+   */
+  public static final int STATE_STOPPED = StateValues.STATE_STOPPED;
+  /**
+   * destroyed
+   */
+  public static final int STATE_DESTROYED = StateValues.STATE_DESTROYED;
+  
+  /**
+   * When was the cluster specification created?
+   * This is not the time a cluster was thawed; that will
+   * be in the <code>info</code> section.
+   */
+  public long createTime;
+
+  /**
+   * When was the cluster specification last updated
+   */
+  public long updateTime;
+
+  /**
+   * URL path to the original configuration
+   * files; these are re-read when 
+   * restoring a cluster
+   */
+
+  public String originConfigurationPath;
+
+  /**
+   * URL path to the generated configuration
+   */
+  public String generatedConfigurationPath;
+
+  /**
+   * This is where the data goes
+   */
+  public String dataPath;
+
+  /**
+   * cluster-specific options -to control both
+   * the Slider AM and the application that it deploys
+   */
+  public Map<String, String> options = new HashMap<>();
+
+  /**
+   * cluster information
+   * This is only valid when querying the cluster status.
+   */
+  public Map<String, String> info = new HashMap<>();
+
+  /**
+   * Statistics. This is only relevant when querying the cluster status
+   */
+  public Map<String, Map<String, Integer>> statistics = new HashMap<>();
+
+  /**
+   * Instances: role->count
+   */
+  public Map<String, List<String>> instances = new HashMap<>();
+
+  /**
+   * Role options, 
+   * role -> option -> value
+   */
+  public Map<String, Map<String, String>> roles = new HashMap<>();
+
+
+  /**
+   * List of key-value pairs to add to a client config to set up the client
+   */
+  public Map<String, String> clientProperties = new HashMap<>();
+
+  /**
+   * Status information
+   */
+  public Map<String, Object> status;
+
+  /**
+   * Liveness information; the same as returned
+   * on the <code>live/liveness/</code> URL
+   */
+  public ApplicationLivenessInformation liveness;
+
+  /**
+   * Creator: a no-argument constructor (presumably also required for
+   * Jackson deserialization -- confirm).
+   */
+  public ClusterDescription() {
+  }
+
+  /**
+   * The cluster description as a JSON document; falls back to
+   * {@link Object#toString()} if the JSON conversion fails.
+   * @return a JSON string, or the default object string form on failure
+   */
+  @Override
+  public String toString() {
+    try {
+      return toJsonString();
+    } catch (Exception e) {
+      log.debug("Failed to convert CD to JSON ", e);
+      return super.toString();
+    }
+  }
+
+  /**
+   * Shallow clone
+   * @return a shallow clone
+   * @throws CloneNotSupportedException never: this class is Cloneable
+   */
+  @Override
+  public Object clone() throws CloneNotSupportedException {
+    return super.clone();
+  }
+
+  /**
+   * A deep clone of the spec. This is done inefficiently with a
+   * serialize/deserialize round trip.
+   * @return the cloned cluster description
+   */
+  public ClusterDescription deepClone() {
+    try {
+      return fromJson(toJsonString());
+    } catch (IOException e) {
+      // should not happen for an in-memory round trip; escalate if it does
+      throw new RuntimeException(e);
+    }
+  }
+
+
+  /**
+   * Save this cluster description to a Hadoop filesystem as pretty-printed
+   * JSON (UTF-8).
+   * @param fs filesystem
+   * @param path destination path
+   * @param overwrite should any existing file be overwritten
+   * @throws IOException IO exception
+   */
+  public void save(FileSystem fs, Path path, boolean overwrite) throws
+                                                                IOException {
+    FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
+    writeJsonAsBytes(dataOutputStream);
+  }
+
+  /**
+   * Save this cluster description to the local filesystem as JSON (UTF-8),
+   * creating the parent directory if needed.
+   * @param file destination file
+   * @throws IOException IO exception
+   */
+  public void save(File file) throws IOException {
+    log.debug("Saving to {}", file.getAbsolutePath());
+    File parent = file.getParentFile();
+    // mkdirs() returns false both on failure AND when the directory already
+    // exists, so only warn when the directory is genuinely absent afterwards;
+    // parent may be null for a bare relative filename.
+    if (parent != null && !parent.mkdirs() && !parent.isDirectory()) {
+      log.warn("Failed to mkdirs for {}", parent);
+    }
+    DataOutputStream dataOutputStream = new DataOutputStream(new FileOutputStream(file));
+    writeJsonAsBytes(dataOutputStream);
+  }
+
+  /**
+   * Write the JSON form as UTF-8 bytes, then close the stream.
+   * @param dataOutputStream an output stream that is always closed on return
+   * @throws IOException any failure
+   */
+  private void writeJsonAsBytes(DataOutputStream dataOutputStream)
+      throws IOException {
+    try {
+      String json = toJsonString();
+      byte[] b = json.getBytes(UTF_8);
+      dataOutputStream.write(b);
+    } finally {
+      dataOutputStream.close();
+    }
+  }
+
+  /**
+   * Load a cluster description from a Hadoop filesystem.
+   * @param fs filesystem
+   * @param path path to the JSON file
+   * @return a loaded CD
+   * @throws IOException IO problems, including a short read
+   * @throws JsonParseException JSON parse failure
+   * @throws JsonMappingException failure to map to this class
+   */
+  public static ClusterDescription load(FileSystem fs, Path path)
+      throws IOException, JsonParseException, JsonMappingException {
+    FileStatus status = fs.getFileStatus(path);
+    byte[] b = new byte[(int) status.getLen()];
+    FSDataInputStream dataInputStream = fs.open(path);
+    try {
+      // a single read() may return fewer bytes than requested;
+      // readFully loops until the buffer is filled or EOF is hit early
+      dataInputStream.readFully(b);
+    } finally {
+      // always release the stream, even on a failed read
+      dataInputStream.close();
+    }
+    String json = new String(b, 0, b.length, UTF_8);
+    return fromJson(json);
+  }
+
+  /**
+   * Make a deep copy of the class
+   * @param source source instance
+   * @return the copy
+   */
+  public static ClusterDescription copy(ClusterDescription source) {
+    //currently the copy is done by a generate/save. Inefficient but it goes
+    //down the tree nicely
+    try {
+      return fromJson(source.toJsonString());
+    } catch (IOException e) {
+      throw new RuntimeException("ClusterDescription copy failed " + e, e);
+    }
+  }
+
+  /**
+   * Convert to a JSON string with indented (pretty-printed) output.
+   * @return a JSON string description
+   * @throws IOException Problems mapping/writing the object
+   */
+  public String toJsonString() throws IOException,
+                                      JsonGenerationException,
+                                      JsonMappingException {
+    // NOTE(review): a new ObjectMapper per call is cheap but not free;
+    // could be cached in a static field if this shows up in profiles
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+    return mapper.writeValueAsString(this);
+  }
+
+  /**
+   * Convert from JSON
+   * @param json input JSON text
+   * @return the parsed JSON
+   * @throws IOException IO
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  public static ClusterDescription fromJson(String json)
+    throws IOException, JsonParseException, JsonMappingException {
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      // log the full document to aid debugging, then rethrow
+      return mapper.readValue(json, ClusterDescription.class);
+    } catch (IOException e) {
+      log.error("Exception while parsing json : " + e + "\n" + json, e);
+      throw e;
+    }
+  }
+
+    /**
+     * Convert from input stream
+     * @param is input stream of cluster description
+     * @return the parsed JSON
+     * @throws IOException IO
+     * @throws JsonMappingException failure to map from the JSON to this class
+     */
+    public static ClusterDescription fromStream(InputStream is)
+            throws IOException, JsonParseException, JsonMappingException {
+        if (is==null) {
+          throw new FileNotFoundException("Empty Stream");
+        }
+        ObjectMapper mapper = new ObjectMapper();
+        try {
+            return mapper.readValue(is, ClusterDescription.class);
+        } catch (IOException e) {
+            log.error("Exception while parsing input stream : {}", e, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from a JSON file
+   * @param jsonFile input file
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  public static ClusterDescription fromFile(File jsonFile)
+    throws IOException, JsonParseException, JsonMappingException {
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.readValue(jsonFile, ClusterDescription.class);
+    } catch (IOException e) {
+      log.error("Exception while parsing json file {}" , jsonFile, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Set a cluster option: a key-value pair in the options {} section
+   * @param key option name
+   * @param val option value
+   */
+  public void setOption(String key, String val) {
+    options.put(key, val);
+  }
+
+  /**
+   * Set a cluster option if it is unset. If it is already set
+   * in the Cluster Description, it is left alone
+   * @param key key to query/set
+   * @param val value to store when the key is absent or mapped to null
+   */
+
+  public void setOptionifUnset(String key, String val) {
+    if (options.get(key) == null) {
+      options.put(key, val);
+    }
+  }
+
+  /**
+   * Set an integer option -it's converted to a string before saving
+   * @param option option name
+   * @param val integer value
+   */
+  public void setOption(String option, int val) {
+    setOption(option, Integer.toString(val));
+  }
+
+  /**
+   * Set a boolean option -it's converted to a string before saving
+   * @param option option name
+   * @param val bool value
+   */
+  public void setOption(String option, boolean val) {
+    setOption(option, Boolean.toString(val));
+  }
+
+  /**
+   * Get a cluster option or value
+   *
+   * @param key option key
+   * @param defVal default returned when the key is absent
+   * @return resolved value or default
+   */
+  public String getOption(String key, String defVal) {
+    String val = options.get(key);
+    return val != null ? val : defVal;
+  }
+
+  /**
+   * Get a cluster option or value
+   *
+   * @param key mandatory key
+   * @return the value
+   * @throws BadConfigException if the option is missing
+   */
+  public String getMandatoryOption(String key) throws BadConfigException {
+    String val = options.get(key);
+    if (val == null) {
+      throw new BadConfigException("Missing option " + key);
+    }
+    return val;
+  }
+
+  /**
+   * Get an integer option; use {@link Integer#decode(String)} so as to take hex
+   * oct and bin values too.
+   *
+   * @param option option name
+   * @param defVal default value
+   * @return parsed value
+   * @throws NumberFormatException if the option value could not be parsed.
+   */
+  public int getOptionInt(String option, int defVal) {
+    String val = getOption(option, Integer.toString(defVal));
+    return Integer.decode(val);
+  }
+
+  /**
+   * Verify that an option is set: that is defined AND non-empty
+   * @param key key to verify
+   * @throws BadConfigException if the option is unset or empty
+   */
+  public void verifyOptionSet(String key) throws BadConfigException {
+    if (SliderUtils.isUnset(getOption(key, null))) {
+      throw new BadConfigException("Unset cluster option %s", key);
+    }
+  }
+
+  /**
+   * Get an option as a boolean. Note that {@link Boolean#valueOf(String)}
+   * is used for parsing -its policy of what is true vs false applies.
+   * @param option name
+   * @param defVal default
+   * @return the option.
+   */
+  public boolean getOptionBool(String option, boolean defVal) {
+    return Boolean.valueOf(getOption(option, Boolean.toString(defVal)));
+  }
+
+  /**
+   * Get a role option
+   * @param role role to get from
+   * @param option option name
+   * @param defVal default returned when the role or the option is absent
+   * @return resolved value or default
+   */
+  public String getRoleOpt(String role, String option, String defVal) {
+    Map<String, String> roleopts = getRole(role);
+    if (roleopts == null) {
+      return defVal;
+    }
+    String val = roleopts.get(option);
+    return val != null ? val : defVal;
+  }
+
+  /**
+   * Get a mandatory role option
+   * @param role role to get from
+   * @param option option name
+   * @return resolved value
+   * @throws BadConfigException if the role or the option is not defined
+   */
+  public String getMandatoryRoleOpt(String role, String option) throws
+                                                                BadConfigException {
+    Map<String, String> roleopts = getRole(role);
+    if (roleopts == null) {
+      throw new BadConfigException("Missing role %s ", role);
+    }
+    String val = roleopts.get(option);
+    if (val == null) {
+      throw new BadConfigException("Missing option '%s' in role %s ", option, role);
+    }
+    return val;
+  }
+
+  /**
+   * Get a mandatory integer role option; parsed with
+   * {@link Integer#decode(String)} so hex and octal forms are accepted.
+   * @param role role to get from
+   * @param option option name
+   * @return resolved value
+   * @throws BadConfigException if the option is not defined
+   * @throws NumberFormatException if the value could not be parsed
+   */
+  public int getMandatoryRoleOptInt(String role, String option)
+      throws BadConfigException {
+    // decode the mandatory value directly rather than looking it up twice
+    return Integer.decode(getMandatoryRoleOpt(role, option));
+  }
+  
+  /**
+   * look up a role and return its options
+   * @param role role name
+   * @return role option mapping, or null if the role is undefined
+   */
+  public Map<String, String> getRole(String role) {
+    return roles.get(role);
+  }
+
+  /**
+   * Get a role -adding an empty entry to the roles map if
+   * none with that name exists
+   * @param role role name
+   * @return role mapping, never null
+   */
+  public Map<String, String> getOrAddRole(String role) {
+    Map<String, String> map = getRole(role);
+    if (map == null) {
+      // only insert when absent; an existing mapping is left untouched
+      map = new HashMap<>();
+      roles.put(role, map);
+    }
+    return map;
+  }
+
+  /*
+   * return a copy of the Set of role names
+   */
+  @JsonIgnore
+  public Set<String> getRoleNames() {
+    return new HashSet<>(roles.keySet());
+  }
+
+  /**
+   * Get a role whose presence is mandatory
+   * @param role role name
+   * @return the mapping
+   * @throws BadConfigException if the role is not there
+   */
+  public Map<String, String> getMandatoryRole(String role) throws
+                                                           BadConfigException {
+    Map<String, String> roleOptions = getRole(role);
+    if (roleOptions == null) {
+      throw new BadConfigException("Missing role " + role);
+    }
+    return roleOptions;
+  }
+
+  /**
+   * Get an integer role option; use {@link Integer#decode(String)} so as to take hex
+   * oct and bin values too.
+   *
+   * @param role role to get from
+   * @param option option name
+   * @param defVal default value
+   * @return parsed value
+   * @throws NumberFormatException if the value could not be parsed.
+   */
+  public int getRoleOptInt(String role, String option, int defVal) {
+    String val = getRoleOpt(role, option, Integer.toString(defVal));
+    return Integer.decode(val);
+  }
+
+  /**
+   * Get a long role option; use {@link Long#decode(String)} so as to take hex
+   * oct and bin values too.
+   *
+   * @param role role to get from
+   * @param option option name
+   * @param defVal default value
+   * @return parsed value
+   * @throws NumberFormatException if the value could not be parsed.
+   */
+  public long getRoleOptLong(String role, String option, long defVal) {
+    String val = getRoleOpt(role, option, Long.toString(defVal));
+    return Long.decode(val);
+  }
+
+  /**
+   * Set a role option, creating the role if necessary
+   * @param role role name
+   * @param option option name
+   * @param val value
+   */
+  public void setRoleOpt(String role, String option, String val) {
+    Map<String, String> roleopts = getOrAddRole(role);
+    roleopts.put(option, val);
+  }
+
+  /**
+   * Set an integer role option, creating the role if necessary
+   * @param role role name
+   * @param option option name
+   * @param val integer value
+   */
+  public void setRoleOpt(String role, String option, int val) {
+    setRoleOpt(role, option, Integer.toString(val));
+  }
+
+  /**
+   * Set a role option of any object, using its string value.
+   * This works for (Boxed) numeric values as well as other objects
+   * @param role role name
+   * @param option option name
+   * @param val non-null value; a null reference triggers an NPE in toString()
+   */
+  public void setRoleOpt(String role, String option, Object val) {
+    setRoleOpt(role, option, val.toString());
+  }
+
+  /**
+   * Get the value of a role requirement (cores, RAM, etc).
+   * Values are parsed as integers, except for the special string
+   * {@link ResourceKeys#YARN_RESOURCE_MAX}, which triggers the return
+   * of the supplied maximum value.
+   * @param role role to get from
+   * @param option option name
+   * @param defVal default value
+   * @param maxVal value to return if the max value is requested
+   * @return parsed value
+   * @throws NumberFormatException if the value could not be parsed.
+   */
+  public int getRoleResourceRequirement(String role, String option, int defVal, int maxVal) {
+    String requirement = getRoleOpt(role, option, Integer.toString(defVal));
+    if (ResourceKeys.YARN_RESOURCE_MAX.equals(requirement)) {
+      return maxVal;
+    }
+    return Integer.decode(requirement);
+  }
+
+
+  /**
+   * Set the time for an information (human, machine) timestamp pair of fields.
+   * The human time is the time in millis converted via the {@link Date} class.
+   * @param keyHumanTime name of human time key
+   * @param keyMachineTime name of machine time key
+   * @param time timestamp in millis
+   */
+  
+  public void setInfoTime(String keyHumanTime, String keyMachineTime, long time) {
+    SliderUtils.setInfoTime(info, keyHumanTime, keyMachineTime, time);
+  }
+
+  /**
+   * Set an information string. This is content that is only valid in status
+   * reports.
+   * @param key key
+   * @param value string value
+   */
+  @JsonIgnore
+  public void setInfo(String key, String value) {
+    info.put(key, value);
+  }
+
+  /**
+   * Get an information string. This is content that is only valid in status
+   * reports.
+   * @param key key
+   * @return the value, or null if the key is absent
+   */
+  @JsonIgnore
+  public String getInfo(String key) {
+    return info.get(key);
+  }
+
+  /**
+   * Get an information entry as a boolean. This is content that is only
+   * valid in status reports.
+   * @param key key to look up
+   * @return the boolean value of the entry, or false if absent
+   */
+  @JsonIgnore
+  public boolean getInfoBool(String key) {
+    // Boolean.valueOf(null) is false, so a missing key defaults to false
+    return Boolean.valueOf(info.get(key));
+  }
+
+  /**
+   * Get the ZK quorum hosts.
+   * @return the value of the {@link OptionKeys#ZOOKEEPER_QUORUM} option
+   * @throws BadConfigException if the option is unset
+   */
+  @JsonIgnore
+  public String getZkHosts() throws BadConfigException {
+    return getMandatoryOption(ZOOKEEPER_QUORUM);
+  }
+
+  /**
+   * Set the hosts for the ZK quorum
+   * @param zkHosts a comma separated list of hosts
+   */
+  @JsonIgnore
+  public void setZkHosts(String zkHosts) {
+    setOption(ZOOKEEPER_QUORUM, zkHosts);
+  }
+
+  /**
+   * Get the ZK path.
+   * @return the value of the {@link OptionKeys#ZOOKEEPER_PATH} option
+   * @throws BadConfigException if the option is unset
+   */
+  @JsonIgnore
+  public String getZkPath() throws BadConfigException {
+    return getMandatoryOption(ZOOKEEPER_PATH);
+  }
+
+  /**
+   * Set the ZK path option.
+   * @param zkPath new path value
+   */
+  @JsonIgnore
+  public void setZkPath(String zkPath) {
+    setOption(ZOOKEEPER_PATH, zkPath);
+  }
+
+  /**
+   * Application home: if non-empty defines where a copy of the application
+   * is preinstalled. (Comment previously said "HBase", a relic of the Hoya
+   * precursor.) Empty string when unset.
+   */
+  @JsonIgnore
+  public String getApplicationHome() {
+    return getOption(INTERNAL_APPLICATION_HOME, "");
+  }
+
+  /**
+   * Set the application home option.
+   * @param applicationHome path to the preinstalled application
+   */
+  @JsonIgnore
+  public void setApplicationHome(String applicationHome) {
+    setOption(INTERNAL_APPLICATION_HOME, applicationHome);
+  }
+
+  /**
+   * The path in HDFS where the application image is; empty string when
+   * unset.
+   */
+  @JsonIgnore
+  public String getImagePath() {
+    return getOption(INTERNAL_APPLICATION_IMAGE_PATH, "");
+  }
+
+  /**
+   * Set the path in HDFS where the application image is
+   * @param imagePath new image path
+   */
+  @JsonIgnore
+  public void setImagePath(String imagePath) {
+    setOption(INTERNAL_APPLICATION_IMAGE_PATH, imagePath);
+  }
+
+  /**
+   * Query for the image path being set (non null/non empty)
+   * @return true if there is a path in the image path option
+   */
+  @JsonIgnore
+  public boolean isImagePathSet() {
+    return SliderUtils.isSet(getImagePath());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
new file mode 100644
index 0000000..5b7a92a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+public class ClusterDescriptionKeys {
+
+  public static final String KEY_CLUSTER_LIVE = "live"; 
+  public static final String KEY_CLUSTER_FAILED = "failed"; 
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
new file mode 100644
index 0000000..5b95414
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.SliderProviderFactory;
+
+import java.util.Map;
+
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH;
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM;
+
+/**
+ * Operations on Cluster Descriptions
+ */
+public class ClusterDescriptionOperations {
+
+
+  /**
+   * Build a {@link ClusterDescription} (the legacy single-document model)
+   * from an aggregate configuration: options are a merge of all global
+   * sections and roles are the merged per-component maps.
+   * @param aggregateConf source configuration (internal, appConf, resources)
+   * @return a populated cluster description in state
+   *         {@link ClusterDescription#STATE_LIVE}
+   * @throws BadConfigException if the configuration cannot be resolved
+   */
+  public static ClusterDescription buildFromInstanceDefinition(AggregateConf aggregateConf) throws
+                                                                                       BadConfigException {
+
+    ClusterDescription cd = new ClusterDescription();
+    
+    aggregateConf.resolve();
+
+    //options are a merge of all globals
+    // merge order matters: internal, then appConf, then resources; since
+    // duplicate keys are ignored, earlier entries presumably win -- confirm
+    // against SliderUtils.mergeMapsIgnoreDuplicateKeys
+    Map<String, String> options = cd.options;
+    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
+        aggregateConf.getInternal().global);
+    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
+        aggregateConf.getAppConf().global);
+    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
+        aggregateConf.getResources().global);
+
+    //roles are the role values merged in the same order
+    mergeInComponentMap(cd, aggregateConf.getInternal());
+    mergeInComponentMap(cd, aggregateConf.getAppConf());
+    mergeInComponentMap(cd, aggregateConf.getResources());
+
+    //now add the extra bits
+    cd.state = ClusterDescription.STATE_LIVE;
+    MapOperations internalOptions =
+      aggregateConf.getInternalOperations().getGlobalOptions();
+    MapOperations appOptions =
+      aggregateConf.getAppConfOperations().getGlobalOptions();
+
+    cd.type = internalOptions.getOption(InternalKeys.INTERNAL_PROVIDER_NAME,
+                                SliderProviderFactory.DEFAULT_CLUSTER_TYPE);
+
+    cd.dataPath = internalOptions.get(InternalKeys.INTERNAL_DATA_DIR_PATH);
+    cd.name = internalOptions.get(OptionKeys.APPLICATION_NAME);
+    cd.originConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_SNAPSHOT_CONF_PATH);
+    cd.generatedConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_GENERATED_CONF_PATH);
+    cd.setImagePath(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH));
+    cd.setApplicationHome(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_HOME));
+    cd.setZkPath(appOptions.get(ZOOKEEPER_PATH));
+    cd.setZkHosts(appOptions.get(ZOOKEEPER_QUORUM));
+    
+    return cd;
+  }
+
+  /**
+   * Merge every component map of the configuration tree into the
+   * cluster description's role map.
+   * @param cd destination cluster description
+   * @param confTree source configuration tree
+   */
+  private static void mergeInComponentMap(ClusterDescription cd,
+                                          ConfTree confTree) {
+
+    Map<String, Map<String, String>> components = confTree.components;
+    for (Map.Entry<String, Map<String, String>> compEntry : components.entrySet()) {
+      String name = compEntry.getKey();
+      Map<String, String> destRole = cd.getOrAddRole(name);
+      Map<String, String> sourceComponent = compEntry.getValue();
+      SliderUtils.mergeMapsIgnoreDuplicateKeys(destRole, sourceComponent);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterNode.java
new file mode 100644
index 0000000..8b0a563
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterNode.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.api.proto.Messages;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Describe a specific node in the cluster.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL )
+public final class ClusterNode implements Cloneable {
+  protected static final Logger
+    LOG = LoggerFactory.getLogger(ClusterNode.class);
+  
+  @JsonIgnore
+  public ContainerId containerId;
+
+  /**
+   * server name
+   */
+  public String name;
+
+
+  /**
+   * UUID of container used in Slider RPC to refer to instances
+   */
+  public String id;
+  
+  public String role;
+  
+  public int roleId;
+
+  public long createTime;
+  public long startTime;
+  /**
+   * flag set when it is released, to know if it has
+   * already been targeted for termination
+   */
+  public boolean released;
+  public String host;
+  public String ip;
+  public String hostname;
+  public String hostUrl;
+
+  /**
+   * state from {@link ClusterDescription}
+   */
+  public int state;
+
+  /**
+   * Exit code: only valid if the state >= STOPPED
+   */
+  public int exitCode;
+
+  /**
+   * what was the command executed?
+   */
+  public String command;
+
+  /**
+   * Any diagnostics
+   */
+  public String diagnostics;
+
+  /**
+   * What is the tail output from the executed process (or [] if not started
+   * or the log cannot be picked up
+   */
+  public String[] output;
+
+  /**
+   * Any environment details
+   */
+  public String[] environment;
+
+  /**
+   * server-side ctor takes the container ID and builds the name from it
+   * @param containerId container ID; can be null
+   */
+  public ClusterNode(ContainerId containerId) {
+    if (containerId != null) {
+      this.containerId = containerId;
+      this.name = containerId.toString();
+    }
+  }
+
+  /**
+   * ctor for deserialization
+   */
+  public ClusterNode() {
+  }
+
+  /**
+   * Human-readable dump: name, state and role, then any host/command
+   * details and output lines; null-valued optional fields are omitted.
+   */
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append(name).append(": ");
+    // Fix: state was previously appended twice, once without a label.
+    builder.append("state: ").append(state).append("\n");
+    builder.append("role: ").append(role).append("\n");
+    append(builder, "host", host);
+    append(builder, "hostURL", hostUrl);
+    append(builder, "command", command);
+    if (output != null) {
+      for (String line : output) {
+        builder.append(line).append("\n");
+      }
+    }
+    append(builder, "diagnostics", diagnostics);
+    return builder.toString();
+  }
+
+  /**
+   * Append a "key: value" line to the builder, skipping null values.
+   */
+  private void append(StringBuilder builder, String key, Object val) {
+    if (val != null) {
+      builder.append(key).append(": ").append(val.toString()).append("\n");
+    }
+  }
+  
+  /**
+   * Convert to a JSON string
+   * @return a JSON string description
+   * @throws IOException Problems mapping/writing the object
+   */
+  public String toJsonString() throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(this);
+  }
+
+
+  /**
+   * Convert from JSON
+   * @param json input
+   * @return the parsed JSON
+   * @throws IOException IO
+   */
+  public static ClusterNode fromJson(String json)
+    throws IOException, JsonParseException, JsonMappingException {
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.readValue(json, ClusterNode.class);
+    } catch (IOException e) {
+      // first {} shows the exception summary; trailing argument attaches
+      // the stack trace per SLF4J's last-throwable convention
+      LOG.error("Exception while parsing json : {}\n{}", e, json, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Build from a protobuf response
+   * @param message protobuf role instance state
+   * @return the deserialized node
+   */
+  public static ClusterNode fromProtobuf(Messages.RoleInstanceState message) {
+    ClusterNode node = new ClusterNode();
+    node.name = message.getName();
+    node.command = message.getCommand();
+    node.diagnostics = message.getDiagnostics();
+    String[] arr;
+    int environmentCount = message.getEnvironmentCount();
+    if (environmentCount > 0) {
+      arr = new String[environmentCount];
+      node.environment = message.getEnvironmentList().toArray(arr);
+    }
+    node.exitCode = message.getExitCode();
+    int outputCount = message.getOutputCount();
+    if (outputCount > 0) {
+      arr = new String[outputCount];
+      node.output = message.getOutputList().toArray(arr);
+    }
+    node.role = message.getRole();
+    node.roleId = message.getRoleId();
+    node.state = message.getState();
+    node.host = message.getHost();
+    node.hostUrl = message.getHostURL();
+    node.createTime = message.getCreateTime();
+    node.startTime = message.getStartTime();
+    node.released = message.getReleased();
+    return node;
+  }
+
+  @Override
+  public Object clone() throws CloneNotSupportedException {
+    return super.clone();
+  }
+  
+  /**
+   * Clone without the checked exception; safe because this final class
+   * implements Cloneable.
+   * @return a shallow copy of this node
+   */
+  public ClusterNode doClone() {
+    try {
+      return (ClusterNode)clone();
+    } catch (CloneNotSupportedException e) {
+      //not going to happen. This is a final class
+      return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
new file mode 100644
index 0000000..fcaaf0e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+/**
+ * Keys for internal use, go into `internal.json` and not intended for normal
+ * use except when tuning Slider AM operations
+ */
+public interface InternalKeys {
+
+
+  /**
+   * Home dir of the app: {@value}
+   * If set, implies there is a home dir to use
+   */
+  String INTERNAL_APPLICATION_HOME = "internal.application.home";
+  /**
+   * Path to an image file containing the app: {@value}
+   */
+  String INTERNAL_APPLICATION_IMAGE_PATH = "internal.application.image.path";
+  /**
+   * Time in milliseconds to wait after forking any in-AM 
+   * process before attempting to start up the containers: {@value}
+   * 
+   * A shorter value brings the cluster up faster, but means that if the
+   * in AM process fails (due to a bad configuration), then time
+   * is wasted starting containers on a cluster that isn't going to come
+   * up
+   */
+  String INTERNAL_CONTAINER_STARTUP_DELAY = "internal.container.startup.delay";
+  /**
+   * internal temp directory: {@value}
+   */
+  String INTERNAL_AM_TMP_DIR = "internal.am.tmp.dir";
+  /**
+   * internal temp directory: {@value}
+   */
+  String INTERNAL_TMP_DIR = "internal.tmp.dir";
+  /**
+   * where a snapshot of the original conf dir is: {@value}
+   */
+  String INTERNAL_SNAPSHOT_CONF_PATH = "internal.snapshot.conf.path";
+  /**
+   * where the generated configuration directory is: {@value}
+   */
+  String INTERNAL_GENERATED_CONF_PATH = "internal.generated.conf.path";
+  /**
+   * name of the provider for this application: {@value}
+   */
+  String INTERNAL_PROVIDER_NAME = "internal.provider.name";
+  /**
+   * path to the application's data directory: {@value}
+   */
+  String INTERNAL_DATA_DIR_PATH = "internal.data.dir.path";
+  /**
+   * where the app def is stored
+   */
+  String INTERNAL_APPDEF_DIR_PATH = "internal.appdef.dir.path";
+  /**
+   * where addons for the app are stored
+   */
+  String INTERNAL_ADDONS_DIR_PATH = "internal.addons.dir.path";
+  /**
+   * Default value, in milliseconds, of
+   * {@link #INTERNAL_CONTAINER_STARTUP_DELAY}: {@value}
+   *
+   * A shorter value brings the cluster up faster, but means that if the
+   * in AM process fails (due to a bad configuration), then time
+   * is wasted starting containers on a cluster that isn't going to come
+   * up
+   */
+  int DEFAULT_INTERNAL_CONTAINER_STARTUP_DELAY = 5000;
+  /**
+   * Time in seconds before a container is considered long-lived.
+   * Shortlived containers are interpreted as a problem with the role
+   * and/or the host: {@value}
+   */
+  String INTERNAL_CONTAINER_FAILURE_SHORTLIFE =
+      "internal.container.failure.shortlife";
+  /**
+   * Default short life threshold: {@value}
+   */
+  int DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE = 60;
+  
+  /**
+   * Location of the keytab used by the AM: {@value}
+   */
+  String KEYTAB_LOCATION = "internal.keytab.location";
+
+  /**
+   * Queue used to deploy the app: {@value}
+   */
+  String INTERNAL_QUEUE = "internal.queue";
+
+  /**
+   * Flag to indicate whether or not the chaos monkey is enabled:
+   * {@value}
+   */
+  String CHAOS_MONKEY_ENABLED = "internal.chaos.monkey.enabled";
+  boolean DEFAULT_CHAOS_MONKEY_ENABLED = false;
+
+
+  /**
+   * Rate
+   */
+
+  String CHAOS_MONKEY_INTERVAL = "internal.chaos.monkey.interval";
+  String CHAOS_MONKEY_INTERVAL_DAYS = CHAOS_MONKEY_INTERVAL + ".days";
+  String CHAOS_MONKEY_INTERVAL_HOURS = CHAOS_MONKEY_INTERVAL + ".hours";
+  String CHAOS_MONKEY_INTERVAL_MINUTES = CHAOS_MONKEY_INTERVAL + ".minutes";
+  String CHAOS_MONKEY_INTERVAL_SECONDS = CHAOS_MONKEY_INTERVAL + ".seconds";
+  
+  int DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS = 0;
+  int DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS = 0;
+  int DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES = 0;
+
+  String CHAOS_MONKEY_DELAY = "internal.chaos.monkey.delay";
+  String CHAOS_MONKEY_DELAY_DAYS = CHAOS_MONKEY_DELAY + ".days";
+  String CHAOS_MONKEY_DELAY_HOURS = CHAOS_MONKEY_DELAY + ".hours";
+  String CHAOS_MONKEY_DELAY_MINUTES = CHAOS_MONKEY_DELAY + ".minutes";
+  String CHAOS_MONKEY_DELAY_SECONDS = CHAOS_MONKEY_DELAY + ".seconds";
+  
+  int DEFAULT_CHAOS_MONKEY_STARTUP_DELAY = 0;
+
+  /**
+   * Prefix for all chaos monkey probabilities
+   */
+  String CHAOS_MONKEY_PROBABILITY =
+      "internal.chaos.monkey.probability";
+  /**
+   * Probabilities are out of 10000 ; 100==1%
+   */
+
+  /**
+   * Probability of a monkey check killing the AM:  {@value}
+   */
+  String CHAOS_MONKEY_PROBABILITY_AM_FAILURE =
+      CHAOS_MONKEY_PROBABILITY + ".amfailure";
+
+  /**
+   * Default probability of a monkey check killing the AM:  {@value}
+   */
+  int DEFAULT_CHAOS_MONKEY_PROBABILITY_AM_FAILURE = 0;
+
+  /**
+   * Probability of a monkey check causing an AM launch failure:  {@value}
+   */
+  String CHAOS_MONKEY_PROBABILITY_AM_LAUNCH_FAILURE =
+      CHAOS_MONKEY_PROBABILITY + ".amlaunchfailure";
+
+  /**
+   * Probability of a monkey check killing a container:  {@value}
+   */
+
+  String CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE =
+      CHAOS_MONKEY_PROBABILITY + ".containerfailure";
+
+  /**
+   * Default probability of a monkey check killing a container:  {@value}
+   */
+  int DEFAULT_CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE = 0;
+
+
+  /**
+   * 1% of chaos
+   */
+  int PROBABILITY_PERCENT_1 = 100;
+  
+  /**
+   * 100% for chaos values
+   */
+  int PROBABILITY_PERCENT_100 = 100 * PROBABILITY_PERCENT_1;
+
+  /**
+   * interval between checks for escalation: {@value}
+   */
+  String ESCALATION_CHECK_INTERVAL = "escalation.check.interval.seconds";
+
+  /**
+   * default value: {@value}
+   */
+  int DEFAULT_ESCALATION_CHECK_INTERVAL = 30;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
new file mode 100644
index 0000000..a035a99
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+/**
+ *  Keys for entries in the <code>options</code> section
+ *  of a cluster description.
+ */
+public interface OptionKeys extends InternalKeys {
+
+  /**
+   * Type of the application: {@value}
+   */
+  String APPLICATION_TYPE = "application.type";
+  
+  /**
+   * Name of the application: {@value}
+   */
+  String APPLICATION_NAME = "application.name";
+
+  /**
+   * Prefix for site.xml options: {@value}
+   */
+  String SITE_XML_PREFIX = "site.";
+
+
+  /**
+   * Zookeeper quorum host list: {@value}
+   */
+  String ZOOKEEPER_QUORUM = "zookeeper.quorum";
+  /** Zookeeper hosts: {@value} */
+  String ZOOKEEPER_HOSTS = "zookeeper.hosts";
+  /** Zookeeper port: {@value} */
+  String ZOOKEEPER_PORT = "zookeeper.port";
+
+  /**
+   * Zookeeper path value (string): {@value}
+   */
+  String ZOOKEEPER_PATH = "zookeeper.path";
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
new file mode 100644
index 0000000..92890be
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+/**
+ * These are the keys valid in resource options.
+ * <p>
+ * Container failure window:
+ * <p>
+ * The window is calculated in minutes as
+ * (days * 24 * 60 + hours * 60 + minutes).
+ * <p>
+ * Every interval of this period after the AM is started/restarted becomes
+ * the time period in which the CONTAINER_FAILURE_THRESHOLD value is calculated.
+ * <p>
+ * After the window limit is reached, the failure counts are reset. This
+ * is not a sliding window/moving average policy, simply a rule such as
+ * "every six hours the failure count is reset".
+ */
+public interface ResourceKeys {
+
+
+  /**
+   * #of instances of a component: {@value}
+   *
+  */
+  String COMPONENT_INSTANCES = "yarn.component.instances";
+
+  /**
+   * Whether to use unique names for each instance of a component: {@value}
+   */
+  String UNIQUE_NAMES = "component.unique.names";
+
+  /**
+   *  Amount of memory to ask YARN for in MB.
+   *  <i>Important:</i> this may be a hard limit on the
+   *  amount of RAM that the service can use
+   *  {@value}
+   */
+  String YARN_MEMORY = "yarn.memory";
+  
+  /** {@value} */
+  int DEF_YARN_MEMORY = 256;
+  
+  /**
+   * Number of cores/virtual cores to ask YARN for
+   *  {@value}
+   */
+  String YARN_CORES = "yarn.vcores";
+
+  /**
+   * Number of disks per instance to ask YARN for
+   *  {@value}
+   */
+  String YARN_DISKS = "yarn.disks.count-per-instance";
+
+  /**
+   * Disk size per disk to ask YARN for
+   *  {@value}
+   */
+  String YARN_DISK_SIZE = "yarn.disk.size";
+
+  /** {@value} */
+  int DEF_YARN_CORES = 1;
+
+
+  /**
+   * Label expression that this container must satisfy
+   *  {@value}
+   */
+  String YARN_LABEL_EXPRESSION = "yarn.label.expression";
+
+  /** default label expression: */
+  String DEF_YARN_LABEL_EXPRESSION = null;
+
+
+  /**
+   * Constant to indicate that the requirements of a YARN resource limit
+   * (cores, memory, ...) should be set to the maximum allowed by
+   * the queue into which the YARN container requests are placed.
+   */
+  String YARN_RESOURCE_MAX = "max";
+  
+  /**
+   * Mandatory property for all roles
+   * 1. this must be defined.
+   * 2. this must be >= 1
+   * 3. this must not match any other role priority in the cluster.
+   */
+  String COMPONENT_PRIORITY = "yarn.role.priority";
+  
+  /**
+   * placement policy
+   */
+  String COMPONENT_PLACEMENT_POLICY = "yarn.component.placement.policy";
+
+  /**
+   * Maximum number of node failures that can be tolerated by a component on a specific node
+   */
+  String NODE_FAILURE_THRESHOLD =
+      "yarn.node.failure.threshold";
+
+  /**
+   * maximum number of failed containers (in a single role)
+   * before the cluster is deemed to have failed {@value}
+   */
+  String CONTAINER_FAILURE_THRESHOLD =
+      "yarn.container.failure.threshold";
+
+  /**
+   * prefix for the time of the container failure reset window.
+   * {@value}
+   */
+
+  String CONTAINER_FAILURE_WINDOW =
+      "yarn.container.failure.window";
+
+
+  // default failure window: 6 hours
+  int DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS = 0;
+  int DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS = 6;
+  int DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES = 0;
+
+
+  /**
+   * Default failure threshold: {@value}
+   */
+  int DEFAULT_CONTAINER_FAILURE_THRESHOLD = 5;
+
+  /**
+   * Default node failure threshold for a component instance: {@value}
+   * Should be lower than the default component failure threshold to allow
+   * the component to start elsewhere
+   */
+  int DEFAULT_NODE_FAILURE_THRESHOLD = 3;
+
+  /**
+   * Failure threshold is unlimited: {@value}
+   */
+  int NODE_FAILURE_THRESHOLD_UNLIMITED = -1;
+
+  /**
+   * Time in seconds to escalate placement delay
+   */
+  String PLACEMENT_ESCALATE_DELAY =
+      "yarn.placement.escalate.seconds";
+
+  /**
+   * Time to have a strict placement policy outstanding before 
+   * downgrading to a lax placement (for those components which permit that).
+   * <ol>
+   *   <li>For strictly placed components, there's no relaxation.</li>
+   *   <li>For components with no locality, there's no need to relax</li>
+   * </ol>
+   * 
+   */
+  int DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS = 30;
+
+  /**
+   * Log aggregation include, exclude patterns
+   */
+  String YARN_LOG_INCLUDE_PATTERNS = "yarn.log.include.patterns";
+  String YARN_LOG_EXCLUDE_PATTERNS = "yarn.log.exclude.patterns";
+
+  String YARN_PROFILE_NAME = "yarn.resource-profile-name";
+
+  /**
+   * Window of time where application master's failure count
+   * can be reset to 0.
+   */
+  String YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS  =
+      "yarn.resourcemanager.am.retry-count-window-ms";
+
+  /**
+   * The default window for Slider.
+   */
+  long DEFAULT_AM_RETRY_COUNT_WINDOW_MS = 300000;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
new file mode 100644
index 0000000..812a6b3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+/**
+ * Standard options for roles
+ */
+public interface RoleKeys {
+
+
+  /**
+   * The name of a role: {@value}
+   */
+  String ROLE_NAME = "role.name";
+
+  /**
+   * The group of a role: {@value}
+   */
+  String ROLE_GROUP = "role.group";
+
+  /**
+   * Status report: number actually granted : {@value} 
+   */
+  String ROLE_ACTUAL_INSTANCES = "role.actual.instances";
+
+  /**
+   * Status report: number currently requested: {@value} 
+   */
+  String ROLE_REQUESTED_INSTANCES = "role.requested.instances";
+
+  /**
+   * Status report: number currently being released: {@value} 
+   */
+  String ROLE_RELEASING_INSTANCES = "role.releasing.instances";
+
+  /**
+   * Status report: total number that have failed: {@value}
+   */
+  String ROLE_FAILED_INSTANCES = "role.failed.instances";
+
+  /**
+   * Status report: number that have failed recently: {@value}
+   */
+  String ROLE_FAILED_RECENTLY_INSTANCES = "role.failed.recently.instances";
+
+  /**
+   * Status report: number that have failed for node-related issues: {@value}
+   */
+  String ROLE_NODE_FAILED_INSTANCES = "role.failed.node.instances";
+
+  /**
+   * Status report: number that have been pre-empted: {@value}
+   */
+  String ROLE_PREEMPTED_INSTANCES = "role.failed.preempted.instances";
+
+  /**
+   * Number of pending anti-affine instances: {@value}
+   */
+  String ROLE_PENDING_AA_INSTANCES = "role.pending.aa.instances";
+
+  /**
+   * Status report: number that failed to start: {@value} 
+   */
+  String ROLE_FAILED_STARTING_INSTANCES = "role.failed.starting.instances";
+
+  /**
+   * Extra arguments (non-JVM) to use when starting this role
+   */
+  String ROLE_ADDITIONAL_ARGS = "role.additional.args";
+
+  /**
+   *  JVM heap size for Java applications in MB.  Only relevant for Java applications.
+   *  This MUST be less than or equal to the {@link ResourceKeys#YARN_MEMORY} option
+   *  {@value}
+   */
+  String JVM_HEAP = "jvm.heapsize";
+  
+  /**
+   * GC options for Java applications.
+   */
+  String GC_OPTS = "gc.opts";
+
+  /**
+   * JVM options other than heap size. Only relevant for Java applications.
+   *  {@value}
+   */
+  String JVM_OPTS = "jvm.opts";
+
+
+  /**
+   * All keys w/ env. are converted into env variables and passed down
+   */
+  String ENV_PREFIX = "env.";
+
+  /**
+   * Container service record attribute prefix.
+   */
+  String SERVICE_RECORD_ATTRIBUTE_PREFIX = "service.record.attribute";
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
new file mode 100644
index 0000000..d21785f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.api.types.ContainerInformation;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.api.types.NodeInformationList;
+import org.apache.slider.api.types.PingInformation;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * API exported by the slider remote REST/IPC endpoints.
+ */
+public interface SliderApplicationApi {
+  /**
+   * Get the aggregate desired model
+   * @return the aggregate configuration of what was asked for
+   * -before resolution has taken place
+   * @throws IOException on any failure
+   */
+  AggregateConf getDesiredModel() throws IOException;
+
+  /**
+   * Get the desired application configuration
+   * @return the application configuration asked for
+   * -before resolution has taken place
+   * @throws IOException on any failure
+   */
+  ConfTreeOperations getDesiredAppconf() throws IOException;
+
+  /**
+   * Get the desired YARN resources
+   * @return the resources asked for
+   * -before resolution has taken place
+   * @throws IOException on any failure
+   */
+  ConfTreeOperations getDesiredResources() throws IOException;
+
+  /**
+   * Put an updated resources structure. This triggers a cluster flex
+   * operation
+   * @param updated updated resources
+   * @throws IOException on any problem.
+   */
+  void putDesiredResources(ConfTree updated) throws IOException;
+
+  /**
+   * Get the aggregate resolved model
+   * @return the aggregate configuration of what was asked for
+   * -after resolution has taken place
+   * @throws IOException on any failure
+   */
+  AggregateConf getResolvedModel() throws IOException;
+
+  /**
+   * Get the resolved application configuration
+   * @return the application configuration asked for
+   * -after resolution has taken place
+   * @throws IOException on any failure
+   */
+  ConfTreeOperations getResolvedAppconf() throws IOException;
+
+  /**
+   * Get the resolved YARN resources
+   * @return the resources asked for
+   * -after resolution has taken place
+   * @throws IOException on any failure
+   */
+  ConfTreeOperations getResolvedResources() throws IOException;
+
+  /**
+   * Get the live YARN resources
+   * @return the live set of resources in the cluster
+   * @throws IOException on any failure
+   */
+  ConfTreeOperations getLiveResources() throws IOException;
+
+  /**
+   * Get a map of live containers [containerId:info]
+   * @return a possibly empty list of serialized containers
+   * @throws IOException on any failure
+   */
+  Map<String, ContainerInformation> enumContainers() throws IOException;
+
+  /**
+   * Get a container from the container Id
+   * @param containerId YARN container ID
+   * @return the container information
+   * @throws IOException on any failure
+   */
+  ContainerInformation getContainer(String containerId) throws IOException;
+
+  /**
+   * List all components into a map of [name:info]
+   * @return a possibly empty map of components
+   * @throws IOException on any failure
+   */
+  Map<String, ComponentInformation> enumComponents() throws IOException;
+
+  /**
+   * Get information about a component
+   * @param componentName name of the component
+   * @return the component details
+   * @throws IOException on any failure
+   */
+  ComponentInformation getComponent(String componentName) throws IOException;
+
+  /**
+   * List all nodes into a map of [name:info]
+   * @return a possibly empty list of nodes
+   * @throws IOException on any failure
+   */
+  NodeInformationList getLiveNodes() throws IOException;
+
+  /**
+   * Get information about a node
+   * @param hostname name of the node
+   * @return the node details
+   * @throws IOException on any failure
+   */
+  NodeInformation getLiveNode(String hostname) throws IOException;
+
+  /**
+   * Ping as a GET
+   * @param text text to include
+   * @return the response
+   * @throws IOException on any failure
+   */
+  PingInformation ping(String text) throws IOException;
+
+  /**
+   * Stop the AM (async operation)
+   * @param text text to include
+   * @throws IOException on any failure
+   */
+  void stop(String text) throws IOException;
+
+  /**
+   * Get the application liveness
+   * @return current liveness information
+   * @throws IOException on any failure
+   */
+  ApplicationLivenessInformation getApplicationLiveness() throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
new file mode 100644
index 0000000..33fce22
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.api.proto.Messages;
+import org.apache.slider.common.SliderXmlConfKeys;
+
+import java.io.IOException;
+
/**
 * Cluster protocol: the RPC contract between Slider clients and the
 * application master. This can currently act as a versioned IPC
 * endpoint or be relayed via protobuf.
 */
@KerberosInfo(serverPrincipal = SliderXmlConfKeys.KEY_KERBEROS_PRINCIPAL)
public interface SliderClusterProtocol extends VersionedProtocol {
  // Protocol version used by Hadoop IPC for compatibility checking.
  long versionID = 0x01;

  /**
   * Stop the cluster.
   * @param request stop request
   * @return the stop response
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */

  Messages.StopClusterResponseProto stopCluster(Messages.StopClusterRequestProto request) throws
                                                                                          IOException, YarnException;
  /**
   * Upgrade the application containers.
   *
   * @param request upgrade containers request object
   * @return upgrade containers response object
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.UpgradeContainersResponseProto upgradeContainers(
      Messages.UpgradeContainersRequestProto request) throws IOException,
      YarnException;

  /**
   * Flex the cluster: ask for the number of component instances to change.
   * @param request flex request
   * @return the flex response
   * @throws IOException on any failure
   */
  Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
      throws IOException;


  /**
   * Get the current cluster status as a JSON document.
   * @param request status request
   * @return the status response
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.GetJSONClusterStatusResponseProto getJSONClusterStatus(Messages.GetJSONClusterStatusRequestProto request)
      throws IOException, YarnException;


  /**
   * List the UUIDs of all running nodes in a role.
   * @param request request naming the role
   * @return the list of node UUIDs
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(Messages.ListNodeUUIDsByRoleRequestProto request)
      throws IOException, YarnException;


  /**
   * Get the details on a single node.
   * @param request request identifying the node
   * @return the node details
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.GetNodeResponseProto getNode(Messages.GetNodeRequestProto request)
      throws IOException, YarnException;

  /**
   * Get the details on a list of nodes.
   * Unknown nodes are not returned.
   * <i>Important: the order of the results are undefined</i>
   * @param request request listing the nodes
   * @return details of the known nodes
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.GetClusterNodesResponseProto getClusterNodes(Messages.GetClusterNodesRequestProto request)
      throws IOException, YarnException;

  /**
   * Echo back the submitted text (after logging it).
   * Useful for adding information to the log, and for testing round trip
   * operations of the protocol
   * @param request request
   * @return response
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.EchoResponseProto echo(Messages.EchoRequestProto request) throws IOException, YarnException;

  /**
   * Kill an identified container
   * @param request request containing the container to kill
   * @return the response
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.KillContainerResponseProto killContainer(Messages.KillContainerRequestProto request)
      throws IOException, YarnException;

  /**
   * AM to commit suicide. If the Hadoop halt entry point has not been disabled,
   * this will fail rather than return with a response.
   * @param request request
   * @return response (this is not the expected outcome)
   * @throws IOException on any failure
   */
  Messages.AMSuicideResponseProto amSuicide(Messages.AMSuicideRequestProto request)
      throws IOException;

  /**
   * Get the instance definition.
   * @param request request
   * @return the instance definition
   * @throws IOException on any network or serialization problem
   * @throws YarnException on any YARN-side failure
   */
  Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
    Messages.GetInstanceDefinitionRequestProto request)
    throws IOException, YarnException;

  /**
   * Get the application liveness
   * @return current liveness information
   * @throws IOException on any failure
   */
  Messages.ApplicationLivenessInformationProto getLivenessInformation(
      Messages.GetApplicationLivenessRequestProto request
  ) throws IOException;

  /**
   * Get information on all live containers.
   * @param request request
   * @return the live container list
   * @throws IOException on any failure
   */
  Messages.GetLiveContainersResponseProto getLiveContainers(
      Messages.GetLiveContainersRequestProto request
  ) throws IOException;

  /**
   * Get information on a single live container.
   * @param request request identifying the container
   * @return the container information
   * @throws IOException on any failure
   */
  Messages.ContainerInformationProto getLiveContainer(
      Messages.GetLiveContainerRequestProto request
  ) throws IOException;

  /**
   * Get information on all live components.
   * @param request request
   * @return the live component list
   * @throws IOException on any failure
   */
  Messages.GetLiveComponentsResponseProto getLiveComponents(
      Messages.GetLiveComponentsRequestProto request
  ) throws IOException;

  /**
   * Get information on a single live component.
   * @param request request identifying the component
   * @return the component information
   * @throws IOException on any failure
   */
  Messages.ComponentInformationProto getLiveComponent(
      Messages.GetLiveComponentRequestProto request
  ) throws IOException;

  /**
   * Get information on all live nodes.
   * @param request request
   * @return the live node list
   * @throws IOException on any failure
   */
  Messages.GetLiveNodesResponseProto getLiveNodes(
      Messages.GetLiveNodesRequestProto request
  ) throws IOException;

  /**
   * Get information on a single live node.
   * @param request request identifying the node
   * @return the node information
   * @throws IOException on any failure
   */
  Messages.NodeInformationProto getLiveNode(
      Messages.GetLiveNodeRequestProto request
  ) throws IOException;

  /** Get the desired model as wrapped JSON. */
  Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the desired application configuration as wrapped JSON. */
  Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the desired resource specification as wrapped JSON. */
  Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the resolved model as wrapped JSON. */
  Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the resolved application configuration as wrapped JSON. */
  Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the resolved resource specification as wrapped JSON. */
  Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException;

  /** Get the live resource usage as wrapped JSON. */
  Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException;

  /**
   * Get the client certificate store.
   * @param request request
   * @return the certificate store response
   * @throws IOException on any failure
   */
  Messages.GetCertificateStoreResponseProto getClientCertificateStore(Messages.GetCertificateStoreRequestProto request)
      throws IOException;
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
new file mode 100644
index 0000000..03751e1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
/**
 * Enumeration of the state values of a cluster/role as reported in
 * status documents. Defined as {@code int} constants (rather than an
 * enum) so the values can be serialized directly.
 */
public class StateValues {

  // Constants-only holder: not instantiable (Effective Java Item 4).
  private StateValues() {
  }

  /**
   * Specification is incomplete &amp; cannot be used: {@value}
   */
  public static final int STATE_INCOMPLETE = 0;

  /**
   * Spec has been submitted: {@value}
   */
  public static final int STATE_SUBMITTED = 1;
  /**
   * Cluster created: {@value}
   */
  public static final int STATE_CREATED = 2;
  /**
   * Live: {@value}
   */
  public static final int STATE_LIVE = 3;
  /**
   * Stopped: {@value}
   */
  public static final int STATE_STOPPED = 4;
  /**
   * Destroyed: {@value}
   */
  public static final int STATE_DESTROYED = 5;

}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79efbbc2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StatusKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
new file mode 100644
index 0000000..8a2c4bb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StatusKeys.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+import static org.apache.slider.api.ResourceKeys.COMPONENT_INSTANCES;
/**
 * Contains status and statistics keys used in cluster status reports.
 * (Constant-interface pattern retained for compatibility with existing
 * implementers.)
 */
public interface StatusKeys {

  String STATISTICS_CONTAINERS_ACTIVE_REQUESTS = "containers.active.requests";
  String STATISTICS_CONTAINERS_COMPLETED = "containers.completed";
  String STATISTICS_CONTAINERS_DESIRED = "containers.desired";
  String STATISTICS_CONTAINERS_FAILED = "containers.failed";
  String STATISTICS_CONTAINERS_FAILED_RECENTLY = "containers.failed.recently";
  String STATISTICS_CONTAINERS_FAILED_NODE = "containers.failed.node";
  String STATISTICS_CONTAINERS_PREEMPTED = "containers.failed.preempted";
  String STATISTICS_CONTAINERS_LIVE = "containers.live";
  String STATISTICS_CONTAINERS_REQUESTED = "containers.requested";
  String STATISTICS_CONTAINERS_ANTI_AFFINE_PENDING = "containers.anti-affine.pending";
  String STATISTICS_CONTAINERS_STARTED = "containers.start.started";
  String STATISTICS_CONTAINERS_START_FAILED =
      "containers.start.failed";
  String STATISTICS_CONTAINERS_SURPLUS =
      "containers.surplus";
  String STATISTICS_CONTAINERS_UNKNOWN_COMPLETED =
      "containers.unknown.completed";
  /**
   * No of containers provided on AM restart
   */
  String INFO_CONTAINERS_AM_RESTART = "containers.at.am-restart";

  String INFO_CREATE_TIME_MILLIS = "create.time.millis";
  String INFO_CREATE_TIME_HUMAN = "create.time";
  String INFO_LIVE_TIME_MILLIS = "live.time.millis";
  String INFO_LIVE_TIME_HUMAN = "live.time";
  String INFO_FLEX_TIME_MILLIS = "flex.time.millis";
  String INFO_FLEX_TIME_HUMAN = "flex.time";

  String INFO_MASTER_ADDRESS = "info.master.address";

  /**
   * System time in millis when the status report was generated
   */
  String INFO_STATUS_TIME_MILLIS = "status.time.millis";

  /**
   * System time in human form when the status report was generated
   */
  String INFO_STATUS_TIME_HUMAN = "status.time";

  String INFO_AM_APP_ID = "info.am.app.id";
  String INFO_AM_ATTEMPT_ID = "info.am.attempt.id";
  String INFO_AM_CONTAINER_ID = "info.am.container.id";
  String INFO_AM_HOSTNAME = "info.am.hostname";
  String INFO_AM_RPC_PORT = "info.am.rpc.port";
  String INFO_AM_WEB_PORT = "info.am.web.port";
  String INFO_AM_WEB_URL = "info.am.web.url";
  String INFO_AM_AGENT_STATUS_PORT = "info.am.agent.status.port";
  String INFO_AM_AGENT_OPS_PORT = "info.am.agent.ops.port";
  String INFO_AM_AGENT_OPS_URL = "info.am.agent.ops.url";
  String INFO_AM_AGENT_STATUS_URL = "info.am.agent.status.url";

  /**
   * info: actual #of instances of a component: {@value}
   *
   */
  String COMPONENT_INSTANCES_ACTUAL = COMPONENT_INSTANCES + ".actual";

  /**
   * info: #of instances of a component requested: {@value}
   *
   */
  String COMPONENT_INSTANCES_REQUESTING = COMPONENT_INSTANCES + ".requesting";

  /**
   * info: #of instances of a component being released: {@value}
   *
   */
  String COMPONENT_INSTANCES_RELEASING = COMPONENT_INSTANCES + ".releasing";

  /**
   * info: #of instances of a component failed: {@value}
   *
   */
  String COMPONENT_INSTANCES_FAILED = COMPONENT_INSTANCES + ".failed";

  /**
   * info: #of instances of a component started: {@value}
   *
   */
  String COMPONENT_INSTANCES_STARTED = COMPONENT_INSTANCES + ".started";


  /**
   * info: #of instances of a component completed: {@value}
   *
   */
  String COMPONENT_INSTANCES_COMPLETED = COMPONENT_INSTANCES + ".completed";


}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message