hadoop-common-commits mailing list archives

From wan...@apache.org
Subject [1/2] hadoop git commit: YARN-6363. Extending SLS: Synthetic Load Generator. (Carlo Curino via wangda)
Date Fri, 21 Apr 2017 04:57:02 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk 667966c13 -> de69d6e81


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
new file mode 100644
index 0000000..3d2ec94
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls.synthetic;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.tools.rumen.JobStory;
+import org.apache.hadoop.tools.rumen.JobStoryProducer;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.codehaus.jackson.JsonParser.Feature.INTERN_FIELD_NAMES;
+import static org.codehaus.jackson.map.DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES;
+
+/**
+ * This is a JobStoryProducer that operates from a distribution of different
+ * workloads. The .json input file determines how many jobs to generate, their
+ * size, the number of maps/reducers and their durations, as well as the
+ * temporal distribution of submissions. For each parameter we control average
+ * and standard deviation, and generate values via normal or log-normal
+ * distributions.
+ */
+public class SynthTraceJobProducer implements JobStoryProducer {
+
+  @SuppressWarnings("StaticVariableName")
+  private static final Log LOG = LogFactory.getLog(SynthTraceJobProducer.class);
+
+  private final Configuration conf;
+  private final AtomicInteger numJobs;
+  private final Trace trace;
+  private final long seed;
+
+  private double totalWeight;
+  private final List<Double> weightList;
+  private final Map<Integer, SynthWorkload> workloads;
+
+  private final Queue<StoryParams> listStoryParams;
+
+  private final JDKRandomGenerator rand;
+
+  public static final String SLS_SYNTHETIC_TRACE_FILE =
+      "sls.synthetic.trace_file";
+
+  public SynthTraceJobProducer(Configuration conf) throws IOException {
+    this(conf, new Path(conf.get(SLS_SYNTHETIC_TRACE_FILE)));
+  }
+
+  public SynthTraceJobProducer(Configuration conf, Path path)
+      throws IOException {
+
+    LOG.info("SynthTraceJobProducer");
+
+    this.conf = conf;
+    this.rand = new JDKRandomGenerator();
+    workloads = new HashMap<Integer, SynthWorkload>();
+    weightList = new ArrayList<Double>();
+
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(INTERN_FIELD_NAMES, true);
+    mapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false);
+
+    FileSystem ifs = path.getFileSystem(conf);
+    FSDataInputStream fileIn = ifs.open(path);
+
+    // make sure the trace file handle is released even if parsing fails
+    try {
+      this.trace = mapper.readValue(fileIn, Trace.class);
+    } finally {
+      fileIn.close();
+    }
+    seed = trace.rand_seed;
+    rand.setSeed(seed);
+
+    this.numJobs = new AtomicInteger(trace.num_jobs);
+
+    for (int workloadId = 0; workloadId < trace.workloads
+        .size(); workloadId++) {
+      SynthWorkload workload = new SynthWorkload(workloadId, trace);
+      for (int classId =
+          0; classId < trace.workloads.get(workloadId).job_classes
+              .size(); classId++) {
+        SynthJobClass cls = new SynthJobClass(rand, trace, workload, classId);
+        workload.add(cls);
+      }
+      workloads.put(workloadId, workload);
+    }
+
+    for (int i = 0; i < workloads.size(); i++) {
+      double w = workloads.get(i).getWorkloadWeight();
+      totalWeight += w;
+      weightList.add(w);
+    }
+
+    // create priority queue to keep start-time sorted
+    listStoryParams =
+        new PriorityQueue<StoryParams>(10, new Comparator<StoryParams>() {
+          @Override
+          public int compare(StoryParams o1, StoryParams o2) {
+            // earliest submission time first (overflow-safe comparison)
+            return Long.compare(o1.actualSubmissionTime,
+                o2.actualSubmissionTime);
+          }
+        });
+
+    // initialize it
+    createStoryParams();
+    LOG.info("Generated " + listStoryParams.size() + " deadlines for "
+        + this.numJobs.get() + " jobs ");
+  }
+
+  public long getSeed() {
+    return seed;
+  }
+
+  public int getNodesPerRack() {
+    return trace.nodes_per_rack;
+  }
+
+  public int getNumNodes() {
+    return trace.num_nodes;
+  }
+
+  /**
+   * Class used to parse a trace configuration file.
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  @XmlRootElement
+  public static class Trace {
+    @JsonProperty("description")
+    String description;
+    @JsonProperty("num_nodes")
+    int num_nodes;
+    @JsonProperty("nodes_per_rack")
+    int nodes_per_rack;
+    @JsonProperty("num_jobs")
+    int num_jobs;
+
+    // the random seed used for deterministic randomized runs
+    @JsonProperty("rand_seed")
+    long rand_seed;
+    @JsonProperty("workloads")
+    List<Workload> workloads;
+
+  }
+
+  /**
+   * Class used to parse a workload from file.
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  public static class Workload {
+    @JsonProperty("workload_name")
+    String workload_name;
+    // used to change probability this workload is picked for each job
+    @JsonProperty("workload_weight")
+    double workload_weight;
+    @JsonProperty("queue_name")
+    String queue_name;
+    @JsonProperty("job_classes")
+    List<JobClass> job_classes;
+    @JsonProperty("time_distribution")
+    List<TimeSample> time_distribution;
+  }
+
+  /**
+   * Class used to parse a job class from file.
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  public static class JobClass {
+
+    @JsonProperty("class_name")
+    String class_name;
+    @JsonProperty("user_name")
+    String user_name;
+
+    // used to change probability this class is chosen
+    @JsonProperty("class_weight")
+    double class_weight;
+
+    // reservation related params
+    @JsonProperty("chance_of_reservation")
+    double chance_of_reservation;
+    @JsonProperty("deadline_factor_avg")
+    double deadline_factor_avg;
+    @JsonProperty("deadline_factor_stddev")
+    double deadline_factor_stddev;
+
+    // durations in sec
+    @JsonProperty("dur_avg")
+    double dur_avg;
+    @JsonProperty("dur_stddev")
+    double dur_stddev;
+    @JsonProperty("mtime_avg")
+    double mtime_avg;
+    @JsonProperty("mtime_stddev")
+    double mtime_stddev;
+    @JsonProperty("rtime_avg")
+    double rtime_avg;
+    @JsonProperty("rtime_stddev")
+    double rtime_stddev;
+
+    // number of tasks
+    @JsonProperty("mtasks_avg")
+    double mtasks_avg;
+    @JsonProperty("mtasks_stddev")
+    double mtasks_stddev;
+    @JsonProperty("rtasks_avg")
+    double rtasks_avg;
+    @JsonProperty("rtasks_stddev")
+    double rtasks_stddev;
+
+    // memory in MB
+    @JsonProperty("map_max_memory_avg")
+    long map_max_memory_avg;
+    @JsonProperty("map_max_memory_stddev")
+    double map_max_memory_stddev;
+    @JsonProperty("reduce_max_memory_avg")
+    long reduce_max_memory_avg;
+    @JsonProperty("reduce_max_memory_stddev")
+    double reduce_max_memory_stddev;
+
+    // vcores
+    @JsonProperty("map_max_vcores_avg")
+    long map_max_vcores_avg;
+    @JsonProperty("map_max_vcores_stddev")
+    double map_max_vcores_stddev;
+    @JsonProperty("reduce_max_vcores_avg")
+    long reduce_max_vcores_avg;
+    @JsonProperty("reduce_max_vcores_stddev")
+    double reduce_max_vcores_stddev;
+
+  }
+
+  /**
+   * This is used to define time-varying probability of a job start-time (e.g.,
+   * to simulate daily patterns).
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  public static class TimeSample {
+    // in sec
+    @JsonProperty("time")
+    int time;
+    @JsonProperty("weight")
+    double jobs;
+  }
+
+  static class StoryParams {
+    private SynthJobClass pickedJobClass;
+    private long actualSubmissionTime;
+
+    StoryParams(SynthJobClass pickedJobClass, long actualSubmissionTime) {
+      this.pickedJobClass = pickedJobClass;
+      this.actualSubmissionTime = actualSubmissionTime;
+    }
+  }
+
+
+  void createStoryParams() {
+
+    for (int i = 0; i < numJobs.get(); i++) {
+      int workload = SynthUtils.getWeighted(weightList, rand);
+      SynthWorkload pickedWorkload = workloads.get(workload);
+      int jobClass =
+          SynthUtils.getWeighted(pickedWorkload.getWeightList(), rand);
+      SynthJobClass pickedJobClass =
+          pickedWorkload.getClassList().get(jobClass);
+      long actualSubmissionTime = pickedWorkload.getBaseSubmissionTime(rand);
+      listStoryParams
+          .add(new StoryParams(pickedJobClass, actualSubmissionTime));
+    }
+  }
+
+  @Override
+  public JobStory getNextJob() throws IOException {
+    if (numJobs.decrementAndGet() < 0) {
+      return null;
+    }
+    StoryParams storyParams = listStoryParams.poll();
+    return storyParams.pickedJobClass.getJobStory(conf,
+        storyParams.actualSubmissionTime);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public String toString() {
+    return "SynthTraceJobProducer [ conf=" + conf + ", numJobs=" + numJobs
+        + ", weightList=" + weightList + ", r=" + rand + ", totalWeight="
+        + totalWeight + ", workloads=" + workloads + "]";
+  }
+
+  public int getNumJobs() {
+    return trace.num_jobs;
+  }
+
+}
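
For reference, a minimal sketch of driving this producer from client code (mirroring the unit test later in this commit; the trace path is illustrative, and exception handling is omitted):

    Configuration conf = new Configuration();
    conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
        "src/test/resources/syn.json");
    SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
    JobStory job = stjp.getNextJob();
    while (job != null) {
      // each JobStory carries submission time, task counts, durations, etc.
      job = stjp.getNextJob();
    }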

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthUtils.java
new file mode 100644
index 0000000..a7f8c7f
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthUtils.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls.synthetic;
+
+import org.apache.commons.math3.distribution.LogNormalDistribution;
+import org.apache.commons.math3.distribution.NormalDistribution;
+import org.apache.commons.math3.random.JDKRandomGenerator;
+
+import java.util.Collection;
+import java.util.Random;
+
+/**
+ * Utils for the Synthetic generator.
+ */
+public final class SynthUtils {
+
+  private SynthUtils(){
+    //class is not meant to be instantiated
+  }
+
+  public static int getWeighted(Collection<Double> weights, Random rr) {
+
+    double totalWeight = 0;
+    for (Double i : weights) {
+      totalWeight += i;
+    }
+
+    double rand = rr.nextDouble() * totalWeight;
+
+    double cur = 0;
+    int ind = 0;
+    for (Double i : weights) {
+      cur += i;
+      if (cur > rand) {
+        break;
+      }
+      ind++;
+    }
+
+    return ind;
+  }
+
+  public static NormalDistribution getNormalDist(JDKRandomGenerator rand,
+      double average, double stdDev) {
+
+    if (average <= 0) {
+      return null;
+    }
+
+    // set default for missing param
+    if (stdDev == 0) {
+      stdDev = average / 6;
+    }
+
+    NormalDistribution ret = new NormalDistribution(average, stdDev,
+        NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+    ret.reseedRandomGenerator(rand.nextLong());
+    return ret;
+  }
+
+  public static LogNormalDistribution getLogNormalDist(JDKRandomGenerator rand,
+      double mean, double stdDev) {
+
+    if (mean <= 0) {
+      return null;
+    }
+
+    // set default for missing param
+    if (stdDev == 0) {
+      stdDev = mean / 6;
+    }
+
+    // derive lognormal parameters for X = LogNormal(mu, sigma)
+    // sigma^2 = ln (1+Var[X]/(E[X])^2)
+    // mu = ln(E[X]) - 1/2 * sigma^2
+    double var = stdDev * stdDev;
+    double sigmasq = Math.log1p(var / (mean * mean));
+    double sigma = Math.sqrt(sigmasq);
+    double mu = Math.log(mean) - 0.5 * sigmasq;
+
+    LogNormalDistribution ret = new LogNormalDistribution(mu, sigma,
+        LogNormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
+    ret.reseedRandomGenerator(rand.nextLong());
+    return ret;
+  }
+}
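
As a quick illustration of the weighted sampling above (a sketch; assumes the usual java.util imports): the cumulative sum crosses the scaled random draw in proportion to each weight, so with weights [1.0, 2.0, 1.0] the middle index is returned about half of the time:

    List<Double> weights = Arrays.asList(1.0, 2.0, 1.0);
    // over repeated draws, returns 0, 1, or 2 with
    // probability 0.25, 0.5, 0.25 respectively
    int picked = SynthUtils.getWeighted(weights, new Random(42));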

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
new file mode 100644
index 0000000..9e5fd4e
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls.synthetic;
+
+import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer.Trace;
+
+import java.util.*;
+
+/**
+ * This class represents a workload (made up of multiple SynthJobClass(es)).
+ * It also stores the temporal distribution of jobs in this workload.
+ */
+public class SynthWorkload {
+
+  private final int id;
+  private final List<SynthJobClass> classList;
+  private final Trace trace;
+  private final SortedMap<Integer, Double> timeWeights;
+
+  public SynthWorkload(int identifier, Trace inTrace) {
+    classList = new ArrayList<SynthJobClass>();
+    this.id = identifier;
+    this.trace = inTrace;
+    timeWeights = new TreeMap<Integer, Double>();
+    for (SynthTraceJobProducer.TimeSample ts : trace.workloads
+        .get(id).time_distribution) {
+      timeWeights.put(ts.time, ts.jobs);
+    }
+  }
+
+  public boolean add(SynthJobClass s) {
+    return classList.add(s);
+  }
+
+  public List<Double> getWeightList() {
+    ArrayList<Double> ret = new ArrayList<Double>();
+    for (SynthJobClass s : classList) {
+      ret.add(s.getClassWeight());
+    }
+    return ret;
+  }
+
+  public int getId() {
+    return id;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof SynthWorkload)) {
+      return false;
+    }
+    // assume ID determines job classes by construction
+    return getId() == ((SynthWorkload) other).getId();
+  }
+
+  @Override
+  public int hashCode() {
+    return getId();
+  }
+
+  @Override
+  public String toString() {
+    return "SynthWorkload " + trace.workloads.get(id).workload_name + "[\n"
+        + classList + "]\n";
+  }
+
+  public String getName() {
+    return trace.workloads.get(id).workload_name;
+  }
+
+  public double getWorkloadWeight() {
+    return trace.workloads.get(id).workload_weight;
+  }
+
+  public String getQueueName() {
+    return trace.workloads.get(id).queue_name;
+  }
+
+  public long getBaseSubmissionTime(Random rand) {
+
+    // pick, based on weights, the "bucket" for this start time
+    int position = SynthUtils.getWeighted(timeWeights.values(), rand);
+
+    int[] time = new int[timeWeights.keySet().size()];
+    int index = 0;
+    for (Integer i : timeWeights.keySet()) {
+      time[index++] = i;
+    }
+
+    // uniformly pick a time between the start and end of this bucket;
+    // if there is no subsequent bucket, simply return the bucket start
+    int startRange = time[position];
+    if (position < time.length - 1) {
+      int endRange = time[position + 1];
+      return startRange + rand.nextInt(endRange - startRange);
+    } else {
+      return startRange;
+    }
+  }
+
+  public List<SynthJobClass> getClassList() {
+    return classList;
+  }
+
+}
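
To illustrate getBaseSubmissionTime: with a time_distribution of { time 1 : weight 66, time 60 : weight 33, time 120 : weight 0 } (as in the documentation below), about two thirds of the jobs receive a start time drawn uniformly from [1, 60), about one third from [60, 120), and none thereafter.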

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/package-info.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/package-info.java
new file mode 100644
index 0000000..e069610
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Classes comprising the synthetic load generator for SLS.
+ */
+package org.apache.hadoop.yarn.sls.synthetic;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
index 085edc0..eaa59dd 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
@@ -149,4 +149,13 @@ public class SLSUtils {
     }
     return nodeSet;
   }
+
+  public static Set<? extends String> generateNodesFromSynth(
+      int numNodes, int nodesPerRack) {
+    Set<String> nodeSet = new HashSet<String>();
+    for (int i = 0; i < numNodes; i++) {
+      // place nodesPerRack consecutive nodes in each rack
+      nodeSet.add("/rack" + i / nodesPerRack + "/node" + i);
+    }
+    return nodeSet;
+  }
 }
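
For example, numNodes=8 and nodesPerRack=4 produces the set {/rack0/node0 .. /rack0/node3, /rack1/node4 .. /rack1/node7}.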

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
index dfd872c..f0e3b8c 100644
--- a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
+++ b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
@@ -27,9 +27,11 @@ Yarn Scheduler Load Simulator (SLS)
     * [Metrics](#Metrics)
         * [Real-time Tracking](#Real-time_Tracking)
         * [Offline Analysis](#Offline_Analysis)
+    * [Synthetic Load Generator](#SynthGen)
     * [Appendix](#Appendix)
         * [Resources](#Resources)
         * [SLS JSON input file format](#SLS_JSON_input_file_format)
+        * [SYNTH JSON input file format](#SYNTH_JSON_input_file_format)
         * [Simulator input topology file format](#Simulator_input_topology_file_format)
 
 Overview
@@ -72,7 +74,7 @@ The following figure illustrates the implementation architecture of the simulato
 
 ![The architecture of the simulator](images/sls_arch.png)
 
-The simulator takes input of workload traces, and fetches the cluster and applications information. For each NM and AM, the simulator builds a simulator to simulate their running. All NM/AM simulators run in a thread pool. The simulator reuses Yarn Resource Manager, and builds a wrapper out of the scheduler. The Scheduler Wrapper can track the scheduler behaviors and generates several logs, which are the outputs of the simulator and can be further analyzed.
+The simulator takes workload traces or synthetic load distributions as input, and generates the cluster and application information. For each NM and AM, the simulator builds a simulator to simulate their running. All NM/AM simulators run in a thread pool. The simulator reuses the Yarn Resource Manager, and builds a wrapper around the scheduler. The Scheduler Wrapper can track the scheduler behaviors and generate several logs, which are the outputs of the simulator and can be further analyzed.
 
 ### Usecases
 
@@ -179,17 +181,30 @@ The simulator supports two types of input files: the rumen traces and its own in
 
     $ cd $HADOOP_ROOT/share/hadoop/tools/sls
     $ bin/slsrun.sh
-      --input-rumen |--input-sls=<TRACE_FILE1,TRACE_FILE2,...>
-      --output-dir=<SLS_SIMULATION_OUTPUT_DIRECTORY> [--nodes=<SLS_NODES_FILE>]
-        [--track-jobs=<JOBID1,JOBID2,...>] [--print-simulation]
+      Usage: slsrun.sh <OPTIONS>
+                 --tracetype=<SYNTH | SLS | RUMEN>
+                 --tracelocation=<FILE1,FILE2,...>
+                 (deprecated --input-rumen=<FILE1,FILE2,...>  | --input-sls=<FILE1,FILE2,...>)
+                 --output-dir=<SLS_SIMULATION_OUTPUT_DIRECTORY>
+                 [--nodes=<SLS_NODES_FILE>]
+                 [--track-jobs=<JOBID1,JOBID2,...>]
+                 [--print-simulation]
+
 
 *   `--input-rumen`: The input rumen trace files. Users can input multiple
     files, separated by comma. One example trace is provided in
     `$HADOOP_ROOT/share/hadoop/tools/sls/sample-data/2jobs2min-rumen-jh.json`.
+    This is equivalent to `--tracetype=RUMEN --tracelocation=<path_to_trace>`.
 
 *   `--input-sls`: Simulator its own file format. The simulator also
     provides a tool to convert rumen traces to sls traces (`rumen2sls.sh`).
     Refer to appendix for an example of sls input json file.
+    This is equivalent to `--tracetype=SLS --tracelocation=<path_to_trace>`.
+
+*   `--tracetype`: This is the new way to configure trace generation; it
+    takes the values RUMEN, SLS, or SYNTH, triggering the three types of load generation.
+
+*   `--tracelocation`: Path to the input file, matching the tracetype above.
 
 *   `--output-dir`: The output directory for generated running logs and
     metrics.
@@ -279,12 +294,34 @@ After the simulator finishes, all logs are saved in the output directory specifi
 
 *   Folder `metrics`: logs generated by the Metrics.
 
+Users can also reproduce those real-time tracking charts in offline mode. Simply upload the `realtimetrack.json` to `$HADOOP_ROOT/share/hadoop/tools/sls/html/showSimulationTrace.html`. Due to browser security restrictions, the files `realtimetrack.json` and `showSimulationTrace.html` must be placed in the same directory.
+
+
+Synthetic Load Generator
+------------------------
+The Synthetic Load Generator complements the extensive nature of SLS-native and RUMEN traces by providing
+distribution-driven generation of load. The load generator is organized as a JobStoryProducer
+(compatible with rumen, and thus with gridmix for later integration). We seed the random number generator so
+that results are randomized but deterministic---hence reproducible.
+We organize the jobs being generated around a *workloads/job_class* hierarchy, which allows us to easily
+group jobs with similar behaviors and categorize them (e.g., jobs with long-running containers, or map-only
+computations, etc.). The user can control averages and standard deviations for many of the
+important parameters, such as the number of mappers/reducers, the duration of mappers/reducers, the size
+(mem/cpu) of containers, the chance of reservation, etc. We use weighted random sampling (whenever we
+pick among a small number of options) or LogNormal distributions (to avoid negative values) when we
+pick from wide ranges of values---see the appendix on LogNormal distributions.
+
+The SYNTH mode of SLS is very convenient for generating very large loads without the need for extensive input
+files. This makes it easy to explore a wide range of use cases (e.g., imagine simulating 100k jobs, and in different
+runs simply tuning the average number of mappers, or the average task duration), in an efficient and compact way.
+
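+For example, a SYNTH run over the sample trace shipped with the tests might look as follows (the trace path is illustrative; point `--tracelocation` at your own .json description):
+
+    $ cd $HADOOP_ROOT/share/hadoop/tools/sls
+    $ bin/slsrun.sh --tracetype=SYNTH --tracelocation=syn.json \
+        --output-dir=/tmp/sls-synth-out --print-simulation
+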
 Appendix
 --------
 
 ### Resources
 
 [YARN-1021](https://issues.apache.org/jira/browse/YARN-1021) is the main JIRA that introduces Yarn Scheduler Load Simulator to Hadoop Yarn project.
+[YARN-6363](https://issues.apache.org/jira/browse/YARN-6363) is the main JIRA that introduces the Synthetic Load Generator to SLS.
 
 ### SLS JSON input file format
 
@@ -339,6 +376,77 @@ Here we provide an example format of the sls json file, which contains 2 jobs. T
       } ]
     }
 
+
+### SYNTH JSON input file format
+Here we provide an example format of the synthetic generator json file. We use *(json-non-conforming)* inline comments to explain the use of each parameter.
+
+    {
+      "description" : "tiny jobs workload",    //description of the meaning of this collection of workloads
+      "num_nodes" : 10,  //total nodes in the simulated cluster
+      "nodes_per_rack" : 4, //number of nodes in each simulated rack
+      "num_jobs" : 10, // total number of jobs being simulated
+      "rand_seed" : 2, //the random seed used for deterministic randomized runs
+
+      // a list of "workloads", each of which has job classes, and temporal properties
+      "workloads" : [
+        {
+          "workload_name" : "tiny-test", // name of the workload
+          "workload_weight": 0.5,  // used for weighted random selection of which workload to sample from
+          "queue_name" : "sls_queue_1", //queue the job will be submitted to
+
+          //different classes of jobs for this workload
+          "job_classes" : [
+            {
+              "class_name" : "class_1", //name of the class
+              "class_weight" : 1.0, //used for weighted random selection of class within workload
+
+              //the next group controls average and standard deviation of a LogNormal distribution that
+              //determines the number of mappers and reducers for the job.
+              "mtasks_avg" : 5,
+              "mtasks_stddev" : 1,
+              "rtasks_avg" : 5,
+              "rtasks_stddev" : 1,
+
+              //average and stdev input params of the LogNormal distribution controlling job duration
+              "dur_avg" : 60,
+              "dur_stddev" : 5,
+
+              //average and stdev input params of the LogNormal distributions controlling mapper and reducer durations
+              "mtime_avg" : 10,
+              "mtime_stddev" : 2,
+              "rtime_avg" : 20,
+              "rtime_stddev" : 4,
+
+              //average and stdev input params of the LogNormal distributions controlling memory and cores for map and reduce
+              "map_max_memory_avg" : 1024,
+              "map_max_memory_stddev" : 0.001,
+              "reduce_max_memory_avg" : 2048,
+              "reduce_max_memory_stddev" : 0.001,
+              "map_max_vcores_avg" : 1,
+              "map_max_vcores_stddev" : 0.001,
+              "reduce_max_vcores_avg" : 2,
+              "reduce_max_vcores_stddev" : 0.001,
+
+              //probability of running this job with a reservation
+              "chance_of_reservation" : 0.5,
+              //input parameters of LogNormal distribution that determines the deadline slack (as a multiplier of job duration)
+              "deadline_factor_avg" : 10.0,
+              "deadline_factor_stddev" : 0.001,
+            }
+           ],
+        // for each workload, determines with what probability each time bucket is picked to choose the job start time.
+        // In the example below the jobs have twice as much chance to start in the first minute as in the second minute
+        // of simulation, and zero chance thereafter.
+          "time_distribution" : [
+            { "time" : 1, "weight" : 66 },
+            { "time" : 60, "weight" : 33 },
+            { "time" : 120, "jobs" : 0 }
+         ]
+        }
+     ]
+    }
+
+
 ### Simulator input topology file format
 
 Here is an example input topology file which has 3 nodes organized in 1 rack.
@@ -353,3 +461,9 @@ Here is an example input topology file which has 3 nodes organized in 1 rack.
         "node" : "node3"
       }]
     }
+
+### Notes on LogNormal distribution
+LogNormal distributions represent well many of the parameters we see in practice (e.g., most jobs have
+a small number of mappers, but a few might be very large, and a few very small, yet greater than zero). It is
+however worth noting that they might be tricky to use, as the average is typically on the right side of the
+peak (the most common value) of the distribution, because the distribution has a one-sided tail.
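+
+Concretely, given a desired average `E[X]` and standard deviation `Std[X]` for a parameter, the generator derives the underlying LogNormal parameters as `sigma^2 = ln(1 + Var[X]/E[X]^2)` and `mu = ln(E[X]) - sigma^2/2` (see `SynthUtils.getLogNormalDist`), so that the sampled values match the requested moments.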

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
new file mode 100644
index 0000000..8ef72ab
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls;
+
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * This is a base class to ease the implementation of SLS-based tests.
+ */
+@RunWith(value = Parameterized.class)
+@NotThreadSafe
+@SuppressWarnings("VisibilityModifier")
+public class BaseSLSRunnerTest {
+
+  @Parameter(value = 0)
+  public String schedulerType;
+
+  @Parameter(value = 1)
+  public String traceType;
+
+  @Parameter(value = 2)
+  public String traceLocation;
+
+  @Parameter(value = 3)
+  public String nodeFile;
+
+  protected SLSRunner sls;
+
+  @After
+  public void tearDown() throws InterruptedException {
+    sls.stop();
+  }
+
+  public void runSLS(Configuration conf, long timeout) throws Exception {
+    File tempDir = new File("target", UUID.randomUUID().toString());
+    final List<Throwable> exceptionList =
+        Collections.synchronizedList(new ArrayList<Throwable>());
+
+    Thread.setDefaultUncaughtExceptionHandler(
+        new Thread.UncaughtExceptionHandler() {
+          @Override
+          public void uncaughtException(Thread t, Throwable e) {
+            e.printStackTrace();
+            exceptionList.add(e);
+          }
+        });
+
+    // start the simulator
+    File slsOutputDir = new File(tempDir.getAbsolutePath() + "/slsoutput/");
+
+    String[] args;
+
+    switch (traceType) {
+    case "OLD_SLS":
+      args = new String[] {"-inputsls", traceLocation, "-output",
+          slsOutputDir.getAbsolutePath()};
+      break;
+    case "OLD_RUMEN":
+      args = new String[] {"-inputrumen", traceLocation, "-output",
+          slsOutputDir.getAbsolutePath()};
+      break;
+    default:
+      args = new String[] {"-tracetype", traceType, "-tracelocation",
+          traceLocation, "-output", slsOutputDir.getAbsolutePath()};
+    }
+
+    if (nodeFile != null) {
+      args = ArrayUtils.addAll(args, new String[] {"-nodes", nodeFile});
+    }
+
+    conf.set(YarnConfiguration.RM_SCHEDULER, schedulerType);
+    sls = new SLSRunner(conf);
+    sls.run(args);
+
+    // wait for timeout seconds before stopping, unless there is an uncaught
+    // exception, in which case fail fast.
+    while (timeout >= 0) {
+      Thread.sleep(1000);
+
+      if (!exceptionList.isEmpty()) {
+        sls.stop();
+        Assert.fail("TestSLSRunner caught an exception from a child thread "
+            + "(TaskRunner.Task): " + exceptionList);
+        break;
+      }
+      timeout--;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
index 9da8ef3..b2bc8d5 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
@@ -18,53 +18,67 @@
 
 package org.apache.hadoop.yarn.sls;
 
-import org.junit.Assert;
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.hadoop.conf.Configuration;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.*;
 
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
+import java.util.*;
 
-public class TestSLSRunner {
+/**
+ * This test performs simple runs of the SLS with different trace types and
+ * schedulers.
+ */
+@RunWith(value = Parameterized.class)
+@NotThreadSafe
+public class TestSLSRunner extends BaseSLSRunnerTest {
 
-  @Test
-  @SuppressWarnings("all")
-  public void testSimulatorRunning() throws Exception {
-    File tempDir = new File("target", UUID.randomUUID().toString());
-    final List<Throwable> exceptionList =
-        Collections.synchronizedList(new ArrayList<Throwable>());
+  @Parameters(name = "Testing with: {1}, {0}, (nodeFile {3})")
+  public static Collection<Object[]> data() {
 
-    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
-      @Override
-      public void uncaughtException(Thread t, Throwable e) {
-        exceptionList.add(e);
-      }
-    });
+    String capScheduler =
+        "org.apache.hadoop.yarn.server.resourcemanager.scheduler."
+            + "capacity.CapacityScheduler";
+    String fairScheduler =
+        "org.apache.hadoop.yarn.server.resourcemanager.scheduler."
+            + "fair.FairScheduler";
+    String slsTraceFile = "src/test/resources/inputsls.json";
+    String rumenTraceFile = "src/main/data/2jobs2min-rumen-jh.json";
+    String synthTraceFile = "src/test/resources/syn.json";
+    String nodeFile = "src/test/resources/nodes.json";
+
+    // Test with both schedulers, and all three load producers.
+    return Arrays.asList(new Object[][] {
 
-    // start the simulator
-    File slsOutputDir = new File(tempDir.getAbsolutePath() + "/slsoutput/");
-    String args[] = new String[]{
-            "-inputrumen", "src/main/data/2jobs2min-rumen-jh.json",
-            "-output", slsOutputDir.getAbsolutePath()};
-    SLSRunner.main(args);
+        // covering old commandline in tests
+        {capScheduler, "OLD_RUMEN", rumenTraceFile, nodeFile },
+        {capScheduler, "OLD_SLS", slsTraceFile, nodeFile },
 
-    // wait for 20 seconds before stop
-    int count = 20;
-    while (count >= 0) {
-      Thread.sleep(1000);
+        // covering the no nodeFile case
+        {capScheduler, "SYNTH", synthTraceFile, null },
+        {capScheduler, "RUMEN", rumenTraceFile, null },
+        {capScheduler, "SLS", slsTraceFile, null },
 
-      if (! exceptionList.isEmpty()) {
-        SLSRunner.getRunner().stop();
-        Assert.fail("TestSLSRunner catched exception from child thread " +
-            "(TaskRunner.Task): " + exceptionList.get(0).getMessage());
-        break;
-      }
-      count--;
-    }
+        // covering new commandline and CapacityScheduler
+        {capScheduler, "SYNTH", synthTraceFile, nodeFile },
+        {capScheduler, "RUMEN", rumenTraceFile, nodeFile },
+        {capScheduler, "SLS", slsTraceFile, nodeFile },
 
-    SLSRunner.getRunner().stop();
+        // covering FairScheduler
+        {fairScheduler, "SYNTH", synthTraceFile, nodeFile },
+        {fairScheduler, "RUMEN", rumenTraceFile, nodeFile },
+        {fairScheduler, "SLS", slsTraceFile, nodeFile }
+    });
+  }
+
+  @Test(timeout = 60000)
+  @SuppressWarnings("all")
+  public void testSimulatorRunning() throws Exception {
+    Configuration conf = new Configuration(false);
+    long timeTillShutdownInSec = 20L;
+    runSLS(conf, timeTillShutdownInSec);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
new file mode 100644
index 0000000..2b1971a
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.sls;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
+import org.apache.hadoop.yarn.sls.synthetic.SynthJob;
+import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Simple test class driving the {@code SynthTraceJobProducer}, and validating
+ * that the jobs produced are within the expected ranges.
+ */
+public class TestSynthJobGeneration {
+
+  public final static Logger LOG =
+      Logger.getLogger(TestSynthJobGeneration.class);
+
+  @Test
+  public void test() throws IllegalArgumentException, IOException {
+
+    Configuration conf = new Configuration();
+
+    conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
+        "src/test/resources/syn.json");
+
+    SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
+
+    SynthJob js = (SynthJob) stjp.getNextJob();
+
+    int jobCount = 0;
+
+    while (js != null) {
+      LOG.info((jobCount++) + " " + js.getQueueName() + " -- "
+          + js.getJobClass().getClassName() + " (conf: "
+          + js.getJobConf().get(MRJobConfig.QUEUE_NAME) + ") " + " submission: "
+          + js.getSubmissionTime() + ", " + " duration: " + js.getDuration()
+          + " numMaps: " + js.getNumberMaps() + " numReduces: "
+          + js.getNumberReduces());
+
+      validateJob(js);
+      js = (SynthJob) stjp.getNextJob();
+    }
+
+    Assert.assertEquals(stjp.getNumJobs(), jobCount);
+  }
+
+  private void validateJob(SynthJob js) {
+
+    assertTrue(js.getSubmissionTime() > 0);
+    assertTrue(js.getDuration() > 0);
+    assertTrue(js.getNumberMaps() >= 0);
+    assertTrue(js.getNumberReduces() >= 0);
+    assertTrue(js.getNumberMaps() + js.getNumberReduces() > 0);
+    assertTrue(js.getTotalSlotTime() >= 0);
+
+    for (int i = 0; i < js.getNumberMaps(); i++) {
+      TaskAttemptInfo tai = js.getTaskAttemptInfo(TaskType.MAP, i, 0);
+      assertTrue(tai.getRuntime() > 0);
+    }
+
+    for (int i = 0; i < js.getNumberReduces(); i++) {
+      TaskAttemptInfo tai = js.getTaskAttemptInfo(TaskType.REDUCE, i, 0);
+      assertTrue(tai.getRuntime() > 0);
+    }
+
+    if (js.hasDeadline()) {
+      assertTrue(js.getDeadline() > js.getSubmissionTime() + js.getDuration());
+    }
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
index ca3d195..56aa219 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
@@ -134,7 +134,7 @@ public class TestAMSimulator {
     String queue = "default";
     List<ContainerSimulator> containers = new ArrayList<>();
     app.init(1, 1000, containers, rm, null, 0, 1000000L, "user1", queue,
-        true, appId);
+        true, appId, null, 0);
     app.firstStep();
 
     verifySchedulerMetrics(appId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java
index 23f2bb6..ce6c1b3 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/scheduler/TestTaskRunner.java
@@ -35,7 +35,7 @@ public class TestTaskRunner {
   }
 
   @After
-  public void cleanUp() {
+  public void cleanUp() throws InterruptedException {
     runner.stop();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/capacity-scheduler.xml b/hadoop-tools/hadoop-sls/src/test/resources/capacity-scheduler.xml
index 61be96a..1762265 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/capacity-scheduler.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/capacity-scheduler.xml
@@ -39,6 +39,16 @@
   </property>
 
   <property>
+    <name>yarn.scheduler.capacity.root.sls_queue_1.reservable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.sls_queue_1.show-reservations-as-queues</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <name>yarn.scheduler.capacity.root.sls_queue_2.capacity</name>
     <value>25</value>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/fair-scheduler.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/fair-scheduler.xml b/hadoop-tools/hadoop-sls/src/test/resources/fair-scheduler.xml
index fa10359..7c46767 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/fair-scheduler.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/fair-scheduler.xml
@@ -21,6 +21,7 @@
 -->
 
 <allocations>
+  <defaultQueueSchedulingPolicy>drf</defaultQueueSchedulingPolicy>
   <user name="jenkins">
     <!-- Limit on running jobs for the user across all pools. If more
       jobs than this are submitted, only the first <maxRunningJobs> will
@@ -31,20 +32,21 @@
   <userMaxAppsDefault>1000</userMaxAppsDefault>
   <queue name="sls_queue_1">
     <minResources>1024 mb, 1 vcores</minResources>
-    <schedulingMode>fair</schedulingMode>
+    <schedulingPolicy>drf</schedulingPolicy>
     <weight>0.25</weight>
     <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
+    <reservation>true</reservation>
   </queue>
   <queue name="sls_queue_2">
     <minResources>1024 mb, 1 vcores</minResources>
-    <schedulingMode>fair</schedulingMode>
+    <schedulingMode>drf</schedulingMode>
     <weight>0.25</weight>
     <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
   </queue>
   <queue name="sls_queue_3">
     <minResources>1024 mb, 1 vcores</minResources>
     <weight>0.5</weight>
-    <schedulingMode>fair</schedulingMode>
+    <schedulingMode>drf</schedulingMode>
     <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
   </queue>
 </allocations>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/inputsls.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/inputsls.json b/hadoop-tools/hadoop-sls/src/test/resources/inputsls.json
new file mode 100644
index 0000000..b9d46a5
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/inputsls.json
@@ -0,0 +1,55 @@
+{
+  "am.type": "mapreduce",
+  "job.start.ms": 0,
+  "job.end.ms": 95375,
+  "job.queue.name": "sls_queue_1",
+  "job.id": "job_1",
+  "job.user": "default",
+  "job.tasks": [
+    {
+      "container.host": "/default-rack/node1",
+      "container.start.ms": 6664,
+      "container.end.ms": 23707,
+      "container.priority": 20,
+      "container.type": "map"
+    },
+    {
+      "container.host": "/default-rack/node3",
+      "container.start.ms": 6665,
+      "container.end.ms": 21593,
+      "container.priority": 20,
+      "container.type": "map"
+    },
+    {
+      "container.host": "/default-rack/node2",
+      "container.start.ms": 68770,
+      "container.end.ms": 86613,
+      "container.priority": 20,
+      "container.type": "map"
+    }
+  ]
+}
+{
+  "am.type": "mapreduce",
+  "job.start.ms": 105204,
+  "job.end.ms": 197256,
+  "job.queue.name": "sls_queue_2",
+  "job.id": "job_2",
+  "job.user": "default",
+  "job.tasks": [
+    {
+      "container.host": "/default-rack/node1",
+      "container.start.ms": 111822,
+      "container.end.ms": 133985,
+      "container.priority": 20,
+      "container.type": "map"
+    },
+    {
+      "container.host": "/default-rack/node2",
+      "container.start.ms": 111788,
+      "container.end.ms": 131377,
+      "container.priority": 20,
+      "container.type": "map"
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/nodes.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/nodes.json b/hadoop-tools/hadoop-sls/src/test/resources/nodes.json
new file mode 100644
index 0000000..3039554
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/nodes.json
@@ -0,0 +1,84 @@
+{
+  "rack": "rack1",
+  "nodes": [
+    {
+      "node": "node1"
+    },
+    {
+      "node": "node2"
+    },
+    {
+      "node": "node3"
+    },
+    {
+      "node": "node4"
+    }
+  ]
+}
+{
+  "rack": "rack2",
+  "nodes": [
+    {
+      "node": "node5"
+    },
+    {
+      "node": "node6"
+    },
+    {
+      "node": "node7"
+    },
+    {
+      "node": "node8"
+    }
+  ]
+}
+{
+  "rack": "rack3",
+  "nodes": [
+    {
+      "node": "node9"
+    },
+    {
+      "node": "node10"
+    },
+    {
+      "node": "node11"
+    },
+    {
+      "node": "node12"
+    }
+  ]
+}
+{
+  "rack": "rack4",
+  "nodes": [
+    {
+      "node": "node13"
+    },
+    {
+      "node": "node14"
+    },
+    {
+      "node": "node15"
+    },
+    {
+      "node": "node16"
+    }
+  ]
+}
+{
+  "rack": "rack5",
+  "nodes": [
+    {
+      "node": "node17"
+    },
+    {
+      "node": "node18"
+    },
+    {
+      "node": "node19"
+    },
+    {
+      "node": "node20"
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml b/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
index d7acc98..2f076c2 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
@@ -25,11 +25,11 @@
   <!-- Nodes configuration -->
   <property>
     <name>yarn.sls.nm.memory.mb</name>
-    <value>10240</value>
+    <value>100240</value>
   </property>
   <property>
     <name>yarn.sls.nm.vcores</name>
-    <value>10</value>
+    <value>100</value>
   </property>
   <property>
     <name>yarn.sls.nm.heartbeat.interval.ms</name>
@@ -77,5 +77,5 @@
     <name>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</name>
     <value>org.apache.hadoop.yarn.sls.scheduler.CapacitySchedulerMetrics</value>
   </property>
-  
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/syn.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/syn.json b/hadoop-tools/hadoop-sls/src/test/resources/syn.json
new file mode 100644
index 0000000..8479d23
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/syn.json
@@ -0,0 +1,53 @@
+{
+  "description": "tiny jobs workload",
+  "num_nodes": 20,
+  "nodes_per_rack": 4,
+  "num_jobs": 10,
+  "rand_seed": 2,
+  "workloads": [
+    {
+      "workload_name": "tiny-test",
+      "workload_weight": 0.5,
+      "description": "Sort jobs",
+      "queue_name": "sls_queue_1",
+      "job_classes": [
+        {
+          "class_name": "class_1",
+          "user_name": "foobar",
+          "class_weight": 1.0,
+          "mtasks_avg": 5,
+          "mtasks_stddev": 1,
+          "rtasks_avg": 5,
+          "rtasks_stddev": 1,
+          "dur_avg": 60,
+          "dur_stddev": 5,
+          "mtime_avg": 10,
+          "mtime_stddev": 2,
+          "rtime_avg": 20,
+          "rtime_stddev": 4,
+          "map_max_memory_avg": 1024,
+          "map_max_memory_stddev": 0.001,
+          "reduce_max_memory_avg": 2048,
+          "reduce_max_memory_stddev": 0.001,
+          "map_max_vcores_avg": 1,
+          "map_max_vcores_stddev": 0.001,
+          "reduce_max_vcores_avg": 2,
+          "reduce_max_vcores_stddev": 0.001,
+          "chance_of_reservation": 0.5,
+          "deadline_factor_avg": 10.0,
+          "deadline_factor_stddev": 0.001
+        }
+      ],
+      "time_distribution": [
+        {
+          "time": 1,
+          "weight": 100
+        },
+        {
+          "time": 60,
+          "jobs": 0
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de69d6e8/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml b/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
index 78aa6f2..7b2e674 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
@@ -17,7 +17,7 @@
 <configuration>
   <property>
     <name>yarn.resourcemanager.scheduler.class</name>
-	  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
     <!-- <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value> -->
   </property>
 
@@ -79,4 +79,12 @@
     <name>yarn.scheduler.fair.assignmultiple</name>
     <value>true</value>
   </property>
+
+
+  <property>
+    <description>Enable reservation system.</description>
+    <name>yarn.resourcemanager.reservation-system.enable</name>
+    <value>true</value>
+  </property>
+
 </configuration>

