climate-commits mailing list archives

From jo...@apache.org
Subject [36/56] [partial] gh-pages clean up
Date Tue, 01 Jul 2014 14:50:00 GMT
http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/processing.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/processing.py b/ocw-ui/backend/processing.py
deleted file mode 100644
index 2b70713..0000000
--- a/ocw-ui/backend/processing.py
+++ /dev/null
@@ -1,736 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-''' Provides endpoints for running an OCW evaluation. '''
-
-from datetime import timedelta, datetime
-import inspect
-import sys
-import os
-import json
-
-from bottle import Bottle, request, response
-
-from config import WORK_DIR
-
-import ocw.data_source.local as local
-import ocw.data_source.rcmed as rcmed
-import ocw.dataset_processor as dsp
-from ocw.evaluation import Evaluation
-from ocw.dataset import Bounds
-import ocw.metrics as metrics
-import ocw.plotter as plotter
-
-import numpy as np
-
-processing_app = Bottle()
-
-@processing_app.hook('after_request')
-def enable_cors():
-    ''' Allow Cross-Origin Resource Sharing for all URLs. '''
-    response.headers['Access-Control-Allow-Origin'] = '*'
-
-@processing_app.route('/metrics/')
-def retrieve_metrics():
-    ''' Retrieve available metric names.
-
-    **Example Return JSON Format**
-
-    .. sourcecode:: javascript
-
-        {
-            'metrics': [
-                'MetricName1',
-                'MetricName2',
-                ...
-            ]
-        }
-    '''
-    valid_metrics = _get_valid_metric_options().keys()
-    output = json.dumps({'metrics': valid_metrics})
-    response.content_type = 'application/json'
-
-    if request.query.callback:
-        return '%s(%s)' % (request.query.callback, output)
-    return output
-
-@processing_app.route('/run_evaluation/', method='POST')
-def run_evaluation():
-    ''' Run an OCW Evaluation.
-
-    *run_evaluation* expects the Evaluation parameters to be POSTed in
-    the following format.
-
-    .. sourcecode:: javascript
-
-        {
-            reference_dataset: {
-                // Id that tells us how we need to load this dataset.
-                'data_source_id': 1 == local, 2 == rcmed,
-
-                // Dict of data_source specific identifying information.
-                //
-                // if data_source_id == 1 == local:
-                // {
-                //     'dataset_id': The path to the local file on the server for loading.
-                //     'var_name': The variable data to pull from the file.
-                //     'lat_name': The latitude variable name.
-                //     'lon_name': The longitude variable name.
-                //     'time_name': The time variable name
-                //     'name': Optional dataset name
-                // }
-                //
-                // if data_source_id == 2 == rcmed:
-                // {
-                //     'dataset_id': The dataset id to grab from RCMED.
-                //     'parameter_id': The variable id value used by RCMED.
-                //     'name': Optional dataset name
-                // }
-                'dataset_info': {..}
-            },
-
-            // The list of target datasets to use in the Evaluation. The data
-            // format for the dataset objects should be the same as the
-            // reference_dataset above.
-            'target_datasets': [{...}, {...}, ...],
-
-            // All the datasets are re-binned to the reference dataset
-            // before being added to an experiment. This step (in degrees)
-            // is used when re-binning both the reference and target datasets.
-            'spatial_rebin_lat_step': The lat degree step. Integer > 0,
-
-            // Same as above, but for lon
-            'spatial_rebin_lon_step': The lon degree step. Integer > 0,
-
-            // The temporal resolution to use when doing a temporal re-bin
-            // This is a timedelta of days to use so daily == 1, monthly is
-            // (1, 31], annual/yearly is (31, 366], and full is anything > 366.
-            'temporal_resolution': Integer in range(1, 999),
-
-            // A list of the metric class names to use in the evaluation. The
-            // names must match the class name exactly.
-            'metrics': [Bias, TemporalStdDev, ...]
-
-            // The bounding values used in the Evaluation. Note that lat values
-            // should range from -90 to 90 and lon values from -180 to 180.
-            'start_time': start time value in the format '%Y-%m-%d %H:%M:%S',
-            'end_time': end time value in the format '%Y-%m-%d %H:%M:%S',
-            'lat_min': The minimum latitude value,
-            'lat_max': The maximum latitude value,
-            'lon_min': The minimum longitude value,
-            'lon_max': The maximum longitude value,
-
-            // NOTE: At the moment, subregion support is fairly minimal. This
-            // will be addressed in the future. Ideally, the user should be able
-            // to load a file that they have locally. That would change the
-                // format in which this data is passed.
-            'subregion_information': Path to a subregion file on the server.
-        }
-    '''
-    # TODO: validate input parameters and return an error if not valid
-
-    eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-    data = request.json
-
-    eval_bounds = {
-        'start_time': datetime.strptime(data['start_time'], '%Y-%m-%d %H:%M:%S'),
-        'end_time': datetime.strptime(data['end_time'], '%Y-%m-%d %H:%M:%S'),
-        'lat_min': float(data['lat_min']),
-        'lat_max': float(data['lat_max']),
-        'lon_min': float(data['lon_min']),
-        'lon_max': float(data['lon_max'])
-    }
-
-    # Load all the datasets
-    ref_dataset = _process_dataset_object(data['reference_dataset'], eval_bounds)
-
-    target_datasets = [_process_dataset_object(obj, eval_bounds)
-                       for obj
-                       in data['target_datasets']]
-
-    # Normalize the dataset time values so they break on consistent days of the
-    # month or time of the day, depending on how they will be rebinned.
-    resolution = data['temporal_resolution']
-    time_delta = timedelta(days=resolution)
-
-    time_step = 'daily' if resolution == 1 else 'monthly'
-    ref_dataset = dsp.normalize_dataset_datetimes(ref_dataset, time_step)
-    target_datasets = [dsp.normalize_dataset_datetimes(ds, time_step)
-                       for ds in target_datasets]
-
-    # Subset the datasets
-    start = eval_bounds['start_time']
-    end = eval_bounds['end_time']
-
-    # Normalize all the values to the first of the month if we're not
-    # dealing with daily data. This will ensure that a valid subregion
-    # isn't considered out of bounds due to a dataset's time values
-    # being shifted to the first of the month.
-    if time_step != 'daily':
-        if start.day != 1:
-            day_offset = start.day - 1
-            start -= timedelta(days=day_offset)
-
-        if end.day != 1:
-            day_offset = end.day - 1
-            end -= timedelta(days=day_offset)
-
-    subset = Bounds(eval_bounds['lat_min'],
-                    eval_bounds['lat_max'],
-                    eval_bounds['lon_min'],
-                    eval_bounds['lon_max'],
-                    start,
-                    end)
-
-    ref_dataset = dsp.safe_subset(subset, ref_dataset)
-    target_datasets = [dsp.safe_subset(subset, ds)
-                       for ds
-                       in target_datasets]
-    
-    # Do temporal re-bin based on the passed resolution
-    ref_dataset = dsp.temporal_rebin(ref_dataset, time_delta)
-    target_datasets = [dsp.temporal_rebin(ds, time_delta)
-                       for ds
-                       in target_datasets]
-
-    # Do spatial re-bin based on the reference dataset and the lat/lon steps
-    lat_step = data['spatial_rebin_lat_step']
-    lon_step = data['spatial_rebin_lon_step']
-    lat_bins, lon_bins = _calculate_new_latlon_bins(eval_bounds,
-                                                    lat_step,
-                                                    lon_step)
-
-    ref_dataset = dsp.spatial_regrid(ref_dataset, lat_bins, lon_bins)
-    target_datasets = [dsp.spatial_regrid(ds, lat_bins, lon_bins)
-                       for ds
-                       in target_datasets]
-
-    # Load metrics
-    loaded_metrics = _load_metrics(data['metrics'])
-
-    # Prime evaluation object with data
-    evaluation = Evaluation(ref_dataset, target_datasets, loaded_metrics)
-
-    # Run evaluation
-    evaluation.run()
-
-    # Plot
-    _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp)
-
-    return json.dumps({'eval_work_dir': eval_time_stamp})
-
-def _process_dataset_object(dataset_object, eval_bounds):
-    ''' Convert a dataset object representation into an OCW Dataset
-
-    The dataset_object must contain two pieces of information. The
-    `data_source_id` tells how to load the dataset, and the `dataset_info`
-    contains all the information necessary for the load.
-
-    .. sourcecode:: javascript
-
-        // Id that tells us how we need to load this dataset.
-        'data_source_id': 1 == local, 2 == rcmed,
-
-        // Dict of data_source specific identifying information.
-        //
-        // if data_source_id == 1 == local:
-        // {
-        //     'dataset_id': The path to the local file on the server for loading.
-        //     'var_name': The variable data to pull from the file.
-        //     'lat_name': The latitude variable name.
-        //     'lon_name': The longitude variable name.
-        //     'time_name': The time variable name
-        //     'name': Optional dataset name
-        // }
-        //
-        // if data_source_id == 2 == rcmed:
-        // {
-        //     'dataset_id': The dataset id to grab from RCMED.
-        //     'parameter_id': The variable id value used by RCMED.
-        //     'name': Optional dataset name
-        // }
-        'dataset_info': {..}
-
-    :param dataset_object: Dataset information of the above form to be
-        loaded into an OCW Dataset object.
-    :type dataset_object: Dictionary
-    :param eval_bounds: The evaluation bounds for this Evaluation. These
-        are needed to load RCMED datasets.
-    :type eval_bounds: Dictionary
-
-    :returns: dataset_object converted to an ocw.Dataset
-
-    :raises KeyError: If dataset_object is malformed and doesn't contain the
-        keys `data_source_id` or `dataset_info`.
-    :raises ValueError: If the data_source_id isn't valid.
-
-    '''
-    source_id = int(dataset_object['data_source_id'])
-    dataset_info = dataset_object['dataset_info']
-
-    # If we should load with local
-    if source_id == 1:
-        return _load_local_dataset_object(dataset_info)
-    # If we should load with RCMED
-    elif source_id == 2:
-        return _load_rcmed_dataset_object(dataset_info, eval_bounds)
-    else:
-        cur_frame = sys._getframe().f_code
-        err = "{}.{}: Invalid data_source_id - {}".format(
-            cur_frame.co_filename,
-            cur_frame.co_name,
-            source_id
-        )
-        raise ValueError(err)
-
-def _load_local_dataset_object(dataset_info):
-    ''' Create an ocw.dataset.Dataset object from supplied data.
-
-    .. note:: At the moment, data_source.local cannot take advantage of all the
-        supplied variable names. This functionality will be added in the future.
-        However, in the meantime, it is expected that the dataset_info object
-        still contains all the appropriate keys.
-
-    :param dataset_info: The necessary data to load a local dataset with
-        ocw.data_source.local. Must be of the form:
-        {
-            'dataset_id': The path to the local file for loading,
-            'var_name': The variable data to pull from the file,
-            'lat_name': The latitude variable name,
-            'lon_name': The longitude variable name,
-            'time_name': The time variable name
-            'name': Optional dataset name
-        }
-    :type dataset_info: Dictionary
-
-    :returns: An ocw.dataset.Dataset object containing the requested information.
-
-    :raises KeyError: If the required keys aren't present in the dataset_info.
-    :raises ValueError: If data_source.local could not load the requested file.
-    '''
-    path = dataset_info['dataset_id']
-    var_name = dataset_info['var_name']
-    lat_name = dataset_info['lat_name']
-    lon_name = dataset_info['lon_name']
-    time_name = dataset_info['time_name']
-    # If a name is passed for the dataset, use it. Otherwise, use the file name.
-    name = (dataset_info['name']
-            if 'name' in dataset_info
-            else path.split('/')[-1])
-
-    dataset = local.load_file(path, var_name)
-    dataset.name = name
-    return dataset
-
-def _load_rcmed_dataset_object(dataset_info, eval_bounds):
-    ''' Create an ocw.dataset.Dataset object from supplied data.
-
-    :param dataset_info: The necessary data to load a RCMED dataset with
-        ocw.data_source.rcmed. Must be of the form:
-        {
-            'dataset_id': The dataset id to grab from RCMED.
-            'parameter_id': The variable id value used by RCMED.
-            'name': Optional dataset name
-        }
-    :type dataset_info: Dictionary
-
-    :param eval_bounds: The time, lat, and lon bounds values for this Evaluation.
-        Must be of the form:
-        {
-            'start_time': request.query.start_time,
-            'end_time': request.query.end_time,
-            'lat_min': request.query.lat_min,
-            'lat_max': request.query.lat_max,
-            'lon_min': request.query.lon_min,
-            'lon_max': request.query.lon_max
-        }
-    :type eval_bounds: Dictionary
-
-    :returns: An ocw.dataset.Dataset object containing the requested information.
-
-    :raises KeyError: If the required keys aren't present in the dataset_info or
-        eval_bounds objects.
-    '''
-    dataset = rcmed.parameter_dataset(int(dataset_info['dataset_id']),
-                                      int(dataset_info['parameter_id']),
-                                      eval_bounds['lat_min'],
-                                      eval_bounds['lat_max'],
-                                      eval_bounds['lon_min'],
-                                      eval_bounds['lon_max'],
-                                      eval_bounds['start_time'],
-                                      eval_bounds['end_time'])
-
-    # If a name is passed for the dataset, use it. Otherwise, use the parameter's long name from RCMED.
-    if 'name' in dataset_info:
-        name = dataset_info['name']
-    else:
-        for m in rcmed.get_parameters_metadata():
-            if m['parameter_id'] == str(dataset_info['parameter_id']):
-                name = m['longname']
-                break
-        else:
-            # If we can't find a name for the dataset, default to something...
-            name = "RCMED dataset"
-
-    dataset.name = name
-
-    return dataset
-
-def _calculate_new_latlon_bins(eval_bounds, lat_grid_step, lon_grid_step):
-    ''' Calculate the new lat/lon ranges for spatial re-binning.
-
-    :param eval_bounds: The time and lat/lon bounds for the evaluation.
-        Must be of the form:
-        {
-            'lat_min': request.query.lat_min,
-            'lat_max': request.query.lat_max,
-            'lon_min': request.query.lon_min,
-            'lon_max': request.query.lon_max
-        }
-    :type eval_bounds: Dictionary
-    :param lat_grid_step: The degree step between successive latitude values
-        in the newly created bins.
-    :type lat_grid_step: Integer > 0
-    :param lon_grid_step: The degree step between successive longitude values
-        in the newly created bins.
-    :type lon_grid_step: Integer > 0
-
-    :returns: The new lat/lon value lists as a tuple of the form (new_lats, new_lons)
-    '''
-    new_lats = np.arange(eval_bounds['lat_min'],
-                         eval_bounds['lat_max'],
-                         lat_grid_step)
-    new_lons = np.arange(eval_bounds['lon_min'],
-                         eval_bounds['lon_max'],
-                         lon_grid_step)
-    return (new_lats, new_lons)
-
-def _load_metrics(metric_names):
-    ''' Load and create an instance of each requested metric.
-
-    :param metric_names: The names of the metrics that should be loaded and
-        instantiated from ocw.metrics for use in an evaluation.
-    :type metric_names: List
-
-    :returns: A List of Metric objects
-
-    :raises ValueError: If a metric name cannot be matched.
-    '''
-    instantiated_metrics = []
-    metrics_map = _get_valid_metric_options()
-    possible_metrics = metrics_map.keys()
-
-    for metric in metric_names:
-        if metric not in possible_metrics:
-            cur_frame = sys._getframe().f_code
-            err = "{}.{}: Invalid metric name - {}".format(
-                cur_frame.co_filename,
-                cur_frame.co_name,
-                metric
-            )
-            raise ValueError(err)
-
-        instantiated_metrics.append(metrics_map[metric]())
-
-    return instantiated_metrics
-
-def _get_valid_metric_options():
-    ''' Get valid metric options from the ocw.metrics module.
-
-    :returns: A dictionary of metric (name, object) pairs
-    '''
-    invalid_metrics = ['ABCMeta', 'Metric', 'UnaryMetric', 'BinaryMetric']
-
-    # Consider all Unary Metrics invalid. At the moment, the UI cannot handle
-    # running Unary Metrics.
-    unary_metrics = [cls.__name__ for cls in metrics.UnaryMetric.__subclasses__()]
-    invalid_metrics += unary_metrics
-
-    return {name: obj
-            for name, obj in inspect.getmembers(metrics)
-            if inspect.isclass(obj) and name not in invalid_metrics}
-
-def _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp):
-    ''' Generate the Evaluation's plots
-
-    .. note:: This doesn't support graphing evaluations with subregion data.
-
-    :param evaluation: A run Evaluation for which to generate plots.
-    :type evaluation: ocw.evaluation.Evaluation
-    :param lat_bins: The latitude bin values used in the evaluation.
-    :type lat_bins: List
-    :param lon_bins: The longitude bin values used in the evaluation.
-    :type lon_bins: List
-    :param eval_time_stamp: The time stamp for the directory where
-        evaluation results should be saved.
-    :type eval_time_stamp: Time stamp of the form '%Y-%m-%d_%H-%M-%S'
-
-    :raises ValueError: If there aren't any results to graph.
-    '''
-    # Create a time-stamped subdirectory of WORK_DIR for the plots
-    eval_path = os.path.join(WORK_DIR, eval_time_stamp)
-    os.makedirs(eval_path)
-
-    # TODO: Should be able to check for None here...
-    if evaluation.results == [] and evaluation.unary_results == []:
-        cur_frame = sys._getframe().f_code
-        err = "{}.{}: No results to graph".format(cur_frame.co_filename,
-                                                  cur_frame.co_name)
-        raise ValueError(err)
-
-    if evaluation.ref_dataset:
-        grid_shape_dataset = evaluation.ref_dataset
-    else:
-        grid_shape_dataset = evaluation.target_datasets[0]
-
-    grid_shape = _calculate_grid_shape(grid_shape_dataset)
-
-    if evaluation.results != []:
-        for dataset_index, dataset in enumerate(evaluation.target_datasets):
-            for metric_index, metric in enumerate(evaluation.metrics):
-                results = evaluation.results[dataset_index][metric_index]
-                file_name = _generate_binary_eval_plot_file_path(evaluation,
-                                                                 dataset_index,
-                                                                 metric_index,
-                                                                 eval_time_stamp)
-                plot_title = _generate_binary_eval_plot_title(evaluation,
-                                                              dataset_index,
-                                                              metric_index)
-                plotter.draw_contour_map(results,
-                                         lat_bins,
-                                         lon_bins,
-                                         fname=file_name,
-                                         ptitle=plot_title,
-                                         gridshape=grid_shape)
-
-    if evaluation.unary_results != []:
-        for metric_index, metric in enumerate(evaluation.unary_metrics):
-            cur_unary_results = evaluation.unary_results[metric_index]
-            for result_index, result in enumerate(cur_unary_results):
-                file_name = _generate_unary_eval_plot_file_path(evaluation,
-                                                                result_index,
-                                                                metric_index,
-                                                                eval_time_stamp)
-                plot_title = _generate_unary_eval_plot_title(evaluation,
-                                                             result_index,
-                                                             metric_index)
-
-                plotter.draw_contour_map(result,
-                                         lat_bins,
-                                         lon_bins,
-                                         fname=file_name,
-                                         ptitle=plot_title,
-                                         gridshape=grid_shape)
-
-def _calculate_grid_shape(reference_dataset, max_cols=6):
-    ''' Calculate the plot grid shape given a reference dataset. 
-
-    :param reference_dataset: The dataset from which to strip out temporal
-        bin information and calculate grid shape.
-    :type reference_dataset: ocw.dataset.Dataset
-    :param max_cols: The maximum number of columns in the initially calculated
-        grid shape. Note that _balance_grid_shape may adjust the final shape, so
-        the returned column count can differ from max_cols.
-    :type max_cols: Integer > 0
-
-    :returns: The grid shape to use as (num_rows, num_cols)
-    '''
-    total_temporal_bins = reference_dataset.values.shape[0]
-    temporal_bins = total_temporal_bins
-
-    num_rows = 1
-    while temporal_bins > max_cols:
-        temporal_bins -= max_cols
-        num_rows += 1
-
-    return _balance_grid_shape(total_temporal_bins, num_rows, max_cols)
-    #return (num_rows, max_cols)
-
-def _balance_grid_shape(total_temporal_bins, num_rows, num_cols):
-    ''' Balance grid shape values to prevent large row/col discrepancies.
-
-    Often, _calculate_grid_shape will result in values where there is a
-    large difference between row/column count. This tries to balance out the
-    shape so that it is as square as possible.
-
-    :param total_temporal_bins: The total number of bins that the shape must
-        fit.
-    :type total_temporal_bins: Integer >= 1
-    :param num_rows: The number of rows.
-    :type num_rows: Integer >= 1
-    :param num_cols: The number of columns.
-    :type num_cols: Integer >= 1
-
-    :returns: The adjusted shape values so the difference between the number
-        of rows and the number of columns <= 1.
-    '''
-    while True:
-        if abs(num_rows - num_cols) <= 1:
-            # We might be able to reduce both.
-            if total_temporal_bins < (num_rows - 1) * (num_cols - 1):
-                num_rows -= 1
-                num_cols -= 1
-            # If not, then we're nearly done.
-            else:
-                # We might end up with a grid that is slightly too large. We
-                # tend to favor larger column numbers rather than rows, so we'll
-                # try to drop another column to get a tighter grid.
-                if total_temporal_bins <= num_rows * (num_cols - 1):
-                    num_cols -= 1
-
-                # Favor more columns or more rows in the final layout.
-                if num_rows > num_cols:
-                    num_rows, num_cols = num_cols, num_rows
-
-                break
-        else:
-            if num_rows > num_cols:
-                # When we have a delta >= 2, first we try to drop just one of the values.
-                if total_temporal_bins < (num_rows - 1) * num_cols:
-                    num_rows -= 1
-                # In certain cases we can't just drop a value yet we still have a delta
-                # that is >= 2. In that situation we need to trade a value between them.
-                elif total_temporal_bins < (num_rows - 1) * (num_cols + 1):
-                    num_rows -= 1
-                    num_cols += 1
-            else:
-                if total_temporal_bins < num_rows * (num_cols - 1):
-                    num_cols -= 1
-                elif total_temporal_bins < (num_rows + 1) * (num_cols - 1):
-                    num_rows += 1
-                    num_cols -= 1
-
-    return (int(num_rows), int(num_cols))
-
-def _generate_binary_eval_plot_file_path(evaluation, dataset_index,
-                                         metric_index, time_stamp):
-    ''' Generate a plot path for a given binary metric run.
-
-    :param evaluation: The Evaluation object from which to pull name information.
-    :type evaluation: ocw.evaluation.Evaluation
-    :param dataset_index: The index of the target dataset to use when
-        generating the name.
-    :type dataset_index: Integer >= 0 < len(evaluation.target_datasets)
-    :param metric_index: The index of the metric to use when generating the name.
-    :type metric_index: Integer >= 0 < len(evaluation.metrics)
-
-    :returns: The full path for the requested metric run. The paths will always
-        be placed in the WORK_DIR set for the web services.
-    '''
-    plot_name = "{}_compared_to_{}_{}".format(
-        evaluation.ref_dataset.name.lower(),
-        evaluation.target_datasets[dataset_index].name.lower(),
-        evaluation.metrics[metric_index].__class__.__name__.lower()
-    )
-
-    timestamped_workdir = os.path.join(WORK_DIR, time_stamp)
-    return os.path.join(timestamped_workdir, plot_name)
-
-def _generate_unary_eval_plot_file_path(evaluation, dataset_index,
-                                        metric_index, time_stamp):
-    ''' Generate a plot path for a given unary metric run.
-
-    :param evaluation: The Evaluation object from which to pull name information.
-    :type evaluation: ocw.evaluation.Evaluation
-    :param dataset_index: The index of the target dataset to use when
-        generating the name.
-    :type dataset_index: Integer >= 0 < len(evaluation.target_datasets)
-    :param metric_index: The index of the metric to use when generating the name.
-    :type metric_index: Integer >= 0 < len(evaluation.metrics)
-
-    :returns: The full path for the requested metric run. The paths will always
-        be placed in the WORK_DIR set for the web services.
-    '''
-    metric = evaluation.unary_metrics[metric_index]
-    timestamped_workdir = os.path.join(WORK_DIR, time_stamp)
-
-    # Unary metrics can be run over both the reference dataset and the target
-    # datasets. It's possible for an evaluation to only have one and not the
-    # other. If there is a reference dataset then the 0th result index refers to
-    # the result of the metric being run on the reference dataset. Any future
-    # indexes into the target dataset list must then be offset by one. If
-    # there's no reference dataset then we don't have to bother with any of this.
-    if evaluation.ref_dataset:
-        if dataset_index == 0:
-            plot_name = "{}_{}".format(
-                evaluation.ref_dataset.name.lower(),
-                metric.__class__.__name__.lower()
-            )
-
-            return os.path.join(timestamped_workdir, plot_name)
-        else:
-            dataset_index -= 1
-
-    plot_name = "{}_{}".format(
-        evaluation.target_datasets[dataset_index].name.lower(),
-        metric.__class__.__name__.lower()
-    )
-
-    return os.path.join(timestamped_workdir, plot_name)
-
-def _generate_binary_eval_plot_title(evaluation, dataset_index, metric_index):
-    ''' Generate a plot title for a given binary metric run.
-
-    :param evaluation: The Evaluation object from which to pull name information.
-    :type evaluation: ocw.evaluation.Evaluation
-    :param dataset_index: The index of the target dataset to use when
-        generating the name.
-    :type dataset_index: Integer >= 0 < len(evaluation.target_datasets)
-    :param metric_index: The index of the metric to use when generating the name.
-    :type metric_index: Integer >= 0 < len(evaluation.metrics)
-
-    :returns: The plot title for the requested metric run.
-    '''
-    return "{} of {} compared to {}".format(
-        evaluation.metrics[metric_index].__class__.__name__,
-        evaluation.ref_dataset.name,
-        evaluation.target_datasets[dataset_index].name
-    )
-
-def _generate_unary_eval_plot_title(evaluation, dataset_index, metric_index):
-    ''' Generate a plot title for a given unary metric run.
-
-    :param evaluation: The Evaluation object from which to pull name information.
-    :type evaluation: ocw.evaluation.Evaluation
-    :param dataset_index: The index of the target dataset to use when
-        generating the name.
-    :type dataset_index: Integer >= 0 < len(evaluation.target_datasets)
-    :param metric_index: The index of the metric to use when generating the name.
-    :type metric_index: Integer >= 0 < len(evaluation.metrics)
-
-    :returns: The plot title for the requested metric run.
-    '''
-
-    # Unary metrics can be run over both the reference dataset and the target
-    # datasets. It's possible for an evaluation to only have one and not the
-    # other. If there is a reference dataset then the 0th result index refers to
-    # the result of the metric being run on the reference dataset. Any future
-    # indexes into the target dataset list must then be offset by one. If
-    # there's no reference dataset then we don't have to bother with any of this.
-    if evaluation.ref_dataset:
-        if dataset_index == 0:
-            return "{} of {}".format(
-                evaluation.unary_metrics[metric_index].__class__.__name__,
-                evaluation.ref_dataset.name
-            )
-        else:
-            dataset_index -= 1
-
-    return "{} of {}".format(
-        evaluation.unary_metrics[metric_index].__class__.__name__,
-        evaluation.target_datasets[dataset_index].name
-    )
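
For reference, a client call against the run_evaluation endpoint documented above would
look roughly like the sketch below. It is illustrative only: it assumes the backend is
running on localhost:8082 (as run_webservices.py starts it), and the local file path,
RCMED ids, metric list, and bounds are placeholder values rather than anything taken
from this commit.

    import json
    import requests

    payload = {
        'reference_dataset': {
            'data_source_id': 1,               # 1 == local file on the server
            'dataset_info': {
                'dataset_id': '/tmp/d1.nc',    # hypothetical local path
                'var_name': 'tasmax',
                'lat_name': 'lat',
                'lon_name': 'lon',
                'time_name': 'time',
                'name': 'Reference'
            }
        },
        'target_datasets': [{
            'data_source_id': 2,               # 2 == RCMED
            'dataset_info': {
                'dataset_id': 3,               # hypothetical RCMED dataset id
                'parameter_id': 36,            # hypothetical RCMED parameter id
                'name': 'Target'
            }
        }],
        'spatial_rebin_lat_step': 1,
        'spatial_rebin_lon_step': 1,
        'temporal_resolution': 30,             # monthly, per the docstring above
        'metrics': ['Bias'],
        'start_time': '1989-01-01 00:00:00',
        'end_time': '1989-12-01 00:00:00',
        'lat_min': -25.0,
        'lat_max': 25.0,
        'lon_min': -25.0,
        'lon_max': 25.0
    }

    response = requests.post('http://localhost:8082/processing/run_evaluation/',
                             data=json.dumps(payload),
                             headers={'Content-Type': 'application/json'})
    # The endpoint responds with the time-stamped work directory holding the
    # generated plots, e.g. {"eval_work_dir": "2014-07-01_14-50-00"}.
    print(response.text)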
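
The _calculate_grid_shape and _balance_grid_shape docstrings above describe the
row/column balancing in prose; a quick check makes the behaviour concrete. A minimal
sketch, assuming the module is importable as backend.processing (the way the test
suite imports it):

    import backend.processing as bp

    # Twelve monthly bins with the default max_cols=6 start out as a wide
    # (2, 6) layout; the balancing loop squares it off, which should yield
    # a shape close to (3, 4).
    print(bp._balance_grid_shape(total_temporal_bins=12, num_rows=2, num_cols=6))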

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/rcmed_helpers.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/rcmed_helpers.py b/ocw-ui/backend/rcmed_helpers.py
deleted file mode 100644
index 32e1af8..0000000
--- a/ocw-ui/backend/rcmed_helpers.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-''' Services for interacting with NASA JPL's Regional Climate Model Evaluation Database. '''
-
-import ocw.data_source.rcmed as rcmed
-
-from bottle import Bottle, request, response
-
-import requests
-
-rcmed_app = Bottle()
-
-@rcmed_app.route('/datasets/')
-def get_observation_dataset_data():
-    ''' Return a list of dataset information from JPL's RCMED.
-
-    **Example Return JSON Format**
-
-    .. sourcecode:: javascript
-
-        [
-            {
-                "dataset_id": "17",
-                "shortname": "The dataset's short name",
-                "longname": "The dataset's full name",
-                "source": "Where the dataset originated"
-            },
-            ...
-        ]
-    '''
-    r = requests.get('http://rcmes.jpl.nasa.gov/query-api/datasets.php')
-
-    if request.query.callback:
-        return "%s(%s)" % (request.query.callback, r.text)
-    return r.text
-
-@rcmed_app.route('/parameters/')
-def get_dataset_parameters():
-    ''' Return dataset specific parameter information from JPL's RCMED.
-
-    **Example Call Format**
-
-    .. sourcecode:: javascript
-
-        /parameters/?dataset=<dataset's short name>
-
-    **Example Return JSON Format**
-
-    .. sourcecode:: javascript
-
-        [
-            {
-                "parameter_id": "80",
-                "shortname": "The dataset's short name",
-                "datasetshortname": "The dataset's short name again",
-                "longname": "The dataset's long name",
-                "units": "Units for the dataset's measurements"
-            }
-        ]
-
-    '''
-    url = 'http://rcmes.jpl.nasa.gov/query-api/parameters.php?dataset=' + request.query.dataset
-    r = requests.get(url)
-
-    if request.query.callback:
-        return "%s(%s)" % (request.query.callback, r.text)
-    return r.text
-
-
-def extract_bounds(parameter):
-    ''' Take a parameter dictionary and return its spatial and temporal bounds.
-
-    :param parameter: Single parameter that is returned from rcmed.get_parameters_metadata().
-    :type parameter: Dictionary
-
-    :returns: parameter_id and bounds dictionary of format
-            {
-              "start_date": "1901-01-15",
-              "end_date": "2009-12-15",
-              "lat_max": 89.75,
-              "lat_min": -89.75,
-              "lon_max": 179.75,
-              "lon_min": -179.75
-            }
-    '''
-    bounds_data = {}
-    bounds_data['start_date'] = str(parameter['start_date'])
-    bounds_data['end_date'] = str(parameter['end_date'])
-    spatial_bounds = parameter['bounding_box'].replace('(','').replace(')','')
-    spatial_bounds = spatial_bounds.split(',')
-    # spatial_bounds is in format:
-    # [<lat_max>, <lon_max>, <lat_min>, <lon_max>, <lat_min>, <lon_min>, <lat_max>, <lon_min>]
-    # ['49.875', '179.875', '-49.875', '179.875', '-49.875', '-179.875', '49.875', '-179.875']
-    bounds_data['lat_max'] = float(spatial_bounds[0])
-    bounds_data['lat_min'] = float(spatial_bounds[2])
-    bounds_data['lon_max'] = float(spatial_bounds[1])
-    bounds_data['lon_min'] = float(spatial_bounds[5])
-    param_id = str(parameter['parameter_id'])
-    return param_id, bounds_data
-
-
-@rcmed_app.route('/parameters/bounds/')
-@rcmed_app.route('/parameters/bounds')
-def get_parameters_bounds():
-    ''' Return temporal and spatial bounds metadata for all of JPL's RCMED parameters.
-
-    **Example Call Format**
-
-    .. sourcecode:: javascript
-
-        /parameters/bounds/
-
-    **Example Return JSON Format**
-
-    .. sourcecode:: javascript
-
-        {
-          "38": {
-            "start_date": "1901-01-15",
-            "end_date": "2009-12-15",
-            "lat_max": 89.75,
-            "lat_min": -89.75,
-            "lon_max": 179.75,
-            "lon_min": -179.75
-          },
-          "39": {
-            "start_date": "1901-01-15",
-            "end_date": "2009-12-15",
-            "lat_max": 89.75,
-            "lat_min": -89.75,
-            "lon_max": 179.75,
-            "lon_min": -179.75
-          }
-        }
-
-    '''
-    parameter_bounds = {}
-    raw_parameters = rcmed.get_parameters_metadata()
-    for parameter in raw_parameters:
-        if parameter['bounding_box'] is not None:
-            param_id, bounds_data = extract_bounds(parameter)
-            parameter_bounds[param_id] = bounds_data
-
-
-    return parameter_bounds
-
-
-@rcmed_app.hook('after_request')
-def enable_cors():
-    ''' Allow Cross-Origin Resource Sharing for all URLs. '''
-    response.headers['Access-Control-Allow-Origin'] = '*'
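
The bounding_box parsing in extract_bounds above is easiest to follow with a concrete
value. A small sketch, assuming the module is importable directly as rcmed_helpers;
the parameter dictionary is made up to mirror the ordering noted in the function's
comment, not pulled from RCMED:

    from rcmed_helpers import extract_bounds

    parameter = {
        'parameter_id': 38,
        'start_date': '1901-01-15',
        'end_date': '2009-12-15',
        # (lat_max, lon_max, lat_min, lon_max, lat_min, lon_min, lat_max, lon_min)
        'bounding_box': '(49.875,179.875,-49.875,179.875,-49.875,-179.875,49.875,-179.875)'
    }

    param_id, bounds = extract_bounds(parameter)
    # param_id == '38'
    # bounds == {'start_date': '1901-01-15', 'end_date': '2009-12-15',
    #            'lat_max': 49.875, 'lat_min': -49.875,
    #            'lon_max': 179.875, 'lon_min': -179.875}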

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/run_webservices.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/run_webservices.py b/ocw-ui/backend/run_webservices.py
deleted file mode 100644
index 4e50801..0000000
--- a/ocw-ui/backend/run_webservices.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-''' OCW UI Backend web services initialization. '''
-
-from bottle import Bottle, response, static_file
-from local_file_metadata_extractors import lfme_app
-from directory_helpers import dir_app
-from rcmed_helpers import rcmed_app
-from processing import processing_app
-
-app = Bottle()
-app.mount('/lfme/', lfme_app)
-app.mount('/dir/', dir_app)
-app.mount('/rcmed/', rcmed_app)
-app.mount('/processing/', processing_app)
-
-@app.route('/')
-@app.route('/index.html')
-def index():
-    return static_file('index.html', root='./app')
-
-@app.route('/js/:path#.+#')
-def server_static(path):
-    return static_file(path, root='./app/js/')
-
-@app.route('/css/:path#.+#')
-def server_static(path):
-    return static_file(path, root='./app/css/')
-
-@app.route('/img/:path#.+#')
-def server_static(path):
-    return static_file(path, root='./app/img/')
-
-@app.route('/partials/:path#.+#')
-def server_static(path):
-    return static_file(path, root='./app/partials/')
-
-@app.route('/font/:path#.+#')
-def server_static(path):
-    return static_file(path, root='./app/font/')
-
-@app.route('/static/eval_results/<file_path:path>')
-def get_eval_result_image(file_path):
-    ''' Return static file.
-    
-    Return static file specified by root + filepath where root defaults to:
-        /tmp/ocw
-
-    The 'root' path should coincide with the work directory set used by the
-    OCW toolkit for processing.
-
-    :param file_path: The path component that, when appended to the 'root' path,
-        specifies the file to return.
-    :type file_path: string
-
-    :returns: The requested file resource
-    '''
-    return static_file(file_path, root="/tmp/ocw")
-
-@app.hook('after_request')
-def enable_cors():
-    ''' Allow Cross-Origin Resource Sharing for all URLs. '''
-    response.headers['Access-Control-Allow-Origin'] = '*'
-
-if __name__ == "__main__":
-    app.run(host='localhost', port=8082)
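
The mount() calls above just prefix each sub-application's routes, so once the
__main__ block has started the server the combined URL space can be exercised
directly. A minimal smoke-test sketch, assuming the server is running on
localhost:8082 and that the requests library (already used by rcmed_helpers) is
installed:

    import requests

    base = 'http://localhost:8082'

    # /processing/... routes come from processing_app, /rcmed/... from rcmed_app.
    print(requests.get(base + '/processing/metrics/').text)
    print(requests.get(base + '/rcmed/datasets/').text)

    # Static UI assets are served from ./app by the routes above.
    print(requests.get(base + '/index.html').status_code)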

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/__init__.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/__init__.py b/ocw-ui/backend/tests/__init__.py
deleted file mode 100644
index 90a7953..0000000
--- a/ocw-ui/backend/tests/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-from urllib import urlretrieve
-from ..config import WORK_DIR, PATH_LEADER
-
-FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
-FILE_1 = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
-FILE_2 = "AFRICA_UC-WRF311_CTL_ERAINT_MM_50km-rg_1989-2008_tasmax.nc"
-
-def setup_package():
-    if not os.path.exists('/tmp/d1.nc'):
-        urlretrieve(FILE_LEADER + FILE_1, '/tmp/d1.nc')
-
-    if not os.path.exists('/tmp/d2.nc'):
-        urlretrieve(FILE_LEADER + FILE_2, '/tmp/d2.nc')
-
-    if not os.path.exists(WORK_DIR):
-        os.makedirs(WORK_DIR)
-
-    if not os.path.exists(PATH_LEADER):
-        os.makedirs(PATH_LEADER)

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/example_data/lat_lon_time.nc
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/example_data/lat_lon_time.nc b/ocw-ui/backend/tests/example_data/lat_lon_time.nc
deleted file mode 100644
index 3ca2b69..0000000
Binary files a/ocw-ui/backend/tests/example_data/lat_lon_time.nc and /dev/null differ

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/example_data/lat_lon_time_invalid.nc
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/example_data/lat_lon_time_invalid.nc b/ocw-ui/backend/tests/example_data/lat_lon_time_invalid.nc
deleted file mode 100644
index 4de6b98..0000000
Binary files a/ocw-ui/backend/tests/example_data/lat_lon_time_invalid.nc and /dev/null differ

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/test_directory_helpers.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/test_directory_helpers.py b/ocw-ui/backend/tests/test_directory_helpers.py
deleted file mode 100644
index 8c21e41..0000000
--- a/ocw-ui/backend/tests/test_directory_helpers.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-import re
-import unittest
-from webtest import TestApp
-
-from ..run_webservices import app
-from ..config import WORK_DIR, PATH_LEADER
-from ..directory_helpers import _get_clean_directory_path
-
-test_app = TestApp(app)
-
-class TestDirectoryPathList(unittest.TestCase):
-    @classmethod
-    def setUpClass(self):
-        if not os.path.exists(PATH_LEADER + '/bar'):
-            os.mkdir(PATH_LEADER + '/bar')
-        if not os.path.exists(PATH_LEADER + '/baz.txt'):
-            open(PATH_LEADER + '/baz.txt', 'a').close()
-        if not os.path.exists(PATH_LEADER + '/test.txt'):
-            open(PATH_LEADER + '/test.txt', 'a').close()
-
-    @classmethod
-    def tearDownClass(self):
-        os.remove(PATH_LEADER + '/test.txt')
-        os.remove(PATH_LEADER + '/baz.txt')
-        os.rmdir(PATH_LEADER + '/bar')
-
-    def test_valid_path_listing(self):
-        expected_return = {'listing': ['/bar/', '/baz.txt', '/test.txt']}
-        response = test_app.get('http://localhost:8082/dir/list//')
-        self.assertDictEqual(response.json, expected_return)
-
-    def test_invalid_path_listing(self):
-        expected_return = {'listing': []}
-        response = test_app.get('http://localhost:8082/dir/list//usr/local')
-        self.assertDictEqual(response.json, expected_return)
-
-    def test_nonexistent_path_listing(self):
-        expected_return = {'listing': []}
-        response = test_app.get('http://localhost:8082/dir/list//fake/path')
-        self.assertDictEqual(response.json, expected_return)
-
-class TestResultDirectoryList(unittest.TestCase):
-    def setUp(self):
-        if not os.path.exists(WORK_DIR + '/foo'): os.mkdir(WORK_DIR + '/foo')
-        if not os.path.exists(WORK_DIR + '/bar'): os.mkdir(WORK_DIR + '/bar')
-
-    @classmethod
-    def tearDownClass(self):
-        if os.path.exists(WORK_DIR + '/foo'): os.rmdir(WORK_DIR + '/foo')
-        if os.path.exists(WORK_DIR + '/bar'): os.rmdir(WORK_DIR + '/bar')
-
-    def test_result_listing(self):
-        expected_return = {'listing': ['bar', 'foo']}
-        response = test_app.get('http://localhost:8082/dir/results/')
-        response_json = self.clean_result_listing_json(response.json)
-        self.assertDictEqual(response_json, expected_return)
-
-    def test_missing_work_dir_listing(self):
-        if os.path.exists(WORK_DIR + '/foo'): os.rmdir(WORK_DIR + '/foo')
-        if os.path.exists(WORK_DIR + '/bar'): os.rmdir(WORK_DIR + '/bar')
-
-        expected_return = {'listing': []}
-        response = test_app.get('http://localhost:8082/dir/results/')
-        response_json = self.clean_result_listing_json(response.json)
-        self.assertDictEqual(response_json, expected_return)
-
-    def clean_result_listing_json(self, response_json):
-        # The working directory that is being pulled for results is the actual directory
-        # that OCW uses when running evaluations on the system. It's possible that these
-        # tests are being run on a system where actual results are run. If that's the case,
-        # the listings for actual runs need to be removed before the results are check. The
-        # standard form for a result directory is a timestamp of YYYY-MM-DD_HH-MM-SS.
-        valid_directory = re.compile(r"\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}", re.UNICODE)
-        response_json['listing'] = [folder
-                                    for folder in response_json['listing']
-                                    if not re.search(valid_directory, folder)]
-        return response_json
-
-class TestResultResultRetrieval(unittest.TestCase):
-    @classmethod
-    def setUpClass(self):
-        if not os.path.exists(WORK_DIR + '/foo'): os.mkdir(WORK_DIR + '/foo')
-
-        if not os.path.exists(WORK_DIR + '/foo/baz.txt'):
-            open(WORK_DIR + '/foo/baz.txt', 'a').close()
-        if not os.path.exists(WORK_DIR + '/foo/test.txt'):
-            open(WORK_DIR + '/foo/test.txt', 'a').close()
-
-    @classmethod
-    def tearDownClass(self):
-        os.remove(WORK_DIR + '/foo/baz.txt')
-        os.remove(WORK_DIR + '/foo/test.txt')
-        os.rmdir(WORK_DIR + '/foo')
-
-    def test_no_test_directory_retrieval(self):
-        expected_return = {'listing': []}
-        response = test_app.get('http://localhost:8082/dir/results//bar')
-
-        response_json = response.json
-        self.assertDictEqual(response_json, expected_return)
-
-    def test_results_retrieval(self):
-        expected_return = {'listing': ['foo/baz.txt', 'foo/test.txt']}
-        response = test_app.get('http://localhost:8082/dir/results//foo')
-        response_json = response.json
-        self.assertDictEqual(response_json, expected_return)
-
-class TestDirectoryPathCleaner(unittest.TestCase):
-    VALID_CLEAN_DIR = os.path.join(PATH_LEADER, 'bar')
-
-    @classmethod
-    def setUpClass(self):
-        if not os.path.exists(self.VALID_CLEAN_DIR): os.mkdir(self.VALID_CLEAN_DIR)
-
-    @classmethod
-    def tearDownClass(self):
-        os.rmdir(self.VALID_CLEAN_DIR)
-
-    def test_valid_directory_path(self):
-        clean_path = _get_clean_directory_path(PATH_LEADER, '/bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
-
-    def test_duplicate_slash_removal(self):
-        clean_path = _get_clean_directory_path(PATH_LEADER, '//bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
-
-        clean_path = _get_clean_directory_path(PATH_LEADER, '/////bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
-
-    def test_relative_path_removal(self):
-        clean_path = _get_clean_directory_path(PATH_LEADER, '/../bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
-
-        clean_path = _get_clean_directory_path(PATH_LEADER, '/./bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
-
-        clean_path = _get_clean_directory_path(PATH_LEADER, '/.././bar')
-        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/test_local_file_metadata_extractors.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/test_local_file_metadata_extractors.py b/ocw-ui/backend/tests/test_local_file_metadata_extractors.py
deleted file mode 100644
index 2e469dd..0000000
--- a/ocw-ui/backend/tests/test_local_file_metadata_extractors.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-from json import loads
-import unittest
-from webtest import TestApp
-
-from ..run_webservices import app
-
-test_app = TestApp(app)
-
-class TestLatLonExtraction(unittest.TestCase):
-    def test_successful_latlon_extract(self):
-        expected_return = {
-            "success": True,
-            "lat_name": "lat",
-            "lon_name": "lon",
-            "lat_min": -45.759998321533203,
-            "lat_max": -45.759998321533203,
-            "lon_min": -24.639999389648438,
-            "lon_max": 60.279998779296875
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_latlon/' + file_location)
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_successful_latlon_extract_jsonp(self):
-        expected_return = {
-            "success": True,
-            "lat_name": "lat",
-            "lon_name": "lon",
-            "lat_min": -45.759998321533203,
-            "lat_max": -45.759998321533203,
-            "lon_min": -24.639999389648438,
-            "lon_max": 60.279998779296875
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_latlon/' + file_location + '?callback=test_callback')
-
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-    def test_failure_latlon_extract(self):
-        expected_return = {
-            "success": False,
-            "variables": ["invalid_lon", "invalid_time", "invalid_lat"]
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time_invalid.nc')
-
-        response = test_app.get('/lfme/list_latlon/' + file_location)
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_failure_latlon_extract_jsonp(self):
-        expected_return = {
-            "success": False,
-            "variables": ["invalid_lon", "invalid_time", "invalid_lat"]
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time_invalid.nc')
-
-        response = test_app.get('/lfme/list_latlon/' + file_location + '?callback=test_callback')
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-class TestTimeExtraction(unittest.TestCase):
-    def test_successful_time_extract(self):
-        expected_return = {
-            "success": True,
-            "time_name": "time",
-            "start_time": "1989-01-01 00:00:00",
-            "end_time": "2008-12-01 00:00:00"
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_time/' + file_location)
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_successful_time_extract_jsonp(self):
-        expected_return = {
-            "success": True,
-            "time_name": "time",
-            "start_time": "1989-01-01 00:00:00",
-            "end_time": "2008-12-01 00:00:00"
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_time/' + file_location + '?callback=test_callback')
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-    def test_failure_time_extract(self):
-        expected_return = {
-            "success": False,
-            "variables": ["invalid_lon", "invalid_time", "invalid_lat"]
-        } 
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time_invalid.nc')
-
-        response = test_app.get('/lfme/list_time/' + file_location)
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_failure_time_extract_jsonp(self):
-        expected_return = {
-            "success": False,
-            "variables": ["invalid_lon", "invalid_time", "invalid_lat"]
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time_invalid.nc')
-
-        response = test_app.get('/lfme/list_time/' + file_location + '?callback=test_callback')
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-class TestVariableExtraction(unittest.TestCase):
-    def test_successful_variable_extract(self):
-        expected_return = {
-            'success': True,
-            'variables': ['lat', 'lon', 'time']
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_vars/' + file_location)
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_successful_variable_extract_jsonp(self):
-        expected_return = {
-            'success': True,
-            'variables': ['lat', 'lon', 'time']
-        }
-
-        file_location = os.path.abspath('tests/example_data/lat_lon_time.nc')
-
-        response = test_app.get('/lfme/list_vars/' + file_location + '?callback=test_callback')
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-    def test_failure_variable_extract(self):
-        expected_return = {'success': False}
-
-        response = test_app.get('/lfme/list_vars/' + 'fake_path')
-
-        self.assertDictEqual(expected_return, response.json)
-
-    def test_failure_variable_extract_jsonp(self):
-        expected_return = {'success': False}
-
-        response = test_app.get('/lfme/list_vars//fakepath?callback=test_callback')
-        json = response.text
-
-        # Strip out the callback function and the JSON string from the response
-        # and check for proper content.
-        callback = json[:json.index('(')]
-        json = json[json.index('(') + 1 : json.rindex(')')]
-        json = loads(json)
-
-        self.assertDictEqual(expected_return, json)
-        self.assertEqual(callback, "test_callback")
-
-if __name__ == '__main__':
-    unittest.main()
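
The four JSONP tests in the file above repeat the same unwrapping logic: take the text before
the first '(' as the callback name and parse the JSON payload between the outermost
parentheses. As a minimal sketch, assuming only the standard library, that duplication could
be factored into a helper like the one below. The name parse_jsonp and its placement are
hypothetical and not part of the original file.

    from json import loads

    def parse_jsonp(body):
        ''' Split a JSONP response body into (callback_name, payload_dict). '''
        # Everything before the first '(' is the callback name.
        callback = body[:body.index('(')]
        # Everything between the outermost parentheses is the JSON payload.
        payload = body[body.index('(') + 1 : body.rindex(')')]
        return callback, loads(payload)

    # Example usage inside one of the tests above:
    #     callback, data = parse_jsonp(response.text)
    #     self.assertEqual(callback, 'test_callback')
    #     self.assertDictEqual(expected_return, data)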

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/test_processing.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/test_processing.py b/ocw-ui/backend/tests/test_processing.py
deleted file mode 100644
index cc26b26..0000000
--- a/ocw-ui/backend/tests/test_processing.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-import unittest
-import datetime as dt
-
-from webtest import TestApp
-
-from backend.config import WORK_DIR
-from backend.run_webservices import app
-import backend.processing as bp
-
-import ocw.metrics as metrics
-import ocw.data_source.rcmed as rcmed
-from ocw.dataset import Dataset
-from ocw.evaluation import Evaluation
-
-import numpy
-
-test_app = TestApp(app)
-
-class TestLocalDatasetLoad(unittest.TestCase):
-    def setUp(self):
-        self.dataset_object = {
-            'dataset_id': os.path.abspath('/tmp/d1.nc'),
-            'var_name': 'tasmax',
-            'lat_name': 'lat',
-            'lon_name': 'lon',
-            'time_name': 'time'
-        }
-
-    def test_valid_load(self):
-        dataset = bp._load_local_dataset_object(self.dataset_object)
-        self.assertEqual(dataset.variable, self.dataset_object['var_name'])
-
-    def test_default_name_assignment(self):
-        dataset = bp._load_local_dataset_object(self.dataset_object)
-        self.assertEqual(dataset.name, 'd1.nc')
-
-    def test_custom_name_assignment(self):
-        self.dataset_object['name'] = 'CustomName'
-        dataset = bp._load_local_dataset_object(self.dataset_object)
-        self.assertEqual(dataset.name, self.dataset_object['name'])
-
-class TestDatasetProcessingHelper(unittest.TestCase):
-    def test_invalid_process_dataset_objects(self):
-        invalid_dataset_object = {'data_source_id': 3, 'dataset_info': {}}
-        self.assertRaises(
-                ValueError,
-                bp._process_dataset_object,
-                invalid_dataset_object, 'fake parameter')
-
-class TestRCMEDDatasetLoad(unittest.TestCase):
-    def setUp(self):
-        metadata = rcmed.get_parameters_metadata()
-        # Load TRMM from RCMED
-        dataset_dat = [m for m in metadata if m['parameter_id'] == '36'][0]
-
-        self.dataset_info = {
-            'dataset_id': int(dataset_dat['dataset_id']),
-            'parameter_id': int(dataset_dat['parameter_id'])
-        }
-
-        self.eval_bounds = {
-            'start_time': dt.datetime(1998, 2, 1),
-            'end_time': dt.datetime(1998, 3, 1),
-            'lat_min': -10,
-            'lat_max': 10,
-            'lon_min': -15,
-            'lon_max': 15
-        }
-
-    def test_valid_load(self):
-        dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
-        lat_min, lat_max, lon_min, lon_max = dataset.spatial_boundaries()
-        start_time, end_time = dataset.time_range()
-
-        self.assertTrue(self.eval_bounds['lat_min'] <= lat_min)
-        self.assertTrue(self.eval_bounds['lat_max'] >= lat_max)
-        self.assertTrue(self.eval_bounds['lon_min'] <= lon_min)
-        self.assertTrue(self.eval_bounds['lon_max'] >= lon_max)
-        self.assertTrue(self.eval_bounds['start_time'] <= start_time)
-        self.assertTrue(self.eval_bounds['end_time'] >= end_time)
-
-    def test_default_name_assignment(self):
-        dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
-        self.assertEquals(dataset.name, 'TRMM v.7 Monthly Precipitation')
-
-    def test_custom_name_assignment(self):
-        self.dataset_info['name'] = 'CustomName'
-        dataset = bp._load_rcmed_dataset_object(self.dataset_info, self.eval_bounds)
-        self.assertEquals(dataset.name, self.dataset_info['name'])
-
-
-class TestMetricLoad(unittest.TestCase):
-    def test_get_valid_metric_options(self):
-        metric_map = bp._get_valid_metric_options()
-        bias = metric_map['Bias']()
-        self.assertTrue(isinstance(bias, metrics.Bias))
-
-    def test_valid_metric_load(self):
-        metric_objs = bp._load_metrics(['Bias'])
-        self.assertTrue(isinstance(metric_objs[0], metrics.Bias))
-
-    def test_invalid_metric_load(self):
-        self.assertRaises(ValueError, bp._load_metrics, ['AAA'])
-
-class TestSpatialRebinHelpers(unittest.TestCase):
-    def test_latlon_bin_helper(self):
-        eval_bounds = {
-            'lat_min': -57.2,
-            'lat_max': 58.2,
-            'lon_min': -45.3,
-            'lon_max': 39.2,
-        }
-        lat_step = 1
-        lon_step = 1
-
-        lats = numpy.arange(eval_bounds['lat_min'], eval_bounds['lat_max'])
-        lons = numpy.arange(eval_bounds['lon_min'], eval_bounds['lon_max'])
-
-        new_lats, new_lons = bp._calculate_new_latlon_bins(eval_bounds, lat_step, lon_step)
-
-        self.assertTrue(numpy.array_equal(lats, new_lats))
-        self.assertTrue(numpy.array_equal(lons, new_lons))
-
-class TestCalculateGridShape(unittest.TestCase):
-    def test_grid_shape_calculation(self):
-        ref_dataset = _create_fake_dataset('foo')
-        shape = bp._calculate_grid_shape(ref_dataset, max_cols=3)
-        self.assertEquals(shape, (3, 3))
-
-class TestBalanceGridShape(unittest.TestCase):
-    def test_balance_grid_shape(self):
-        # Test column imbalance
-        self.assertEquals(bp._balance_grid_shape(7, 2, 6), (3, 3))
-        self.assertEquals(bp._balance_grid_shape(7, 2, 4), (3, 3))
-        self.assertEquals(bp._balance_grid_shape(10, 2, 6), (3, 4))
-        self.assertEquals(bp._balance_grid_shape(20, 3, 7), (4, 5))
-
-        # Test row imbalance
-        self.assertEquals(bp._balance_grid_shape(7, 6, 2), (3, 3))
-        self.assertEquals(bp._balance_grid_shape(7, 4, 2), (3, 3))
-        self.assertEquals(bp._balance_grid_shape(10, 6, 2), (3, 4))
-        self.assertEquals(bp._balance_grid_shape(20, 7, 3), (4, 5))
-
-class TestFilePathCreation(unittest.TestCase):
-    def setUp(self):
-        self.full_evaluation = Evaluation(
-            _create_fake_dataset('Ref'),
-            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
-            [metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
-        )
-
-        self.unary_evaluation = Evaluation(
-            None,
-            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
-            [metrics.TemporalStdDev()]
-        )
-
-    def test_binary_metric_path_generation(self):
-        time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-        self.assertEquals(
-            bp._generate_binary_eval_plot_file_path(self.full_evaluation,
-                                                    0, # dataset_index
-                                                    1, # metric_index
-                                                    time_stamp),
-            '/tmp/ocw/{}/ref_compared_to_t1_bias'.format(time_stamp)
-        )
-
-    def test_unary_metric_path_generation_full_eval(self):
-        time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-        self.assertEquals(
-            bp._generate_unary_eval_plot_file_path(self.full_evaluation,
-                                                   0, # dataset_index
-                                                   0, # metric_index
-                                                   time_stamp),
-            '/tmp/ocw/{}/ref_temporalstddev'.format(time_stamp)
-        )
-
-        self.assertEquals(
-            bp._generate_unary_eval_plot_file_path(self.full_evaluation,
-                                                   1, # dataset_index
-                                                   0, # metric_index
-                                                   time_stamp),
-            '/tmp/ocw/{}/t1_temporalstddev'.format(time_stamp)
-        )
-
-    def test_unary_metric_path_generation_partial_eval(self):
-        time_stamp = dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-        self.assertEquals(
-            bp._generate_unary_eval_plot_file_path(self.unary_evaluation,
-                                                   0, # dataset_index
-                                                   0, # metric_index
-                                                   time_stamp),
-            '/tmp/ocw/{}/t1_temporalstddev'.format(time_stamp)
-        )
-
-        self.assertEquals(
-            bp._generate_unary_eval_plot_file_path(self.unary_evaluation,
-                                                   1, # dataset_index
-                                                   0, # metric_index
-                                                   time_stamp),
-            '/tmp/ocw/{}/t2_temporalstddev'.format(time_stamp)
-        )
-
-class TestPlotTitleCreation(unittest.TestCase):
-    def setUp(self):
-        self.full_evaluation = Evaluation(
-            _create_fake_dataset('Ref'),
-            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
-            [metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
-        )
-
-        self.unary_evaluation = Evaluation(
-            None,
-            [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
-            [metrics.TemporalStdDev()]
-        )
-
-    def test_binary_plot_title_generation(self):
-        self.assertEquals(
-            bp._generate_binary_eval_plot_title(self.full_evaluation, 0, 1),
-            'Bias of Ref compared to T1'
-        )
-
-    def test_unary_plot_title_generation_full_eval(self):
-        self.assertEqual(
-            bp._generate_unary_eval_plot_title(self.full_evaluation, 0, 0),
-            'TemporalStdDev of Ref'
-        )
-
-        self.assertEqual(
-            bp._generate_unary_eval_plot_title(self.full_evaluation, 1, 0),
-            'TemporalStdDev of T1'
-        )
-
-    def test_unary_plot_title_generation_partial_eval(self):
-        self.assertEquals(
-            bp._generate_unary_eval_plot_title(self.unary_evaluation, 0, 0),
-            'TemporalStdDev of T1'
-        )
-
-        self.assertEquals(
-            bp._generate_unary_eval_plot_title(self.unary_evaluation, 1, 0),
-            'TemporalStdDev of T2'
-        )
-
-class TestRunEvaluation(unittest.TestCase):
-    def test_full_evaluation(self):
-        data = {
-            'reference_dataset': {
-                'data_source_id': 1,
-                'dataset_info': {
-                    'dataset_id': os.path.abspath('/tmp/d1.nc'),
-                    'var_name': 'tasmax',
-                    'lat_name': 'lat',
-                    'lon_name': 'lon',
-                    'time_name': 'time'
-                }
-            },
-            'target_datasets': [
-                {
-                    'data_source_id': 1,
-                    'dataset_info': {
-                        'dataset_id': os.path.abspath('/tmp/d2.nc'),
-                        'var_name': 'tasmax',
-                        'lat_name': 'lat',
-                        'lon_name': 'lon',
-                        'time_name': 'time'
-                    }
-                }
-            ],
-            'spatial_rebin_lat_step': 1,
-            'spatial_rebin_lon_step': 1,
-            'temporal_resolution': 365,
-            'metrics': ['Bias'],
-            'start_time': '1989-01-01 00:00:00',
-            'end_time': '1991-01-01 00:00:00',
-            'lat_min': -25.0,
-            'lat_max': 22.0,
-            'lon_min': -14.0,
-            'lon_max': 40.0,
-            'subregion_information': None
-        }
-
-        # NOTE: Sometimes the file download will die if you use this WebTest
-        # call for testing. If that is the case, download the files manually with wget.
-        test_app.post_json('/processing/run_evaluation/', data)
-        result_dirs = [x for x in os.listdir(WORK_DIR)
-                       if os.path.isdir(os.path.join(WORK_DIR, x))]
-
-        eval_dir = os.path.join(WORK_DIR, result_dirs[-1])
-        eval_files = [f for f in os.listdir(eval_dir)
-                      if os.path.isfile(os.path.join(eval_dir, f))]
-
-        self.assertTrue(len(eval_files) == 1)
-        self.assertEquals(eval_files[0], 'd1.nc_compared_to_d2.nc_bias.png')
-
-class TestMetricNameRetrieval(unittest.TestCase):
-    def test_metric_name_retrieval(self):
-        invalid_metrics = ['ABCMeta', 'Metric', 'UnaryMetric', 'BinaryMetric']
-        data = test_app.get('/processing/metrics/').json
-        metrics = data['metrics']
-
-        self.assertTrue(all(m not in metrics for m in invalid_metrics))
-        self.assertTrue(len(metrics) > 0)
-        self.assertTrue('Bias' in metrics)
-
-def _create_fake_dataset(name):
-    lats = numpy.array(range(-10, 25, 1))
-    lons = numpy.array(range(-30, 40, 1))
-    times = numpy.array(range(8))
-    values = numpy.zeros((len(times), len(lats), len(lons)))
-
-    return Dataset(lats, lons, times, values, name=name)
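
For reference, the payload built in TestRunEvaluation.test_full_evaluation above can also be
sent to a running backend outside of WebTest. The sketch below is an illustration under
assumptions, not part of the original test suite: it uses the third-party requests library,
the file paths are placeholders that must point at real NetCDF files on the server, and the
host/port are assumptions about where run_webservices is listening.

    import requests

    # Same structure as the payload in test_full_evaluation above.
    payload = {
        'reference_dataset': {
            'data_source_id': 1,
            'dataset_info': {
                'dataset_id': '/tmp/d1.nc',
                'var_name': 'tasmax',
                'lat_name': 'lat',
                'lon_name': 'lon',
                'time_name': 'time'
            }
        },
        'target_datasets': [{
            'data_source_id': 1,
            'dataset_info': {
                'dataset_id': '/tmp/d2.nc',
                'var_name': 'tasmax',
                'lat_name': 'lat',
                'lon_name': 'lon',
                'time_name': 'time'
            }
        }],
        'spatial_rebin_lat_step': 1,
        'spatial_rebin_lon_step': 1,
        'temporal_resolution': 365,
        'metrics': ['Bias'],
        'start_time': '1989-01-01 00:00:00',
        'end_time': '1991-01-01 00:00:00',
        'lat_min': -25.0,
        'lat_max': 22.0,
        'lon_min': -14.0,
        'lon_max': 40.0,
        'subregion_information': None
    }

    # Assumed host/port; adjust to wherever the backend is actually serving.
    response = requests.post('http://localhost:8082/processing/run_evaluation/',
                             json=payload)
    print(response.status_code)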

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/backend/tests/test_run_webservices.py
----------------------------------------------------------------------
diff --git a/ocw-ui/backend/tests/test_run_webservices.py b/ocw-ui/backend/tests/test_run_webservices.py
deleted file mode 100644
index 5c01a9e..0000000
--- a/ocw-ui/backend/tests/test_run_webservices.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-import unittest
-from webtest import TestApp
-
-from ..run_webservices import app
-
-test_app = TestApp(app)
-
-class TestStaticEvalResults(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        if not os.path.exists('/tmp/ocw/foo.txt'): 
-            open('/tmp/ocw/foo.txt', 'a').close()
-
-    @classmethod
-    def tearDownClass(cls):
-        os.remove('/tmp/ocw/foo.txt')
-
-    def test_static_eval_results_return(self):
-        response = test_app.get('/static/eval_results//foo.txt')
-
-        self.assertEqual(response.status_int, 200)
-
-if __name__ == '__main__':
-    unittest.main()
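
The static-file test above only checks that the route answers with a 200. As a hypothetical
sketch (not the project's actual run_webservices implementation), a Bottle route serving
evaluation results out of /tmp/ocw could look like this; the port is an assumption.

    from bottle import Bottle, static_file

    app = Bottle()

    @app.route('/static/eval_results/<filename:path>')
    def get_eval_result(filename):
        # static_file sets the Content-Type and returns a 404 response for
        # files that do not exist under the given root.
        return static_file(filename, root='/tmp/ocw')

    if __name__ == '__main__':
        # Assumed host/port for local testing.
        app.run(host='localhost', port=8082)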

http://git-wip-us.apache.org/repos/asf/climate/blob/a53e3af5/ocw-ui/frontend/app/css/app.css
----------------------------------------------------------------------
diff --git a/ocw-ui/frontend/app/css/app.css b/ocw-ui/frontend/app/css/app.css
deleted file mode 100755
index 61f461e..0000000
--- a/ocw-ui/frontend/app/css/app.css
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
-**/
-body {
-	
-}
-
-#header-img {
-	
-}
-
-#header-title {
-	
-}
-
-#OCW-powered {
-	
-}
-
-#main-container {
-	margin-top: 80px;
-	min-height: 400px;
-	height: auto !important;
-	height: 400px;
-}
-
-#datasetDiv {
-	height: 750px;
-	overflow-y: auto;
-	overflow-x: hidden;
-}
-
-#map { height: 500px; }
-
-/* Small preview map that is displayed alongside dataset information */
-.preview-map {
-	height: 100px;
-	width: 100px;
-}
-
-.small-alert {
-	font-size: 12px;
-	color: green;
-	margin-top: 4px;
-	margin-left: 10px;
-}
-
-.colorSquare {
-	margin-top: 3px;
-	height: 10px;
-	width: 10px;
-}
-
-ul { list-style-type: none; }
-
-.no-color-link { color: #000000; }
-.no-color-link:hover { color: #000000; text-decoration: none; }
-.no-color-link:visited { color: #000000; }
-.no-color-link:active { color: #000000; }
-
-/* Remove the grayed out close button in modal headers */
-.modal-header .close { opacity: 1; }
-
-/* Remove the grayed out close button in modal footers */
-.modal-footer .close { opacity: 1; }
-
-/** 
-  * Timeline 
-  */
-div#OCWtimeline {
-	margin-top: 20px;
-	padding-bottom: 20px;
-}
-
-div.timeline-event {
-	border: none;
-	background: none;
-}
-
-div.timeline-event-content { margin: 0; }
-
-div.ocw-bar { height: 5px; }
-
-/**
-  * Results
-  */
-#results-sidebar {
-	min-height: 400px;
-	height: auto !important;
-	height: 400px;
-}
-
-#results-sidebar-header { font-size: 14px; }


Mime
View raw message