climate-commits mailing list archives

From ah...@apache.org
Subject svn commit: r1520687 - in /incubator/climate/trunk: ocw/ rcmet/src/main/python/rcmes/services/ rcmet/src/main/python/rcmes/toolkit/ rcmet/src/main/python/rcmes/utils/ rcmet/src/main/ui/app/js/controllers/ rcmet/src/main/ui/app/partials/
Date Fri, 06 Sep 2013 20:29:38 GMT
Author: ahart
Date: Fri Sep  6 20:29:37 2013
New Revision: 1520687

URL: http://svn.apache.org/r1520687
Log:
resolve CLIMATE-282: patch applied to trunk

Added:
    incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/metrics_kyo.py
Modified:
    incubator/climate/trunk/ocw/plotter.py
    incubator/climate/trunk/rcmet/src/main/python/rcmes/services/run_rcmes_processing.py
    incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/do_data_prep.py
    incubator/climate/trunk/rcmet/src/main/python/rcmes/utils/misc.py
    incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/ParameterSelectCtrl.js
    incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/RcmedSelectionCtrl.js
    incubator/climate/trunk/rcmet/src/main/ui/app/partials/main.html

Modified: incubator/climate/trunk/ocw/plotter.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/plotter.py?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/plotter.py (original)
+++ incubator/climate/trunk/ocw/plotter.py Fri Sep  6 20:29:37 2013
@@ -176,6 +176,10 @@ def draw_taylor_diagram(results, names, 
     fig.dpi = 300
     for i, data in enumerate(results):
         rect = gridshape + (i + 1,)
+        # Convert rect to string form as expected by TaylorDiagram constructor
+        rect = str(rect[0]) + str(rect[1]) + str(rect[2])
+
+        # Create Taylor Diagram object
         dia = TaylorDiagram(1, fig=fig, rect=rect, label=refname, radmax=radmax)
         for i, (stddev, corrcoef) in enumerate(data):
             dia.add_sample(stddev, corrcoef, marker='$%d$' % (i + 1), ms=6, 

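For context, the lines added above convert the subplot tuple into the compact three-digit string form (e.g. "221") expected by the TaylorDiagram constructor. A minimal standalone sketch of the conversion, using hypothetical values:

    # Hypothetical values: a 2x2 gridshape and the first subplot (i = 0)
    gridshape = (2, 2)
    i = 0
    rect = gridshape + (i + 1,)                        # (2, 2, 1)
    rect = str(rect[0]) + str(rect[1]) + str(rect[2])  # "221"
    assert rect == '221'
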
Modified: incubator/climate/trunk/rcmet/src/main/python/rcmes/services/run_rcmes_processing.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/python/rcmes/services/run_rcmes_processing.py?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/python/rcmes/services/run_rcmes_processing.py (original)
+++ incubator/climate/trunk/rcmet/src/main/python/rcmes/services/run_rcmes_processing.py Fri Sep  6 20:29:37 2013
@@ -17,13 +17,26 @@
 #!/usr/local/bin/python
 """Module used to launch the RESTful API"""
 import sys
+import ast
 sys.path.append('../../.')
 from bottle import route, request
+
+from classes import GridBox, JobProperties, Model
+from storage import db
+from storage.rcmed import getParams
+from utils.misc import msg, readSubRegionsFile
+from toolkit.do_data_prep import prep_data
+from toolkit import process
+from toolkit.metrics_kyo import calculate_metrics_and_make_plots
+
+import ConfigParser
 import json
 import cli.do_rcmes_processing_sub as awesome
 import time
 import datetime
 import os
+import numpy as np
+import numpy.ma as ma
 time_format_new = '%Y-%m-%d %H:%M:%S'
 
 #Static Default params
@@ -37,6 +50,14 @@ maskLatMax=0         # float (only used 
 maskLonMin=0         # float (only used if maskOption=1)
 maskLonMax=0         # float (only used if maskOption=1)
 
+# Hard-coded configuration/settings values that 
+# are not yet mapped to UI settings
+precipFlag=False
+maskOption=True    # To match rcmet.py line 221
+spatialGrid='user' # Eventually, use options['regrid']
+gridLonStep=0.5
+gridLatStep=0.5
+
 ###########################################################
 ##OPEN FOR DISCUSSION
 titleOption = 'default'   #this means that ' model vs obs :' will be used
@@ -45,84 +66,280 @@ plotFileNameOption = 'default'  #another
 
 @route('/rcmes/run/')
 def rcmes_go():
+
     print "**********\nBEGIN RCMES2.0_RUN\n**********"
-    print 'cachedir', cachedir
-    print 'workdir', workdir
     evalWorkDir = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
     evalPath = os.path.join( workdir, evalWorkDir )
     os.makedirs(evalPath)
     print 'evalPath', evalPath
     
+    # Attempt to create the cache dir if it does not exist
     try:
         if not os.path.exists(cachedir):
             os.makedirs(cachedir)
     except OSError as e:
         print "I/O error({0}): {1}".format(e.errno, e.strerror)
         sys.exit(1)
+    
+    # Print request variables as received
+    msg('Request Variables', {k:request.query.get(k) for k in request.query.keys()})
 
-    obsDatasetId = int(request.query.get('obsDatasetId', '').strip())
-    print 'obsDatasetId', obsDatasetId
-    obsParameterId = int(request.query.get('obsParameterId', '').strip())
-    print 'obsParameterId', obsParameterId
-
-    #reformat DateTime after pulling it out of the POST
-    POSTstartTime = str(request.query.get('startTime', '').strip())
-    startTime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(POSTstartTime, time_format_new)))
-    print 'startTime', startTime
-    #reformat DateTime after pulling it out of the POST
-    POSTendTime = str(request.query.get('endTime', '').strip())
-    endTime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(POSTendTime, time_format_new)))
-    print 'endTime', endTime
+    # Obtain the observational dataset and parameter ids from the request
+    obsDatasetIds = ast.literal_eval(request.query.get('obsDatasetIds', '[]'))
+    obsParameterIds = ast.literal_eval(request.query.get('obsParameterIds', '[]'))
 
+
+    # Reformat start date/time after pulling it out of the request
+    requestStartTime = str(request.query.get('startTime', '').strip())
+    startTime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(requestStartTime, time_format_new)))
+    
+    # Reformat end date/time after pulling it out of the request
+    requestEndTime = str(request.query.get('endTime', '').strip())
+    endTime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(requestEndTime, time_format_new)))
+
+    # Obtain the geographical boundaries from the request
     latMin = float(request.query.get('latMin', '').strip())
-    print 'latMin', latMin
     latMax = float(request.query.get('latMax', '').strip())
-    print 'latMax', latMax 
     lonMin = float(request.query.get('lonMin', '').strip())
-    print 'lonMin', lonMin
     lonMax = float(request.query.get('lonMax', '').strip())
-    print 'lonMax', lonMax
-
-    filelist = [request.query.get('filelist', '').strip()]
-    print 'filelist', filelist[0]
-
-    modelVarName = str(request.query.get('modelVarName', '').strip())
-    print 'modelVarName', modelVarName
-    precipFlag = request.query.get('precipFlag', '').strip()
-    print 'precipFlag', precipFlag
-    modelTimeVarName = str(request.query.get('modelTimeVarName', '').strip())
-    print 'modelTimeVarName', modelTimeVarName
-    modelLatVarName = str(request.query.get('modelLatVarName', '').strip())
-    print 'modelLatVarName', modelLatVarName
-    modelLonVarName = str(request.query.get('modelLonVarName', '').strip())
-    print 'modelLonVarName', modelLonVarName
 
+    # Obtain the list of model files from the request
+    filelist = ast.literal_eval(request.query.get('filelist', '[]'))
+    
+    # Obtain the evaluation variable name info for each model from the request
+    modelVarNames = ast.literal_eval(request.query.get('modelVarName', '[]'))
+    
+    # Obtain the time variable name info for each model from the request
+    modelTimeVarNames = ast.literal_eval(request.query.get('modelTimeVarName', '[]'))
+    
+    # Obtain the lat/lon variable name info for each model from the request
+    modelLatVarNames = ast.literal_eval(request.query.get('modelLatVarName', '[]'))
+    modelLonVarNames = ast.literal_eval(request.query.get('modelLonVarName', '[]'))
+    
+    # Obtain regrid configuration info from the request
     regridOption = str(request.query.get('regridOption', '').strip())
-    print 'regridOption', regridOption
     timeRegridOption = str(request.query.get('timeRegridOption', '').strip())
-    print 'timeRegridOption', timeRegridOption
+   
+    # Parse subregion information from the request
+    subRegionFile = str(request.query.get('subregionFile','').strip())
+   
+    # Parse additional options from the request
     seasonalCycleOption = request.query.get('seasonalCycleOption', '').strip()
-    print 'seasonalCycleOption', seasonalCycleOption
     metricOption = str(request.query.get('metricOption', '').strip())
-    print 'metricOption', metricOption    
     
-    settings = {"cacheDir": cachedir, "workDir": evalPath, "fileList": filelist}
-    params = {"obsDatasetId": obsDatasetId, "obsParamId": obsParameterId, 
-              "startTime": startTime, "endTime": endTime, "latMin": latMin, 
-              "latMax": latMax, "lonMin": lonMin, "lonMax": lonMax}
-    model = {"varName": modelVarName, "timeVariable": modelTimeVarName, 
-             "latVariable": modelLatVarName, "lonVariable": modelLonVarName}
-    mask = {"latMin": latMin, "latMax": latMax, "lonMin": lonMin, "lonMax": lonMax}
-    options = {"regrid": regridOption, "timeRegrid": timeRegridOption, 
-               "seasonalCycle": seasonalCycleOption, "metric": metricOption, 
-               "plotTitle": titleOption, "plotFilename": plotFileNameOption, 
-               "mask": maskOption, "precip": precipFlag}
-    
-    awesome.do_rcmes(settings, params, model, mask, options)
-    
-    model_path = os.path.join(evalPath, plotFileNameOption + "model.png")
-    obs_path = os.path.join(evalPath, plotFileNameOption + "obs.png")
-    comp_path = os.path.join(evalPath, plotFileNameOption + ".png")
+    # Build dicts of extracted request information
+    settings = {"cacheDir": cachedir, "workDir": workdir, "fileList": filelist}
+    params   = {"obsDatasetIds": obsDatasetIds, "obsParamIds": obsParameterIds, 
+                "startTime": startTime, "endTime": endTime, "latMin": latMin, 
+                "latMax": latMax, "lonMin": lonMin, "lonMax": lonMax}
+    models   = {"varNames": modelVarNames, "timeVariables": modelTimeVarNames, 
+                "latVariables": modelLatVarNames, "lonVariables": modelLonVarNames}
+    mask     = {"latMin": latMin, "latMax": latMax, "lonMin": lonMin, 
+                "lonMax": lonMax}
+    options  = {"regrid": regridOption, "timeRegrid": timeRegridOption, 
+                "seasonalCycle": seasonalCycleOption, "metric": metricOption, 
+                "plotTitle": titleOption, "plotFilename": plotFileNameOption, 
+                "mask": maskOption, "precip": precipFlag}
+    
+    # Include optional information
+    options['subRegionFile'] = subRegionFile if subRegionFile != '' else False
+    
+    
+    # Summarize what was extracted from the request
+    msg('Parsed Evaluation Criteria: ---------')
+    msg('Settings',  settings, 2)
+    msg('Parameters',params, 2)
+    msg('Models',    models, 2)
+    msg('Mask',      mask, 2)
+    msg('Options',   options, 2)
+    
+    # Parse the provided subregion data
+    if options['subRegionFile']:
+        # Parse the Config file
+        subRegions = readSubRegionsFile(options['subRegionFile'])
+        msg("Parsed SubRegions", subRegions)
+    
+    # Create a JobProperties object with the information
+    # extracted from the request
+    jobProperties = JobProperties(
+        settings['workDir'],
+        settings['cacheDir'],
+        spatialGrid,
+        options['timeRegrid'],
+        latMin=params['latMin'], # only used if spatial grid ='user'
+        latMax=params['latMax'], # only used if spatial grid ='user'
+        lonMin=params['lonMin'], # only used if spatial grid ='user'
+        lonMax=params['lonMax'], # only used if spatial grid ='user'
+        startDate=params['startTime'].strftime("%Y%m%d"),
+        endDate=params['endTime'].strftime("%Y%m%d"))
+        
+        
+    # Create a GridBox object with the spatial information
+    # extracted from the request
+    gridBox = GridBox(params['latMin'],
+        params['lonMin'],
+        params['latMax'],
+        params['lonMax'],
+        gridLonStep,
+        gridLatStep)
+        
+    # Prepare requested model files
+    eval_models = []
+    for i in xrange(len(settings['fileList'])):
+        # use getModelTimes(modelFile,timeVarName) to generate the 
+        # modelTimeStep and time list
+        _ , timestep = process.getModelTimes(
+            settings['fileList'][i],
+            models['timeVariables'][i])
+            
+        modelInfo = {
+            'filename':     settings['fileList'][i],
+            'latVariable':  models['latVariables'][i],
+            'lonVariable':  models['lonVariables'][i],
+            'timeVariable': models['timeVariables'][i],
+            'varName':      models['varNames'][i],
+            'timeStep':     timestep,
+            'precipFlag':   options['precip']
+        }
+        
+        msg("Built model info dict for {0}".format(settings['fileList'][i]),
+            modelInfo)
+
+        model = Model(**modelInfo)
+        eval_models.append(model)
+        
+    msg("Prepared {0} models".format(len(eval_models)))
+    
+    # Prepare requested observational data
+    # Obtain dataset metadata from RCMED Query Service
+    obs_timesteps = []
+    for i in xrange(len(params['obsDatasetIds'])):
+        dId = params['obsDatasetIds'][i]
+        pId = params['obsParamIds'][i]
+        query_service_url= 'http://rcmes.jpl.nasa.gov/query-api/query.php?datasetId={0}&parameterId={1}'.format(dId,pId)
+        msg("Obtaining dataset information from", query_service_url)
+        obs_timesteps.append(db.get_param_info(query_service_url)[1])
+    
+    # Build dict of dataset metadata
+    obsInfo = {
+        'obsDatasetId': params['obsDatasetIds'],
+        'obsParamId':   params['obsParamIds'],
+        'obsTimeStep':  obs_timesteps
+    }
+    
+    # Get parameter listing from the database
+    rcmedParams = getParams()
+    
+    # Build the list of observational datasets that exist in RCMED
+    eval_datasets = []
+    for param_id in obsInfo['obsParamId']:
+        for param in rcmedParams:
+            if param['parameter_id'] == int(param_id):
+                eval_datasets.append(param)
+    
+    
+    msg("Observation Info", obsInfo)
+    msg("Observation Dataset List", eval_datasets)
+
+
+    # At this point, all of the input parameters from the UI have
+    # been pre-processed and formatted. It now remains to invoke
+    # some series of processing steps to generate the requested
+    # output. 
+    #
+    # TODO: Break down the following processing into more modular
+    # discrete processing steps whose composition could then be
+    # determined by providing additional configuration options to
+    # the user via the UI.
+    
+    msg('Evaluation Output: ------------------')
+    numOBSs, numMDLs, nT, ngrdY, ngrdX, Times, lons, lats, obsData, modelData, obsList, mdlName = prep_data(jobProperties,eval_datasets,gridBox,eval_models)
+    
+    msg('numOBSs',numOBSs)
+    msg('numMDLs',numMDLs)
+    msg('nT',nT)
+    msg('ngrdY',ngrdY)
+    msg('ngrdX',ngrdX)
+    msg('Times',Times)
+    msg('lons',lons)
+    msg('lats',lats)
+    msg('obsData Length',len(obsData))
+    msg('modelData Length',len(modelData))
+    msg('obsList',obsList)
+    msg('mdlName',mdlName)
+    
+    # Prepare SubRegion data structures
+    if options['subRegionFile']:
+        numSubRgn = len(subRegions)
+        msg("Processing {0} sub regions...".format(numSubRgn))
+        if numSubRgn > 0:
+            subRgnName = [ x.name   for x in subRegions ]
+            subRgnLon0 = [ x.lonMin for x in subRegions ]
+            subRgnLon1 = [ x.lonMax for x in subRegions ]
+            subRgnLat0 = [ x.latMin for x in subRegions ]
+            subRgnLat1 = [ x.latMax for x in subRegions ]
+            # compute the area-mean timeseries for all subregions.
+            #   the number of subregions is usually small and memory usage 
+            #   is usually not a concern
+            obsRgn = ma.zeros((numOBSs, numSubRgn, nT))
+            mdlRgn = ma.zeros((numMDLs, numSubRgn, nT))
+            
+            print 'Enter area-averaging: mdlData.shape, obsData.shape ', modelData.shape, obsData.shape
+            print 'Use Latitude/Longitude Mask for Area Averaging'
+            for n in np.arange(numSubRgn):
+                # Define mask using regular lat/lon box specified by users ('mask=True' defines the area to be excluded)
+                maskLonMin = subRgnLon0[n] 
+                maskLonMax = subRgnLon1[n]
+                maskLatMin = subRgnLat0[n]
+                maskLatMax = subRgnLat1[n]
+                mask = np.logical_or(np.logical_or(lats <= maskLatMin, lats >= maskLatMax), 
+                                    np.logical_or(lons <= maskLonMin, lons >= maskLonMax))
+                # Calculate area-weighted averages within this region and store in a new list
+                for k in np.arange(numOBSs):           # area-average obs data
+                    Store = []
+                    for t in np.arange(nT):
+                        Store.append(process.calc_area_mean(obsData[k, t, :, :], lats, lons, mymask = mask))
+                    obsRgn[k, n, :] = ma.array(Store[:])
+                for k in np.arange(numMDLs):           # area-average mdl data
+                    Store = []
+                    for t in np.arange(nT):
+                        Store.append(process.calc_area_mean(modelData[k, t, :, :], lats, lons, mymask = mask))
+                    mdlRgn[k, n, :] = ma.array(Store[:])
+                Store = []                               # release the memory allocated by temporary vars
+    
+    # Call combined metrics/plotting function
+    calculate_metrics_and_make_plots(
+        models['varNames'][0], # won't work if models have different var names among themselves
+        evalPath,
+        lons, 
+        lats, 
+        obsData, 
+        modelData, 
+        (obsRgn if options['subRegionFile'] != False else None),  # Depends on presence of subregion info
+        (mdlRgn if options['subRegionFile'] != False else None),  # Depends on presence of subregion info
+        obsList, 
+        mdlName, 
+        (True if options['subRegionFile'] != False else False),   # Depends on presence of subregion info
+        (subRgnLon0 if options['subRegionFile'] != False else None), 
+        (subRgnLon1 if options['subRegionFile'] != False else None), 
+        (subRgnLat0 if options['subRegionFile'] != False else None), 
+        (subRgnLat1 if options['subRegionFile'] != False else None))
+        
+    # At this point, all of the plots have been written to various files
+    
+    # Obsolete invocation
+    #awesome.do_rcmes(settings, params, models, mask, options)
+    
+    # Prepare a response with the results of the evaluation
+
+    # TODO: This may be obsolete, if so, it should be removed:
+    msg('Evaluation Complete, Preparing Response...')
+    model_path = os.path.join(workdir, plotFileNameOption + "model.png")
+    obs_path = os.path.join(workdir,   plotFileNameOption + "obs.png")
+    comp_path = os.path.join(workdir,  plotFileNameOption + ".png")
+
 
     product_dict = {'modelPath':model_path,
                     'obsPath': obs_path,

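As an aside, the list-valued request parameters introduced above are parsed with ast.literal_eval, which safely evaluates Python literals (and only literals) from the query string. A minimal sketch, using a hypothetical query dict in place of the real bottle request object:

    import ast

    # Hypothetical query values, as the UI would serialize them
    query = {'obsDatasetIds': '[3, 7]', 'obsParameterIds': '[36, 37]'}

    obsDatasetIds = ast.literal_eval(query.get('obsDatasetIds', '[]'))
    obsParameterIds = ast.literal_eval(query.get('obsParameterIds', '[]'))
    print obsDatasetIds    # [3, 7]
    print obsParameterIds  # [36, 37]
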
Modified: incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/do_data_prep.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/do_data_prep.py?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/do_data_prep.py (original)
+++ incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/do_data_prep.py Fri Sep  6 20:29:37 2013
@@ -325,7 +325,8 @@ def prep_data(settings, obsDatasetList, 
         # if everything's fine, append the spatially and temporally regridded data in the obs data array (obsData)
         regridMdlData.append(mData)
 
-    modelData = ma.array(regridMdlData)
+    modelData  = ma.array(regridMdlData)
+    modelTimes = newMdlTimes
 
     if (precipFlag == True) & (mvUnit == 'KG M-2 S-1'):
         print 'convert model variable unit from mm/s to mm/day'

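Note that ma.array over a list of equally shaped masked arrays stacks them along a new leading axis, which is how the per-model grids become the single modelData array above. A small sketch with synthetic data:

    import numpy as np
    import numpy.ma as ma

    # Two synthetic regridded model fields of shape (time, lat, lon)
    regridMdlData = [ma.masked_less(np.random.rand(2, 3, 4), 0.1)
                     for _ in range(2)]
    modelData = ma.array(regridMdlData)
    print modelData.shape  # (2, 2, 3, 4): (model, time, lat, lon)
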
Added: incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/metrics_kyo.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/metrics_kyo.py?rev=1520687&view=auto
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/metrics_kyo.py (added)
+++ incubator/climate/trunk/rcmet/src/main/python/rcmes/toolkit/metrics_kyo.py Fri Sep  6 20:29:37 2013
@@ -0,0 +1,718 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+'''
+Module storing functions to calculate statistical metrics from numpy arrays
+'''
+
+import subprocess
+import os, sys
+import datetime
+import numpy as np
+import numpy.ma as ma
+import scipy.stats as stats
+import matplotlib.pyplot as plt
+from toolkit import plots, process
+from ocw import plotter
+from utils import misc
+from storage import files 
+
+def calcAnnualCycleMeans(dataset1):
+    '''
+    Purpose:: 
+        Calculate annual cycle in terms of monthly means at every grid point.
+
+    Input::
+        dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array timeseries
+
+    Output:: 
+        data - the input reshaped to (# of years, 12, ...), grouped by calendar month
+        means - if a 3d array was entered, 3d (12, lat, lon); if 1d data was entered,
+        a timeseries of length 12 (one mean per calendar month)
+
+     '''
+    data = misc.reshapeMonthlyData(dataset1)
+    means = data.mean(axis = 0)
+    return data, means
+
+def calcAnnualCycleMeansSubRegion(dataset1):
+    '''
+    Purpose:: 
+        Calculate annual cycle in terms of monthly means at every sub-region.
+
+    Input::
+        dataset1 - 2d numpy array of data in (region, t)
+        
+    Output:: 
+        data - the input reshaped to (region, # of years, 12)
+        means - (region, 12), one mean per calendar month
+
+     '''
+    nregion, nT = dataset1.shape
+    data = dataset1.reshape([nregion, nT/12, 12])
+    means = data.mean(axis = 1)
+    return data, means
+
+
+def calcClimYear(dataset1):
+    '''
+    Purpose:: 
+       Calculate annual mean timeseries and climatology for both 3-D and point time series.
+
+    Input::
+        dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array timeseries
+
+    Output:: 
+        tSeries - if 3d numpy was entered, 3d (nYr,lat,lon), if 1d data entered
+        it is a timeseries of the data of length nYr
+        means - if 3d numpy was entered, 2d (lat,lon), if 1d data entered
+        it is a floating point number representing the overall mean
+    '''
+    data = misc.reshapeMonthlyData(dataset1)
+    tSeries = data.mean(axis = 1)
+    means = tSeries.mean(axis = 0)
+    return tSeries, means
+
+def calcClimSeason(monthBegin, monthEnd, dataset1):
+    '''
+    Purpose :: 
+       Calculate seasonal mean time series and climatology for both 3-D and point time series.
+       For example, to calculate DJF mean time series, monthBegin = 12, monthEnd =2 
+       This can handle monthBegin=monthEnd i.e. for climatology of a specific month
+
+    Input::
+        monthBegin - an integer for the beginning month (Jan =1)
+        monthEnd - an integer for the ending month (Jan = 1)
+        dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array time series
+
+    Output:: 
+        tSeries - if a 3d array was entered, 3d (nYr, lat, lon), where nYr is the number of
+        years (number of years - 1 if monthBegin > monthEnd); if 1d data was entered, a
+        time series of the data of length nYr
+        means - if 3d numpy was entered, 2d (lat,lon), if 1d data entered
+        it is a floating point number representing the overall mean
+    '''
+    if monthBegin > monthEnd:
+        # Offset the original array so that the first month
+        # becomes monthBegin; note that this cuts off the first year of data
+        offset = slice(monthBegin - 1, monthBegin - 13)
+        data = misc.reshapeMonthlyData(dataset1[offset])
+        monthIndex = slice(0, 13 - monthBegin + monthEnd)
+    else:
+        # Since monthBegin <= monthEnd, just take a slice containing those months
+        data = misc.reshapeMonthlyData(dataset1)
+        monthIndex =  slice(monthBegin - 1, monthEnd)
+        
+    tSeries = data[:, monthIndex].mean(axis = 1)
+    means = tSeries.mean(axis = 0)        
+    return tSeries, means
+
+def calcClimSeasonSubRegion(monthBegin, monthEnd, dataset1):
+    '''
+    Purpose :: 
+       Calculate seasonal mean time series and climatology for sub-region averaged time series.
+       For example, to calculate DJF mean time series, monthBegin = 12, monthEnd =2 
+       This can handle monthBegin=monthEnd i.e. for climatology of a specific month
+
+    Input::
+        monthBegin - an integer for the beginning month (Jan =1)
+        monthEnd - an integer for the ending month (Jan = 1)
+        dataset1 - 2d numpy array of data in (region, t)
+
+    Output:: 
+        tSeries - (region, nYr), where nYr is the number of years (number of years - 1 if monthBegin > monthEnd)
+        means - (region)
+    '''
+    nregion, nT = dataset1.shape
+    nYR = nT/12
+    if monthBegin > monthEnd:
+        # Offset the original array so that the first month
+        # becomes monthBegin; note that this cuts off the first year of data
+        offset = slice(monthBegin - 1, monthBegin - 13)
+        data = dataset1[:,offset].reshape([nregion,nYR-1, 12])
+        monthIndex = slice(0, 13 - monthBegin + monthEnd)
+    else:
+        # Since monthBegin <= monthEnd, just take a slice containing those months
+        data = dataset1.reshape([nregion,nYR,12])
+        monthIndex =  slice(monthBegin - 1, monthEnd)
+        
+    tSeries = data[:, :, monthIndex].mean(axis = 2)
+    means = tSeries.mean(axis = 1)        
+    return tSeries, means
+
+def calcAnnualCycleStdev(dataset1):
+    '''
+     Purpose:: 
+        Calculate monthly standard deviations for every grid point
+     
+     Input::
+        dataset1 - 3d numpy array of data in (12* number of years,lat,lon) 
+
+     Output:: 
+        stds - if 3d numpy was entered, 3d (12,lat,lon)
+    '''
+    data = misc.reshapeMonthlyData(dataset1)
+    stds = data.std(axis = 0, ddof = 1)
+    return stds
+
+def calcAnnualCycleStdevSubRegion(dataset1):
+    '''
+     Purpose:: 
+        Calculate monthly standard deviations for every sub-region
+     
+     Input::
+        dataset1 - 2d numpy array of data in (nregion, 12* number of years) 
+
+     Output:: 
+        stds - (nregion, 12)
+    '''
+    nregion, nT = dataset1.shape
+    data = dataset1.reshape([nregion, nT/12, 12])
+    stds = data.std(axis = 1, ddof = 1)
+    return stds
+
+def calcAnnualCycleDomainMeans(dataset1):
+    '''
+     Purpose:: 
+        Calculate the spatially averaged monthly climatology
+
+     Input::
+        dataset1 - 3d numpy array of data in (12* number of years,lat,lon) 
+
+     Output::  
+        means - time series (12)
+    '''
+    data = misc.reshapeMonthlyData(dataset1)
+    
+    # Calculate the means, month by month
+    means = np.zeros(12)
+    for i in np.arange(12):
+        means[i] = data[:, i, :, :].mean()
+        
+    return means
+
+def calcSpatialStdevRatio(evaluationData, referenceData):
+    '''
+    Purpose ::
+        Calculate the ratio of spatial standard deviation (model standard deviation)/(observed standard deviation)
+
+    Input ::
+        evaluationData - model data array (lat, lon)
+        referenceData- observation data array (lat,lon)
+
+    Output::
+        ratio of standard deviation (a scalar) 
+    
+    '''
+    stdevRatio = evaluationData[(evaluationData.mask==False) & (referenceData.mask==False)].std()/ \
+                 referenceData[(evaluationData.mask==False) & (referenceData.mask==False)].std()  
+    return stdevRatio
+
+def calcTemporalStdev(dataset1):
+    '''
+     Purpose:: 
+        Calculate sample standard deviations over time
+
+     Input::
+        dataset1 - 3d numpy array of data in (time,lat,lon) 
+        
+
+     Output::  
+        stds - 2d array (lat, lon) of standard deviations along the time axis
+    '''
+    stds = dataset1.std(axis = 0, ddof = 1)
+    return stds
+
+def calcAnnualCycleDomainStdev(dataset1):
+    '''
+     Purpose:: 
+        Calculate sample standard deviations representing the domain in each month
+
+     Input::
+        dataset1 - 3d numpy array of data in (12* number of years,lat,lon) 
+
+     Output::  
+        stds - time series (12)
+    '''
+    data = misc.reshapeMonthlyData(dataset1)
+    
+    # Calculate the standard deviation, month by month
+    stds = np.zeros(12)
+    for i in np.arange(12):
+        stds[i] = data[:, i, :, :].std(ddof = 1)
+        
+    return stds
+
+def calcBiasAveragedOverTime(evaluationData, referenceData, option):        # Mean Bias
+    '''
+    Purpose:: 
+        Calculate the mean difference between two fields over time for each grid point.
+
+     Input::
+        referenceData - array of data 
+        evaluationData - array of data with same dimension of referenceData 
+        option - string indicating absolute values or not
+
+     Output::  
+        bias - difference between referenceData and evaluationData averaged over the first dimension
+    
+    '''
+    # Calculate mean difference between two fields over time for each grid point
+    # Preprocessing of both obs and model data ensures the absence of missing values
+    diff = evaluationData - referenceData
+    if(option == 'abs'): 
+        diff = abs(diff)
+    bias = diff.mean(axis = 0)
+    return bias
+
+
+def calcBiasAveragedOverTimeAndSigLev(evaluationData, referenceData):
+    '''
+    Purpose::
+        Calculate mean difference between two fields over time for each grid point
+    
+    Classify grid points as missing when too many of the contributing time
+    steps are missing (using a threshold data requirement),
+    
+    i.e. if the working time unit is monthly data and we are averaging over
+    several months, we need to decide what fraction of missing data we
+    tolerate before classifying the averaged data point itself as missing.
+ 
+        
+     Input::
+        referenceData - array of data 
+        evaluationData - array of data with same dimension of referenceData 
+
+     Output::  
+        bias - difference between referenceData and evaluationData averaged over the first dimension
+        sigLev - significance of the difference (masked array)
+        For example: sigLev[iy,ix] = 0.95 means that the observation and model are different at the 95% confidence level 
+        at X=ix and Y=iy
+    '''
+    # If either gridcell in each data set is missing, set that cell to
+    # missing for the output significance level
+    evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
+    referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
+    
+    # The overall mask associated with missing data
+    overallMask = np.logical_or(evaluationDataMask, referenceDataMask)
+    
+    diff = evaluationData - referenceData
+    bias = diff.mean(axis = 0)
+    sigLev = 1 - stats.ttest_rel(evaluationData, referenceData, axis = 0)[1]
+    sigLev[overallMask] = -100.
+    sigLev = ma.masked_equal(sigLev, -100.) 
+    # Set mask for bias metric using missing data in obs or model data series
+    # i.e. if obs contains more than threshold (e.g.50%) missing data 
+    # then classify time average bias as missing data for that location. 
+    bias = ma.masked_array(bias.data, overallMask)
+    return bias, sigLev
+
+
+def calcBiasAveragedOverTimeAndDomain(evaluationData, referenceData):
+    '''
+    Purpose:: 
+        Calculate the mean difference between two fields over time and domain
+     Input::
+        referenceData - array of data 
+        evaluationData - array of data with same dimension of referenceData 
+        
+     Output::  
+        bias - difference between referenceData and evaluationData averaged over time and space
+    
+    '''
+
+    diff = evaluationData - referenceData
+    
+    bias = diff.mean()
+    return bias
+
+def calcBias(evaluationData, referenceData):
+    '''
+    Purpose:: 
+        Calculate the difference between two fields at each grid point
+
+     Input::
+        referenceData - array of data 
+        evaluationData - array of data with same dimension of referenceData 
+        
+     Output::  
+        diff - difference between referenceData and evaluationData
+    
+    '''
+
+    diff = evaluationData - referenceData
+    return diff
+
+def calcRootMeanSquaredDifferenceAveragedOverTime(evaluationData, referenceData):
+    '''
+    Purpose:: 
+        Calculate root mean squared difference (RMS errors) averaged over time between two fields for each grid point
+
+     Input::
+        referenceData - array of data 
+        evaluationData - array of data with same dimension of referenceData 
+        
+     Output::  
+        rms - root mean squared difference; if the input is 1-d data, the output is a single floating point number.
+    
+    '''
+    sqdiff = (evaluationData - referenceData)** 2
+    rms = np.sqrt(sqdiff.mean(axis = 0))
+    return rms
+
+
+def calcRootMeanSquaredDifferenceAveragedOverTimeAndDomain(evaluationData, referenceData):
+    '''
+    Purpose:: 
+        Calculate root mean squared difference (RMS errors) averaged over time and space between two fields
+
+     Input::
+        referenceData - array of data (should be 3-d array)
+        evaluationData - array of data with same dimension of referenceData 
+        
+     Output::  
+        rms - root mean squared difference averaged over time and space
+    '''
+    sqdiff = (evaluationData - referenceData)** 2
+    rms = np.sqrt(sqdiff.mean())
+    return rms
+
+def calcTemporalCorrelation(evaluationData, referenceData):
+    '''
+    Purpose ::
+        Calculate the temporal correlation.
+    
+    Assumption(s) ::
+        Both datasets are 2-D arrays of shape (region, time).
+    
+    Input ::
+        evaluationData - model data array (region, t)
+        referenceData - observation data array (region, t)
+            
+    Output::
+        temporalCorrelation - a 1-D masked array of temporal correlation coefficients, one per region
+        sigLev - a 1-D masked array of confidence levels related to temporalCorrelation 
+    
+    REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
+    sigLev: the correlation between model and observation is significant at sigLev * 100 %
+    '''
+    evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
+    referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
+    
+    nregion = evaluationData.shape[0]
+    temporalCorrelation = ma.zeros([nregion])-100.
+    sigLev = ma.zeros([nregion])-100.
+    for iregion in np.arange(nregion):
+        temporalCorrelation[iregion], sigLev[iregion] = stats.pearsonr(evaluationData[iregion,:], referenceData[iregion,:])
+        sigLev[iregion] = 1 - sigLev[iregion]
+                    
+    temporalCorrelation=ma.masked_equal(temporalCorrelation.data, -100.)        
+    sigLev=ma.masked_equal(sigLev.data, -100.)    
+    
+    return temporalCorrelation, sigLev
+
+def calcTemporalCorrelationSubRegion(evaluationData, referenceData):
+    '''
+    Purpose ::
+        Calculate the temporal correlation.
+    
+    Assumption(s) ::
+        Both evaluation and reference data are 1-D, subregion-averaged time series
+    
+    Input ::
+        evaluationData - model data time series [t]
+        referenceData - observation data time series [t]
+            
+    Output::
+        temporalCorrelation - a single temporal correlation coefficient
+        (0. is returned for constant time series, for which the correlation is undefined)
+    
+    REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
+    sigLev: the correlation between model and observation is significant at sigLev * 100 %
+    '''
+        
+    
+    temporalCorrelation = 0.
+    sigLev = 0.
+    t1 = evaluationData[:]
+    t2 = referenceData[:]
+    # Guard against constant series, for which the correlation is undefined
+    if t1.min() != t1.max() and t2.min() != t2.max():
+        temporalCorrelation, sigLev = stats.pearsonr(t1, t2)
+        sigLev = 1. - sigLev  # p-value => confidence level
+
+    return temporalCorrelation
+
+def calcPatternCorrelation(evaluationData, referenceData):
+    '''
+    Purpose ::
+        Calculate the spatial correlation.
+
+    Input ::
+        evaluationData - model data array (lat, lon)
+        referenceData- observation data array (lat,lon)
+
+    Output::
+        patternCorrelation - a single floating point
+        sigLev - a single floating point representing the confidence level 
+    
+    '''
+   
+    patternCorrelation, sigLev = stats.pearsonr(evaluationData[(evaluationData.mask==False) & (referenceData.mask==False)],
+                          referenceData[(evaluationData.mask==False) & (referenceData.mask==False)])
+    return patternCorrelation, sigLev
+
+
+def calcPatternCorrelationEachTime(evaluationData, referenceData):
+    '''
+     Purpose ::
+        Calculate the spatial correlation for each time
+
+     Assumption(s) ::
+        The first dimension of two datasets is the time axis.
+
+     Input ::
+        evaluationData - model data array (time,lat, lon)
+        referenceData- observation data array (time,lat,lon)
+
+     Output::
+        patternCorrelation - a timeseries (time)
+        sigLev - a time series (time)
+    ''' 
+    nT = evaluationData.shape[0]
+    patternCorrelation = ma.zeros(nT)-100.
+    sigLev = ma.zeros(nT)-100.
+    for it in np.arange(nT):
+        patternCorrelation[it], sigLev[it] = calcPatternCorrelation(evaluationData[it,:,:], referenceData[it,:,:])
+
+    return patternCorrelation,sigLev
+
+def calcNashSutcliff(evaluationData, referenceData):
+    '''
+    Assumption(s)::  
+        Both evaluationData and referenceData are the same shape.
+        * lat, lon must match up
+        * time steps must align (i.e. months vs. months)
+    
+    Input::
+        evaluationData - 3d (time, lat, lon) array of data
+        referenceData - 3d (time, lat, lon) array of data
+    
+    Output::
+        nashcor - 1d array aligned along the time dimension of the input
+        datasets; a time series of the Nash-Sutcliffe coefficient of efficiency
+     
+    '''
+    # Flatten the spatial dimensions
+    data1 = evaluationData[:]
+    data2 = referenceData[:]
+    nT = data1.shape[0]
+    data1.shape = nT, data1.size / nT
+    data2.shape = nT, data2.size / nT 
+    meanData2 = data2.mean(axis = 1)
+    
+    # meanData2 must be reshaped to 2D as to obey
+    # numpy broadcasting rules
+    meanData2.shape = nT, 1
+    nashcor = 1 - ((((data2 - data1) ** 2).sum(axis = 1)) / 
+               (((data2 - meanData2) ** 2).sum(axis = 1)))
+    return nashcor
+
+
+def calcPdf(evaluationData, referenceData):
+    '''
+    Purpose::
+        Calculate a normalized Probability Distribution Function (PDF)
+        similarity score, with bins set according to the data range,
+        using the equation from Perkins et al. 2007:
+
+        PS = sum(min(Z_O_i, Z_M_i)), where Z is the distribution
+        (histogram) of the observed (O) or model (M) data.
+
+        Called in do_rcmes_processing_sub.py.
+
+    Input::
+        evaluationData - 3d (time, lat, lon) array of model data
+        referenceData - 3d (time, lat, lon) array of observation data,
+        where time is the number of time values, e.g. for the period
+        199001010000 - 199201010000:
+        if annual means (opt 1) was chosen, then referenceData.shape = (2,lat,lon)
+        if monthly means (opt 2) was chosen, then referenceData.shape = (24,lat,lon)
+
+        The user is prompted for the number of bins to use and the bin
+        edges (min and max).
+
+    Output::
+        similarityScore - a single float representing the PDF similarity
+        score for the period
+    '''
+    # float to store the final PDF similarity score
+    similarityScore = 0.0
+
+    print 'min modelData', evaluationData[:, :, :].min()
+    print 'max modelData', evaluationData[:, :, :].max()
+    print 'min obsData', referenceData[:, :, :].min()
+    print 'max obsData', referenceData[:, :, :].max()
+    # find a distribution for the entire dataset
+    # Prompt the user to enter the min, max and number of bin values. 
+    # The max, min info above is to help guide the user with these choices
+    print '****PDF input values from user required **** \n'
+    nbins = int (raw_input('Please enter the number of bins to use. \n'))
+    minEdge = float(raw_input('Please enter the minimum value to use for the edge. \n'))
+    maxEdge = float(raw_input('Please enter the maximum value to use for the edge. \n'))
+    
+    mybins = np.linspace(minEdge, maxEdge, nbins)
+    print 'nbins is', nbins, 'mybins are', mybins
+    
+    pdfMod, edges = np.histogram(evaluationData, bins = mybins, density = True)
+    print 'evaluationData distribution and edges', pdfMod, edges
+    pdfObs, edges = np.histogram(referenceData, bins = mybins, density = True)
+    print 'referenceData distribution and edges', pdfObs, edges    
+    
+    #find minimum at each bin between lists 
+    i = 0
+    for model_value in pdfMod :
+        print 'model_value is', model_value, 'pdfObs[', i, '] is', pdfObs[i]
+        if model_value < pdfObs[i]:
+            similarityScore += model_value
+        else:
+            similarityScore += pdfObs[i] 
+        i += 1 
+    print 'similarity_score is', similarityScore
+    return similarityScore
+    
+
+
+def calculate_metrics_and_make_plots(varName, workdir, lons, lats, obsData, mdlData, obsRgn, mdlRgn, obsList, mdlList, subRegions, \
+                                     subRgnLon0, subRgnLon1, subRgnLat0, subRgnLat1):
+    '''
+    Purpose:: 
+        Calculate all the metrics used in Kim et al. [2013] paper and plot them 
+
+    Input::
+        varName - name of the variable under evaluation
+        workdir - directory in which to write the output plot files
+        lons - 2d array of grid longitudes
+        lats - 2d array of grid latitudes
+        obsData - 4d (dataset, time, lat, lon) array of observation data
+        mdlData - 4d (dataset, time, lat, lon) array of model data
+        obsRgn - 3d (dataset, region, time) array of subregion-averaged observation data (or None)
+        mdlRgn - 3d (dataset, region, time) array of subregion-averaged model data (or None)
+        obsList - list of observation dataset names
+        mdlList - list of model names
+        subRegions - boolean indicating whether subregion information was provided
+        subRgnLon0, subRgnLat0 - southwest boundary of sub-regions [numSubRgn]
+        subRgnLon1, subRgnLat1 - northeast boundary of sub-regions [numSubRgn]
+    Output:: 
+        png files
+        
+     '''
+   
+   
+    nobs, nt, ny, nx = obsData.shape
+    nmodel = mdlData.shape[0]
+    ### TODO: unit conversion (K to C)
+    if varName == 'temp':
+        obsData[0, :, :, :] = obsData[0, :, :, :] - 273.15
+        if subRegions:
+            obsRgn[0, :, :] = obsRgn[0, :, :] - 273.15
+    if varName == 'prec':
+        obsData[0, :, :, :] = obsData[0, :, :, :]*86400.
+        if subRegions:
+            obsRgn[0, :, :] = obsRgn[0, :, :]*86400.
+    ###    
+    oTser, oClim = calcClimYear( obsData[0, :, :, :])
+    bias_of_overall_average = ma.zeros([nmodel, ny, nx])
+    spatial_stdev_ratio = np.zeros([nmodel])
+    spatial_corr = np.zeros([nmodel])
+    mdlList.append('ENS')
+    
+    for imodel in np.arange(nmodel):
+        mTser, mClim = calcClimYear( mdlData[imodel,:,:,:])
+        bias_of_overall_average[imodel,:,:] = calcBias(mClim, oClim)
+        spatial_corr[imodel], sigLev = calcPatternCorrelation(oClim, mClim)
+        spatial_stdev_ratio[imodel] = calcSpatialStdevRatio(mClim, oClim)   
+    fig_return = plotter.draw_contour_map(oClim, lats, lons, workdir+'/observed_climatology_'+varName, fmt='png', gridshape=(1, 1),
+                   clabel='', ptitle='', subtitles=obsList, cmap=None, 
+                   clevs=None, nlevs=10, parallels=None, meridians=None,
+                   extend='neither')    
+    # TODO:
+    # Be sure to update "gridshape" argument to be the number of sub plots (rows,columns). This should be improved so that the 
+    # gridshape is optimally determined for a given number of models. For example:
+    # For 3 models, a gridshape of (2,2) would be sensible:
+    # X X 
+    # X
+    #
+    fig_return = plotter.draw_contour_map(bias_of_overall_average, lats, lons, workdir+'/bias_of_climatology_'+varName, fmt='png', gridshape=(4, 2),
+                   clabel='', ptitle='', subtitles=mdlList, cmap=None, 
+                   clevs=None, nlevs=10, parallels=None, meridians=None,
+                   extend='neither')
+    Taylor_data = np.array([spatial_stdev_ratio, spatial_corr]).transpose()
+    
+    fig_return = plotter.draw_taylor_diagram(Taylor_data, mdlList, refname='CRU', fname = workdir+'/Taylor_'+varName, fmt='png',frameon=False)
+
+    if subRegions:
+        nseason = 2      # (0: summer and 1: winter)
+        nregion = len(subRgnLon0)
+        season_name = ['summer','winter']
+        rowlabels = ['PNw','PNe','CAn','CAs','SWw','SWe','COL','GPn','GPc','GC','GL','NE','SE','FL']
+        collabels = ['M1','M2','M3','M4','M5','M6','ENS']
+        collabels[nmodel-1] = 'ENS'
+        
+        for iseason in [0,1]:
+            portrait_subregion = np.zeros([4, nregion, nmodel])
+            portrait_titles = ['(a) Normalized Bias', '(b) Normalized STDV', '(c) Normalized RMSE', '(d) Correlation']
+            if iseason == 0:
+                monthBegin=6
+                monthEnd=8
+            if iseason == 1:
+                monthBegin=12
+                monthEnd=2
+                      
+            obsTser, obsClim = calcClimSeasonSubRegion(monthBegin, monthEnd, obsRgn[0,:,:])
+            for imodel in np.arange(nmodel):
+                mTser, mClim = calcClimSeasonSubRegion(monthBegin, monthEnd, mdlRgn[imodel,:,:])
+                for iregion in np.arange(nregion):
+                    portrait_subregion[0,iregion,imodel] = calcBias(mClim[iregion],obsClim[iregion])/calcTemporalStdev(obsTser[iregion,:])
+                    portrait_subregion[1,iregion,imodel] = calcTemporalStdev(mTser[iregion,:])/calcTemporalStdev(obsTser[iregion,:])
+                    portrait_subregion[2,iregion,imodel] = calcRootMeanSquaredDifferenceAveragedOverTime(mTser[iregion,:], obsTser[iregion,:])/calcTemporalStdev(obsTser[iregion,:])
+                    portrait_subregion[3,iregion,imodel] = calcTemporalCorrelationSubRegion(mTser[iregion,:],obsTser[iregion,:])
+            portrait_return = plotter.draw_portrait_diagram(portrait_subregion, rowlabels, collabels[0:nmodel], workdir+'/portrait_diagram_'+season_name[iseason]+'_'+varName, fmt='png', 
+                             gridshape=(2, 2), xlabel='', ylabel='', clabel='', 
+                             ptitle='', subtitles=portrait_titles, cmap=None, clevs=None, 
+                             nlevs=10, extend='neither')  
+            # annual cycle
+            nmonth = 12
+            times = np.arange(nmonth)
+            data_names = [obsList[0]] + list(mdlList)
+            annual_cycle = np.zeros([nregion, nmonth, nmodel+1])
+            obsTser, annual_cycle[:, :, 0] = calcAnnualCycleMeansSubRegion(obsRgn[0,:,:])
+            obsStd = calcAnnualCycleStdevSubRegion(obsRgn[0,:,:])
+            for imodel in np.arange(nmodel):
+                mdlTser, annual_cycle[:, :, imodel+1] = calcAnnualCycleMeansSubRegion(mdlRgn[imodel, :, :])
+            # Make annual_cycle shape compatible with draw_time_series
+            annual_cycle = annual_cycle.swapaxes(1, 2)
+            tseries_return = plotter.draw_time_series(annual_cycle, times, data_names, workdir+'/time_series_'+varName, gridshape=(7, 2), 
+                  subtitles=rowlabels, label_month=True)
+            
+         
+        

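To illustrate how the building blocks in metrics_kyo.py compose, here is a minimal sketch (synthetic data, not part of the commit; assumes the rcmes packages are on sys.path) exercising calcClimYear, calcBias, and calcTemporalStdev the same way calculate_metrics_and_make_plots does:

    import numpy as np
    import numpy.ma as ma
    from toolkit import metrics_kyo

    nYr, ny, nx = 3, 4, 5
    obs = ma.array(np.random.rand(nYr * 12, ny, nx))  # 3 years of monthly data
    mdl = obs + 0.5                                   # a deliberately biased "model"

    oTser, oClim = metrics_kyo.calcClimYear(obs)      # (nYr, ny, nx), (ny, nx)
    mTser, mClim = metrics_kyo.calcClimYear(mdl)
    bias = metrics_kyo.calcBias(mClim, oClim)         # ~0.5 everywhere
    stdev = metrics_kyo.calcTemporalStdev(obs)        # (ny, nx)
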
Modified: incubator/climate/trunk/rcmet/src/main/python/rcmes/utils/misc.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/python/rcmes/utils/misc.py?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/python/rcmes/utils/misc.py (original)
+++ incubator/climate/trunk/rcmet/src/main/python/rcmes/utils/misc.py Fri Sep  6 20:29:37 2013
@@ -1002,39 +1002,45 @@ def make_list_of_era_surf_files(firstTim
     return filenamelist
 
 
-#
-
 def assign_subRgns_from_a_text_file(infile):
-    # Read pre-fabricated sugregion information from a text file
-    # Note: python indexing includes the beginning point but excludes the ending point
-    f = open(infile, 'r')
-    for i in np.arange(8):
-        string = f.readline()
-        print 'Line ', i, ': Content ', string
-    string = f.readline()
-    numSubRgn = int(string[20:22])
-    print 'numSubRgn = ', numSubRgn
-    for i in np.arange(3):
-        string = f.readline()
-    # Read input string and extract subRegion info (name, longs, lats) from the string
+    '''
+    Extract subregion definitions from a text file
+    
+    Input: 
+        infile - string representing the path to the subregion definition config file
+        
+    Output:
+        numSubRgn - the number of subregion definitions extracted from the input file
+        subRgnName - a list of subregion names
+        subRgnLon0 - a Numpy masked array of western (minimum) longitude values
+        subRgnLon1 - a Numpy masked array of eastern (maximum) longitude values
+        subRgnLat0 - a Numpy masked array of southern (minimum) latitude values
+        subRgnLat1 - a Numpy masked array of northern (maximum) latitude values
+    '''
+    
+    # Parse subregions from provided file
+    subregions = readSubRegionsFile(infile)
+    numSubRgn  = len(subregions)
+    
+    # Define Numpy masked arrays to hold the subregion definition data
     subRgnName = []
     subRgnLon0 = ma.zeros((numSubRgn))
     subRgnLon1 = ma.zeros((numSubRgn))
     subRgnLat0 = ma.zeros((numSubRgn))
     subRgnLat1 = ma.zeros((numSubRgn))
+    
+    # Populate the arrays with the data extracted from the file
     for i in np.arange(numSubRgn):
-        string = f.readline()
-        subRgnName.append(string[0:19])
-        subRgnLon0[i] = float(string[30:37])
-        subRgnLon1[i] = float(string[40:47])
-        subRgnLat0[i] = float(string[50:55])
-        subRgnLat1[i] = float(string[60:65])
-    f.close()
-    print 'subRgnName: ', subRgnName
-    print 'subRgnLon0: ', subRgnLon0
-    print 'subRgnLon1: ', subRgnLon1
-    print 'subRgnLat0: ', subRgnLat0
-    print 'subRgnLat1: ', subRgnLat1
+        parts = subregions[i]
+        subRgnName.append(parts[0].strip('"'))
+        subRgnLon0[i] = float(parts[1])
+        subRgnLon1[i] = float(parts[2])
+        subRgnLat0[i] = float(parts[3])
+        subRgnLat1[i] = float(parts[4])
+    
+    # Return the computed arrays
     return numSubRgn, subRgnName, subRgnLon0, subRgnLon1, subRgnLat0, subRgnLat1
 
 def createSubRegionObjectInteractively():
@@ -1379,3 +1385,29 @@ def select_metrics():
     
     return metricOption
 
+
+def msg(key=None, val=None, indent=0, returnResult=False):
+    ''' Output messages to stdout, with support for indentation 
+       and formatted printing of lists and dicts
+    '''
+    if key is None:
+        message = ' '*indent
+    else:
+        message = ' '*indent + (key if val is None else key + ':\t')
+    if isinstance(val, dict):
+        values = ['{0}: {1}'.format(' '*(indent+2) + str(k),
+            msg(val=v, indent=indent+2, returnResult=True))
+            for k,v in val.items()]
+        message += '\n' + '\n'.join(values)
+    elif isinstance(val, (list, tuple)):
+        values = [' '*(indent+2) +
+            msg(val=v, indent=indent+2, returnResult=True) 
+            for v in val]
+        message += '\n' + '\n'.join(values)
+    else:
+        if val is not None:
+            message += str(val)
+    if returnResult:
+        return message
+    else:
+        print message

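The new msg helper recurses through dicts and lists, indenting nested values two extra spaces per level. A quick usage sketch (assuming the rcmes utils package is importable):

    from utils.misc import msg

    msg('Settings', {'cacheDir': '/tmp/cache', 'fileList': ['a.nc', 'b.nc']}, 2)
    # Prints something like:
    #   Settings:
    #     cacheDir: /tmp/cache
    #     fileList:
    #       a.nc
    #       b.nc
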
Modified: incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/ParameterSelectCtrl.js
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/ParameterSelectCtrl.js?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/ParameterSelectCtrl.js (original)
+++ incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/ParameterSelectCtrl.js Fri Sep  6 20:29:37 2013
@@ -102,17 +102,28 @@ function($rootScope, $scope, $http, $tim
 	$scope.runEvaluation = function() {
 		$scope.runningEval = true;
 
-		// TODO
-		// Currently this has the 1 model, 1 observation format hard coded in. This shouldn't
-		// be the long-term case! This needs to be changed!!!!!!!!
-		var obsIndex = -1,
-			modelIndex = -1;
+        // Containers for dataset information
+		var obsDatasetIds = [],
+		    obsDatasetParameterIds = [],
+			modelDatasetIds = [],
+			modelDatasetParameterIds = [],
+			modelDatasetTimes = [],
+			modelDatasetLats = [],
+			modelDatasetLons = [];
 
+        // Populate containers with information for each selected dataset
 		for (var i = 0; i < $scope.datasets.length; i++) {
-			if ($scope.datasets[i]['isObs'] == 1)
-				obsIndex = i;
-			else
-				modelIndex = i;
+			if ($scope.datasets[i]['isObs'] == 1) {
+				obsDatasetIds.push($scope.datasets[i].datasetId)
+				obsDatasetParameterIds.push($scope.datasets[i].id)
+			} else {
+				modelDatasetIds.push($scope.datasets[i].id)
+				modelDatasetParameterIds.push($scope.datasets[i].param)
+				modelDatasetTimes.push($scope.datasets[i].time)
+				modelDatasetLats.push($scope.datasets[i].lat)
+				modelDatasetLons.push($scope.datasets[i].lon)
+			}
 		}
 
 		// TODO At the moment we aren't running all the metrics that the user selected. We're only
@@ -129,38 +140,36 @@ function($rootScope, $scope, $http, $tim
 			}
 		};
 
+        // Prepare information to send to backend service
 		var data = {params: { 
-			'obsDatasetId'     : $scope.datasets[obsIndex]['id'],
-			'obsParameterId'   : $scope.datasets[obsIndex]['param'],
+			'obsDatasetIds'    : obsDatasetIds,
+			'obsParameterIds'  : obsDatasetParameterIds,
+			
 			'startTime'        : $scope.displayParams.start + " 00:00:00",
 			'endTime'          : $scope.displayParams.end + " 00:00:00",
 			'latMin'           : $scope.displayParams.latMin,
 			'latMax'           : $scope.displayParams.latMax,
 			'lonMin'           : $scope.displayParams.lonMin,
 			'lonMax'           : $scope.displayParams.lonMax,
-			'filelist'         : $scope.datasets[modelIndex]['id'],
-			'modelVarName'     : $scope.datasets[modelIndex]['param'],
-			'modelTimeVarName' : $scope.datasets[modelIndex]['time'],
-			'modelLatVarName'  : $scope.datasets[modelIndex]['lat'],
-			'modelLonVarName'  : $scope.datasets[modelIndex]['lon'],
-			'regridOption'     : 'model',
+			
+			'filelist'         : modelDatasetIds,
+			'modelVarName'     : modelDatasetParameterIds,
+			'modelTimeVarName' : modelDatasetTimes,
+			'modelLatVarName'  : modelDatasetLats,
+			'modelLonVarName'  : modelDatasetLons,
+			
+			'regridOption'     : ((evaluationSettings.getSettings().spatialSelect.isObs) ? 'obs' : 'model'),
+			'regridBasis'      : evaluationSettings.getSettings().spatialSelect.id,
 			'timeRegridOption' : evaluationSettings.getSettings().temporal.selected,
-			'metricOption'     : metricToRun,
+			'metricOption'     : metricToRun,   // Should be a list of metrics to run
+			'subregionFile'    : evaluationSettings.getSettings().subregionFile,
 			'callback'         : 'JSON_CALLBACK',
 		}};
 
 		$http.jsonp($rootScope.baseURL + '/rcmes/run/', data).
 		success(function(data) {
-			var comp = data['comparisonPath'].split('/');
-			var model = data['modelPath'].split('/');
-			var obs = data['obsPath'].split('/');
 			var evalWorkDir = data['evalWorkDir'];
 
-			$rootScope.evalResults = {};
-			$rootScope.evalResults.comparisonPath = comp[comp.length - 1];
-			$rootScope.evalResults.modelPath = model[model.length - 1];
-			$rootScope.evalResults.obsPath = obs[obs.length - 1];
-
 			$scope.runningEval = false;
 
 			$timeout(function() {
@@ -170,6 +179,7 @@ function($rootScope, $scope, $http, $tim
 					window.location = "#/results";
 				}
 			}, 100);
+			
 		}).error(function() {
 			$scope.runningEval = false;
 		});

Modified: incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/RcmedSelectionCtrl.js
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/RcmedSelectionCtrl.js?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/RcmedSelectionCtrl.js (original)
+++ incubator/climate/trunk/rcmet/src/main/ui/app/js/controllers/RcmedSelectionCtrl.js Fri Sep  6 20:29:37 2013
@@ -71,9 +71,10 @@ function($rootScope, $scope, $http, $tim
 
 		newDataset['isObs'] = 1;
 		// Save the dataset id (the important part) and name (for display purposes)
-		newDataset['id'] = $scope.datasetSelection['dataset_id'];
+		newDataset['datasetId'] = $scope.datasetSelection['dataset_id'];
 		newDataset['name'] = $scope.datasetSelection['longname'];
 		// Save the parameter id (the important part) and name (for display purposes)
+		newDataset['id']    = $scope.parameterSelection['parameter_id'];
 		newDataset['param'] = $scope.parameterSelection['parameter_id'];
 		newDataset['paramName'] = $scope.parameterSelection['longname'];
 		// Save the (fake) lat/lon information. We test with the TRMM dataset. RCMED currently

Modified: incubator/climate/trunk/rcmet/src/main/ui/app/partials/main.html
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/rcmet/src/main/ui/app/partials/main.html?rev=1520687&r1=1520686&r2=1520687&view=diff
==============================================================================
--- incubator/climate/trunk/rcmet/src/main/ui/app/partials/main.html (original)
+++ incubator/climate/trunk/rcmet/src/main/ui/app/partials/main.html Fri Sep  6 20:29:37 2013
@@ -15,11 +15,11 @@
     <select ng-model="settings.temporal.selected" ng-options="opt for opt in settings.temporal.options"></select>
     <hr />
     <h4>Select which dataset to use as the grid base.</h4>
-    <select ng-model="settings.spatialSelect" ng-options="dataset.name for dataset in datasets"></select>
+    <select ng-model="settings.spatialSelect" ng-options="dataset as dataset.name for dataset in datasets"></select>
     <hr />
     <h4>Select a file which will define the bounds of subregions.</h4>
     <form class="form-inline" autocomplete="off">
-      <input id="subregionFileInput" predictive-file-browser-input ng-model="filePathInput" type="text" class="input-xlarge" autocomplete="off" />
+      <input id="subregionFileInput" predictive-file-browser-input ng-model="settings.subregionFile" type="text" class="input-xlarge" autocomplete="off" />
     </form>
   </div>
   <div class="modal-footer">


