climate-commits mailing list archives

From jo...@apache.org
Subject svn commit: r1570446 - in /incubator/climate/trunk/ocw-ui/backend: processing.py tests/test_processing.py
Date Fri, 21 Feb 2014 04:37:59 GMT
Author: joyce
Date: Fri Feb 21 04:37:58 2014
New Revision: 1570446

URL: http://svn.apache.org/r1570446
Log:
CLIMATE-332 - Add full eval unary plot title generation and test

- Fix lines wider than 80 characters in _generate_unary_eval_plot_title.
- Remove temp 'metric' variable that served no purpose.
- Add tests for proper title generation on full evaluation (i.e., an
  evaluation that has a reference dataset and at least 1 target
  dataset).

Modified:
    incubator/climate/trunk/ocw-ui/backend/processing.py
    incubator/climate/trunk/ocw-ui/backend/tests/test_processing.py

Modified: incubator/climate/trunk/ocw-ui/backend/processing.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw-ui/backend/processing.py?rev=1570446&r1=1570445&r2=1570446&view=diff
==============================================================================
--- incubator/climate/trunk/ocw-ui/backend/processing.py (original)
+++ incubator/climate/trunk/ocw-ui/backend/processing.py Fri Feb 21 04:37:58 2014
@@ -484,34 +484,35 @@ def _generate_binary_eval_plot_title(eva
     )
 
 def _generate_unary_eval_plot_title(evaluation, dataset_index, metric_index):
-    ''' Generate a plot title for a given unary metric run over a specified target dataset.
+    ''' Generate a plot title for a given unary metric run.
 
     :param evaluation: The Evaluation object from which to pull name information.
     :type evaluation: ocw.evaluation.Evaluation
-    :param dataset_index: The index of the target dataset to use when generating the name.
+    :param dataset_index: The index of the target dataset to use when 
+        generating the name.
     :type dataset_index: Integer >= 0 < len(evaluation.target_datasets)
     :param metric_index: The index of the metric to use when generating the name.
     :type metric_index: Integer >= 0 < len(evaluation.metrics)
 
     :returns: The plot title for the requested metric run.
     '''
-    metric = evaluation.unary_metrics[metric_index]
 
-    # Unary metrics can be run over both the reference dataset and the target datasets. It's
-    # possible for an evaluation to only have one and not the other. If there is a reference
-    # dataset then the 0th result index refers to the result of the metric being run on the
-    # reference dataset. Any future indexes into the target dataset list must then be offset
-    # by one. If there's no reference dataset then we don't have to bother with any of this.
+    # Unary metrics can be run over both the reference dataset and the target 
+    # datasets. It's possible for an evaluation to only have one and not the 
+    # other. If there is a reference dataset then the 0th result index refers to 
+    # the result of the metric being run on the reference dataset. Any future 
+    # indexes into the target dataset list must then be offset by one. If 
+    # there's no reference dataset then we don't have to bother with any of this.
     if evaluation.ref_dataset:
         if dataset_index == 0:
             return "{} of {}".format(
-                metric.__class__.__name__,
-                evaluaton.ref_dataset.name
+                evaluation.unary_metrics[metric_index].__class__.__name__,
+                evaluation.ref_dataset.name
             )
         else:
             dataset_index -= 1
 
     return "{} of {}".format(
-        metric.__class__.__name__,
+        evaluation.unary_metrics[metric_index].__class__.__name__,
         evaluation.target_datasets[dataset_index].name
     )
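
The indexing rule described in the comment above is easiest to see with a
concrete call sequence. The following is a minimal, self-contained sketch;
the evaluation and dataset classes are hypothetical stand-ins (only the
attributes the title generator reads are modeled), not the real ocw objects:

    # Hypothetical stand-ins for ocw objects; only the attributes that
    # _generate_unary_eval_plot_title reads are modeled here.
    class FakeDataset(object):
        def __init__(self, name):
            self.name = name

    class TemporalStdDev(object):
        '''Stand-in for a unary metric such as ocw.metrics.TemporalStdDev.'''
        pass

    class FakeEvaluation(object):
        def __init__(self, ref_dataset, target_datasets, unary_metrics):
            self.ref_dataset = ref_dataset
            self.target_datasets = target_datasets
            self.unary_metrics = unary_metrics

    evaluation = FakeEvaluation(
        FakeDataset('Ref'),
        [FakeDataset('T1'), FakeDataset('T2')],
        [TemporalStdDev()]
    )

    # With a reference dataset present, dataset_index 0 names the reference
    # and indexes 1..N are offset by one onto target_datasets[0..N-1].
    print(_generate_unary_eval_plot_title(evaluation, 0, 0))  # TemporalStdDev of Ref
    print(_generate_unary_eval_plot_title(evaluation, 1, 0))  # TemporalStdDev of T1
    print(_generate_unary_eval_plot_title(evaluation, 2, 0))  # TemporalStdDev of T2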

Modified: incubator/climate/trunk/ocw-ui/backend/tests/test_processing.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw-ui/backend/tests/test_processing.py?rev=1570446&r1=1570445&r2=1570446&view=diff
==============================================================================
--- incubator/climate/trunk/ocw-ui/backend/tests/test_processing.py (original)
+++ incubator/climate/trunk/ocw-ui/backend/tests/test_processing.py Fri Feb 21 04:37:58 2014
@@ -175,6 +175,17 @@ class TestPlotTitleCreation(unittest.Tes
             'Bias of Ref compared to T1'
         )
 
+    def test_unary_plot_title_generation_full_eval(self):
+        self.assertEqual(
+            bp._generate_unary_eval_plot_title(self.full_evaluation, 0, 0),
+            'TemporalStdDev of Ref'
+        )
+
+        self.assertEqual(
+            bp._generate_unary_eval_plot_title(self.full_evaluation, 1, 0),
+            'TemporalStdDev of T1'
+        )
+
 def _create_fake_dataset(name):
     lats = numpy.array(range(-10, 25, 1))
     lons = numpy.array(range(-30, 40, 1))
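
The new test references a full_evaluation fixture whose setUp is not part of
this diff. A hedged sketch of how such a fixture might be assembled, assuming
the ocw Evaluation constructor takes (reference, targets, metrics) and sorts
unary metrics such as TemporalStdDev into evaluation.unary_metrics:

    from ocw.evaluation import Evaluation
    from ocw.metrics import TemporalStdDev

    # Illustrative only; the real fixture lives in TestPlotTitleCreation.setUp.
    ref_dataset = _create_fake_dataset('Ref')
    target_dataset = _create_fake_dataset('T1')
    full_evaluation = Evaluation(ref_dataset, [target_dataset],
                                 [TemporalStdDev()])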


