climate-commits mailing list archives

From huiky...@apache.org
Subject [1/7] climate git commit: CLIMATE-658 - Restructure evaluation results to handle missing data values
Date Tue, 25 Aug 2015 17:06:51 GMT
Repository: climate
Updated Branches:
  refs/heads/master 45779fb98 -> 77443d7ad


CLIMATE-658 - Restructure evaluation results to handle missing data values

- the evaluation results now preserve missing-value information by collecting each metric's output for all target datasets into a single masked array (see the sketch below)
- update _run_no_subregion_evaluation and _run_unary_metric_evaluation in evaluation.py accordingly
- update test_evaluation to match the new result layout
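
A minimal sketch of the new stacking pattern (illustrative only, not part of the
commit; the toy bias values and shapes below are invented for demonstration):

    import numpy as np
    import numpy.ma as ma

    # Pretend per-target "bias" results, each a 2x2 field with one missing cell.
    per_target = [
        ma.masked_invalid(np.array([[1.0, np.nan], [0.5, 0.2]])),
        ma.masked_invalid(np.array([[0.3, 0.1], [np.nan, 0.4]])),
    ]

    # Same pattern as the updated _run_no_subregion_evaluation: allocate a masked
    # array with a leading "target dataset" axis, then fill it per target.
    shape = (len(per_target),) + per_target[0].shape
    stacked = ma.zeros(shape)
    for i, result in enumerate(per_target):
        stacked[i, :] = result

    print(stacked.shape)  # (2, 2, 2)
    print(stacked.mask)   # the missing cells remain masked in the stacked result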


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/c088cfab
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/c088cfab
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/c088cfab

Branch: refs/heads/master
Commit: c088cfabd9f9df760a83f8b9f003e26a11730a29
Parents: d4eeb03
Author: huikyole <huikyole@argo.jpl.nasa.gov>
Authored: Wed Aug 12 01:09:23 2015 -0700
Committer: huikyole <huikyole@argo.jpl.nasa.gov>
Committed: Wed Aug 12 01:09:23 2015 -0700

----------------------------------------------------------------------
 ocw/evaluation.py            | 29 +++++++++++++++++++----------
 ocw/tests/test_evaluation.py | 29 +++++++++--------------------
 2 files changed, 28 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/c088cfab/ocw/evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index 89e34b4..11c1ae3 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -25,6 +25,8 @@ from metrics import Metric, UnaryMetric, BinaryMetric
 from dataset import Dataset, Bounds
 import ocw.dataset_processor as DSP
 
+import numpy.ma as ma
+
 logger = logging.getLogger(__name__)
 
 class Evaluation(object):
@@ -289,23 +291,30 @@ class Evaluation(object):
 
     def _run_no_subregion_evaluation(self):
         results = []
-        for target in self.target_datasets:
-            results.append([])
-            for metric in self.metrics:
-                run_result = metric.run(self.ref_dataset, target)
-                results[-1].append(run_result)
+        for metric in self.metrics:
+            run_result_shape = list((metric.run(self.ref_dataset, self.target_datasets[0])).shape)
+            run_result_shape.insert(0, len(self.target_datasets))
+            run_result = ma.zeros(run_result_shape)
+
+            for itarget, target in enumerate(self.target_datasets):
+                run_result[itarget,:] = metric.run(self.ref_dataset, target)
+            results.append(run_result)
         return results
 
     def _run_unary_metric_evaluation(self):
         unary_results = []
         for metric in self.unary_metrics:
-            unary_results.append([])
             # Unary metrics should be run over the reference Dataset also
             if self.ref_dataset:
-                unary_results[-1].append(metric.run(self.ref_dataset))
-
-            for target in self.target_datasets:
-                unary_results[-1].append(metric.run(target))
+                unary_results.append(metric.run(self.ref_dataset))
+
+            unary_result_shape = list((metric.run(self.target_datasets[0])).shape)
+            unary_result_shape.insert(0, len(self.target_datasets))
+            unary_result = ma.zeros(unary_result_shape)
+            for itarget, target in enumerate(self.target_datasets):
+                unary_result[itarget,:] = metric.run(target)
+            unary_results.append(unary_result)
+
         return unary_results
 
     def _run_subregion_unary_evaluation(self):
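
Note that this flips the indexing of the stored results: with the old nested
lists the value for a given pairing was results[target_index][metric_index],
whereas the new layout keeps one masked array per metric, indexed as
results[metric_index][target_index, ...]. Keeping each metric's output in a
single masked array is what lets the missing-value masks survive aggregation
across target datasets.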

http://git-wip-us.apache.org/repos/asf/climate/blob/c088cfab/ocw/tests/test_evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_evaluation.py b/ocw/tests/test_evaluation.py
index 4a02714..c2b85de 100644
--- a/ocw/tests/test_evaluation.py
+++ b/ocw/tests/test_evaluation.py
@@ -128,40 +128,29 @@ class TestEvaluation(unittest.TestCase):
     def test_result_shape(self):
         bias_eval = Evaluation(
             self.test_dataset,
-            [self.another_test_dataset, self.another_test_dataset],
-            [Bias()]
+            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
+            [Bias(), Bias()]
         )
         bias_eval.run()
 
         # Expected result shape is
-        # [
-        #   [
-        #       bias.run(reference, target1)
-        #   ],
-        #   [
-        #       bias.run(reference, target2)
-        #   ]
-        # ]
+        # [bias, bias] where bias.shape[0] = number of datasets
         self.assertTrue(len(bias_eval.results) == 2)
-        self.assertTrue(len(bias_eval.results[0]) == 1)
-        self.assertTrue(len(bias_eval.results[1]) == 1)
+        self.assertTrue(bias_eval.results[0].shape[0] == 3)
 
     def test_unary_result_shape(self):
         new_eval = Evaluation(
             self.test_dataset,
-            [self.another_test_dataset, self.another_test_dataset],
+            [self.another_test_dataset, self.another_test_dataset, self.another_test_dataset, self.another_test_dataset],
             [TemporalStdDev()]
         )
         new_eval.run()
 
         # Expected result shape is
-        # [
-        #   temporalstddev.run(reference),
-        #   temporalstddev.run(target1),
-        #   temporalstddev.run(target2)
-        # ]
-        self.assertTrue(len(new_eval.unary_results) == 1)
-        self.assertTrue(len(new_eval.unary_results[0]) == 3)
+        # [stddev] where stddev.shape[0] = number of datasets
+
+        self.assertTrue(len(new_eval.unary_results) == 2)
+        self.assertTrue(new_eval.unary_results[1].shape[0] == 4)
 
     def test_subregion_result_shape(self):
         bound = Bounds(
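
For context, here is a sketch of how the unary path's new layout lines up with
the updated test expectations (illustrative only, not part of the commit; the
toy metric and array sizes below are invented):

    import numpy as np
    import numpy.ma as ma

    def toy_unary_metric(values):
        # Stand-in for a unary metric such as TemporalStdDev (illustration only):
        # standard deviation over the leading (time) axis, honoring masks.
        return ma.masked_invalid(values).std(axis=0)

    ref = np.random.rand(12, 4, 5)                          # pretend reference values
    targets = [np.random.rand(12, 4, 5) for _ in range(4)]  # pretend target datasets

    unary_results = []
    # The reference result is appended on its own, as in the updated
    # _run_unary_metric_evaluation.
    unary_results.append(toy_unary_metric(ref))

    # Target results are stacked into one masked array, target index first.
    shape = (len(targets),) + toy_unary_metric(targets[0]).shape
    stacked = ma.zeros(shape)
    for i, target in enumerate(targets):
        stacked[i, :] = toy_unary_metric(target)
    unary_results.append(stacked)

    print(len(unary_results))         # 2 -> matches test_unary_result_shape
    print(unary_results[1].shape[0])  # 4 -> one entry per target dataset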

