climate-commits mailing list archives

From jo...@apache.org
Subject [2/8] climate git commit: CLIMATE-637 - Subregion clean up and improvements
Date Mon, 01 Jun 2015 18:02:35 GMT
CLIMATE-637 - Subregion clean up and improvements

- Add tests for unary/binary evaluation result output formatting to
  ensure that data is nested properly in the result lists.
- Add subregion support for unary metrics.
- Adjust the order in which subregions are computed during an evaluation
  to improve performance (see the sketch after this list).
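
The performance change hoists the per-subregion subsetting out of the inner
metric loop: the reference dataset and each target are now subset once per
subregion, rather than re-subset on every metric iteration. A minimal sketch
of the idea, using hypothetical subset() and metric stand-ins rather than the
actual ocw API:

    # Hypothetical stand-ins, not the real ocw objects.
    def subset(subregion, dataset):
        return (subregion, dataset)        # placeholder for an expensive crop

    class EchoMetric(object):
        def run(self, ref, target):
            return (ref, target)           # placeholder metric result

    def run_subregion_evaluation(ref_dataset, targets, metrics, subregions):
        # Subset the reference once per subregion, outside every loop.
        new_refs = [subset(s, ref_dataset) for s in subregions]

        results = []
        for target in targets:
            results.append([])
            # Subset each target once per subregion, outside the metric loop.
            new_targets = [subset(s, target) for s in subregions]

            for metric in metrics:
                results[-1].append([])
                for i in range(len(subregions)):
                    results[-1][-1].append(
                        metric.run(new_refs[i], new_targets[i]))
        return results

    results = run_subregion_evaluation('ref', ['t1', 't2'],
                                       [EchoMetric()], ['sub0', 'sub1'])
    # Indexing: results[target][metric][subregion]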


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/f4849f27
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/f4849f27
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/f4849f27

Branch: refs/heads/master
Commit: f4849f272a71a73ce955828deb811f625305d85a
Parents: 6ab02d8
Author: Michael Joyce <joyce@apache.org>
Authored: Thu May 28 09:37:14 2015 -0700
Committer: Michael Joyce <joyce@apache.org>
Committed: Thu May 28 09:37:14 2015 -0700

----------------------------------------------------------------------
 ocw/evaluation.py            |  42 ++++++++++++---
 ocw/tests/test_evaluation.py | 107 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 143 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/f4849f27/ocw/evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index 96c19ad..9581d96 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -234,7 +234,10 @@ class Evaluation(object):
                 self.results = self._run_no_subregion_evaluation()
 
         if self._should_run_unary_metrics():
-            self.unary_results = self._run_unary_metric_evaluation()
+            if self.subregions:
+                self.unary_results = self._run_subregion_unary_evaluation()
+            else:
+                self.unary_results = self._run_unary_metric_evaluation()
 
     def _evaluation_is_valid(self):
         '''Check if the evaluation is well-formed.
@@ -267,15 +270,18 @@ class Evaluation(object):
 
     def _run_subregion_evaluation(self):
         results = []
+        new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
+
         for target in self.target_datasets:
             results.append([])
+            new_targets = [DSP.subset(s, target) for s in self.subregions]
+
             for metric in self.metrics:
                 results[-1].append([])
-                for subregion in self.subregions:
-                    # Subset the reference and target dataset with the 
-                    # subregion information.
-                    new_ref = DSP.subset(subregion, self.ref_dataset)
-                    new_tar = DSP.subset(subregion, target)
+
+                for i in range(len(self.subregions)):
+                    new_ref = new_refs[i]
+                    new_tar = new_targets[i]
 
                     run_result = metric.run(new_ref, new_tar)
                     results[-1][-1].append(run_result)
@@ -302,6 +308,30 @@ class Evaluation(object):
                 unary_results[-1].append(metric.run(target))
         return unary_results
 
+    def _run_subregion_unary_evaluation(self):
+        unary_results = []
+        if self.ref_dataset:
+            new_refs = [DSP.subset(s, self.ref_dataset) for s in self.subregions]
+
+        new_targets = [
+            [DSP.subset(s, t) for s in self.subregions]
+            for t in self.target_datasets
+        ]
+
+        for metric in self.unary_metrics:
+            unary_results.append([])
+
+            for i in range(len(self.subregions)):
+                unary_results[-1].append([])
+
+                if self.ref_dataset:
+                    unary_results[-1][-1].append(metric.run(new_refs[i]))
+
+                for t in range(len(self.target_datasets)):
+                    unary_results[-1][-1].append(metric.run(new_targets[t][i]))
+
+        return unary_results
+
     def __str__(self):
         formatted_repr = (
             "<Evaluation - ref_dataset: {}, "

http://git-wip-us.apache.org/repos/asf/climate/blob/f4849f27/ocw/tests/test_evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_evaluation.py b/ocw/tests/test_evaluation.py
index 9e8d126..4a02714 100644
--- a/ocw/tests/test_evaluation.py
+++ b/ocw/tests/test_evaluation.py
@@ -125,5 +125,112 @@ class TestEvaluation(unittest.TestCase):
         bias_results_shape = tuple(bias_eval.results[0][0].shape)
         self.assertEqual(input_shape, bias_results_shape)
 
+    def test_result_shape(self):
+        bias_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [Bias()]
+        )
+        bias_eval.run()
+
+        # Expected result shape is
+        # [
+        #   [
+        #       bias.run(reference, target1)
+        #   ],
+        #   [
+        #       bias.run(reference, target2)
+        #   ]
+        # ]
+        self.assertTrue(len(bias_eval.results) == 2)
+        self.assertTrue(len(bias_eval.results[0]) == 1)
+        self.assertTrue(len(bias_eval.results[1]) == 1)
+
+    def test_unary_result_shape(self):
+        new_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [TemporalStdDev()]
+        )
+        new_eval.run()
+
+        # Expected result shape is
+        # [
+        #   temporalstddev.run(reference),
+        #   temporalstddev.run(target1),
+        #   temporalstddev.run(target2)
+        # ]
+        self.assertTrue(len(new_eval.unary_results) == 1)
+        self.assertTrue(len(new_eval.unary_results[0]) == 3)
+
+    def test_subregion_result_shape(self):
+        bound = Bounds(
+                10, 18, 
+                100, 108, 
+                dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
+
+        bias_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [Bias()],
+            [bound]
+        )
+        bias_eval.run()
+
+        # Expected result shape is
+        # [
+        #   [
+        #       [   # Subregions cause this extra layer
+        #           bias.run(reference, target1)
+        #       ]
+        #   ],
+        #   [
+        #       [
+        #           bias.run(reference, target2)
+        #       ]
+        #   ]
+        # ]
+        self.assertTrue(len(bias_eval.results) == 2)
+
+        self.assertTrue(len(bias_eval.results[0]) == 1)
+        self.assertTrue(type(bias_eval.results[0]) == type([]))
+        self.assertTrue(len(bias_eval.results[1]) == 1)
+        self.assertTrue(type(bias_eval.results[1]) == type([]))
+
+        self.assertTrue(len(bias_eval.results[0][0]) == 1)
+        self.assertTrue(len(bias_eval.results[1][0]) == 1)
+
+    def test_subregion_unary_result_shape(self):
+        bound = Bounds(
+                10, 18, 
+                100, 108, 
+                dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))
+
+        new_eval = Evaluation(
+            self.test_dataset,
+            [self.another_test_dataset, self.another_test_dataset],
+            [TemporalStdDev()],
+            [bound]
+        )
+        new_eval.run()
+
+        # Expected result shape is
+        # [
+        #   [   
+        #       [   # Subregions cause this extra layer
+        #           temporalstddev.run(reference),
+        #           temporalstddev.run(target1),
+        #           temporalstddev.run(target2)
+        #       ]
+        #   ]
+        # ]
+        self.assertTrue(len(new_eval.unary_results) == 1)
+        self.assertTrue(type(new_eval.unary_results) == type([]))
+
+        self.assertTrue(len(new_eval.unary_results[0]) == 1)
+
+        self.assertTrue(len(new_eval.unary_results[0][0]) == 3)
+
+
 if __name__  == '__main__':
     unittest.main()

