Subject: svn commit: r1518315 - in /incubator/climate/trunk/ocw: evaluation.py metrics.py tests/test_evaluation.py
Date: Wed, 28 Aug 2013 18:21:02 -0000
To: commits@climate.incubator.apache.org
From: goodale@apache.org

Author: goodale
Date: Wed Aug 28 18:21:01 2013
New Revision: 1518315

URL: http://svn.apache.org/r1518315
Log:
CLIMATE-281: Updating Evaluation Class (and Metrics)

* Added a test to cover the Evaluation.run() method using a simple Bias() calculation
* Updated all the methods in Evaluation so they properly use self
* Fixed a nested-list result when running metric.run() within _run_no_subregion_evaluation
* Updated the docstrings and return values in metrics.py so they now return NumPy arrays across the board

Modified:
    incubator/climate/trunk/ocw/evaluation.py
    incubator/climate/trunk/ocw/metrics.py
    incubator/climate/trunk/ocw/tests/test_evaluation.py

Modified: incubator/climate/trunk/ocw/evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/evaluation.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/evaluation.py (original)
+++ incubator/climate/trunk/ocw/evaluation.py Wed Aug 28 18:21:01 2013
@@ -222,14 +222,14 @@ class Evaluation(object):
             logging.warning(error)
             return
 
-        if _should_run_regular_metrics():
+        if self._should_run_regular_metrics():
             if self.subregions:
-                self.results = _run_subregion_evaluation()
+                self.results = self._run_subregion_evaluation()
             else:
-                self.results = _run_no_subregion_evaluation()
+                self.results = self._run_no_subregion_evaluation()
 
-        if _should_run_unary_metrics():
-            self.unary_results = _run_unary_metric_evaluation()
+        if self._should_run_unary_metrics():
+            self.unary_results = self._run_unary_metric_evaluation()
 
     def _evaluation_is_valid(self):
         '''Check if the evaluation is well-formed.
@@ -240,8 +240,8 @@ class Evaluation(object):
         * If there is a regular metric there must be a reference dataset
             and at least one target dataset.
         '''
-        run_reg = _should_run_regular_metrics()
-        run_unary = _should_run_unary_metrics()
+        run_reg = self._should_run_regular_metrics()
+        run_unary = self._should_run_unary_metrics()
         reg_valid = self.ref_dataset != None and len(self.target_datasets) > 0
         unary_valid = self.ref_dataset != None or len(self.target_datasets) > 0
 
@@ -254,13 +254,13 @@ class Evaluation(object):
         else:
             return False
 
-    def _should_run_regular_metrics():
+    def _should_run_regular_metrics(self):
         return len(self.metrics) > 0
 
-    def _should_run_unary_metrics():
+    def _should_run_unary_metrics(self):
         return len(self.unary_metrics) > 0
 
-    def _run_subregion_evaluation():
+    def _run_subregion_evaluation(self):
         results = []
         for target in self.target_datasets:
             results.append([])
@@ -276,16 +276,17 @@ class Evaluation(object):
                     results[-1][-1].append(run_result)
         return results
 
-    def _run_no_subregion_evaluation():
+    def _run_no_subregion_evaluation(self):
         results = []
         for target in self.target_datasets:
             results.append([])
             for metric in self.metrics:
-                run_result = [metric.run(self.ref_dataset, target)]
+                datasets = (self.ref_dataset, target)
+                run_result = metric.run(datasets)
                 results[-1].append(run_result)
         return results
 
-    def _run_unary_metric_evaluation():
+    def _run_unary_metric_evaluation(self):
         unary_results = []
         for metric in self.unary_metrics:
             unary_results.append([])
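A minimal sketch of why the self. changes above are needed; the class and
method names below are illustrative, not from OCW:

    class Example(object):
        def _helper(self):
            return 42

        def run(self):
            # A bare `_helper()` call here would raise NameError, because
            # Python resolves it as a module-level name; an instance method
            # has to be reached through `self`, which is the fix applied
            # throughout Evaluation above.
            return self._helper()

    print(Example().run())  # 42

The same diff also stops wrapping each result in an extra list inside
_run_no_subregion_evaluation: metric.run() is now handed a (reference,
target) tuple and its return value is appended directly, so results[-1]
holds the metric outputs themselves rather than one-element lists.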
Modified: incubator/climate/trunk/ocw/metrics.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/metrics.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/metrics.py (original)
+++ incubator/climate/trunk/ocw/metrics.py Wed Aug 28 18:21:01 2013
@@ -46,8 +46,8 @@ class Metric():
             binary, then datasets[0] contains the reference dataset and \
             datasets[1] contains the target dataset.
         :type datasets: Tuple
-        :returns: A list containing the results of running the metric.
-        :trype: List
+        :returns: An array containing the results of running the metric.
+        :rtype: Numpy Array
         '''
 
@@ -72,11 +72,11 @@ class Bias(Metric):
             reference dataset is given in datasets[0] and the target \
             dataset is given in datasets[1].
         :type datasets: Tuple
-        :returns: A list containing the difference between the reference \
-            dataset and the target dataset.
-        :rtype: List
+        :returns: An array containing the difference between the reference \
+            dataset and the target dataset.
+        :rtype: Numpy Array
         '''
-        return [datasets[0].values - datasets[1].values]
+        return datasets[0].values - datasets[1].values
 
 
 class TemporalStdDev(Metric):
@@ -99,7 +99,7 @@ class TemporalStdDev(Metric):
         :param datasets: The datasets on which to calculate the temporal \
             std. dev. in datasets[0].
         :type datasets: Tuple
-        :returns: A list containing the temporal std. dev.
-        :rtype: List
+        :returns: An array containing the temporal std. dev.
+        :rtype: Numpy Array
         '''
         return datasets[0].values.std(axis=0, ddof=1)
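Taken together, the metrics.py changes settle on one convention: run()
receives a tuple of datasets and returns a bare NumPy array. A hedged
sketch of that contract, using an illustrative stand-in for
ocw.dataset.Dataset (only the .values attribute is assumed here):

    import numpy as np

    class FakeDataset(object):
        '''Illustrative stand-in for ocw.dataset.Dataset.'''
        def __init__(self, values):
            self.values = values

    class Bias(object):
        def run(self, datasets):
            # datasets is a (reference, target) tuple; the result is now
            # a bare NumPy array rather than a single-element list.
            return datasets[0].values - datasets[1].values

    ref = FakeDataset(np.ones((2, 3)))
    target = FakeDataset(np.zeros((2, 3)))
    result = Bias().run((ref, target))
    print(result.shape)  # (2, 3) -- the bias grid matches the input shape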
Modified: incubator/climate/trunk/ocw/tests/test_evaluation.py
URL: http://svn.apache.org/viewvc/incubator/climate/trunk/ocw/tests/test_evaluation.py?rev=1518315&r1=1518314&r2=1518315&view=diff
==============================================================================
--- incubator/climate/trunk/ocw/tests/test_evaluation.py (original)
+++ incubator/climate/trunk/ocw/tests/test_evaluation.py Wed Aug 28 18:21:01 2013
@@ -117,6 +117,13 @@ class TestEvaluation(unittest.TestCase):
         self.assertEqual(len(self.eval.metrics), 0)
         self.eval.add_metrics([Bias(), Bias()])
         self.assertEqual(len(self.eval.metrics), 2)
+
+    def test_bias_output_shape(self):
+        bias_eval = Evaluation(self.test_dataset, [self.another_test_dataset], [Bias()])
+        bias_eval.run()
+        input_shape = tuple(self.test_dataset.values.shape)
+        bias_results_shape = tuple(bias_eval.results[0][0].shape)
+        self.assertEqual(input_shape, bias_results_shape)
 
 if __name__ == '__main__':
     unittest.main()
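The new test relies on how Evaluation nests its results: the outer index
selects the target dataset and the inner index selects the metric, so
results[0][0] is the Bias array for the only target. A small sketch of
that shape check with illustrative data:

    import numpy as np

    values = np.arange(12, dtype=float).reshape(3, 4)
    # Stand-in for bias_eval.results with one target dataset and one
    # Bias metric: outer index = target, inner index = metric.
    results = [[values - values]]
    assert results[0][0].shape == values.shape  # Bias preserves the grid shape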