climate-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From huiky...@apache.org
Subject [1/2] climate git commit: CLIMATE-855 - Fix test_local and test_dataset_processor
Date Wed, 17 Aug 2016 18:57:11 GMT
Repository: climate
Updated Branches:
  refs/heads/master 817c854ba -> 28964ae74


CLIMATE-855 - Fix test_local and test_dataset_processor

- test_local and test_dataset_processor have been updated


Project: http://git-wip-us.apache.org/repos/asf/climate/repo
Commit: http://git-wip-us.apache.org/repos/asf/climate/commit/cce14dab
Tree: http://git-wip-us.apache.org/repos/asf/climate/tree/cce14dab
Diff: http://git-wip-us.apache.org/repos/asf/climate/diff/cce14dab

Branch: refs/heads/master
Commit: cce14dab8982f1591e4b5d57b786b1c5315039a4
Parents: 817c854
Author: huikyole <huikyole@argo.jpl.nasa.gov>
Authored: Tue Aug 16 15:56:18 2016 -0700
Committer: huikyole <huikyole@argo.jpl.nasa.gov>
Committed: Tue Aug 16 15:56:18 2016 -0700

----------------------------------------------------------------------
 ocw/data_source/local.py            |  2 +-
 ocw/dataset_processor.py            | 25 +++++--------------------
 ocw/tests/test_dataset_processor.py |  8 ++++----
 ocw/tests/test_local.py             | 18 +++++++++---------
 4 files changed, 19 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/climate/blob/cce14dab/ocw/data_source/local.py
----------------------------------------------------------------------
diff --git a/ocw/data_source/local.py b/ocw/data_source/local.py
index 98de937..d32cf1b 100644
--- a/ocw/data_source/local.py
+++ b/ocw/data_source/local.py
@@ -323,7 +323,7 @@ def load_multiple_files(file_path,
     :param time_name: (Optional) The time variable name to extract from the
         dataset.
     :type time_name: :mod:`string`
-    :returns: An array of OCW Dataset objects, an array of dataset names
+    :returns: An array of OCW Dataset objects
     :rtype: :class:`list`
     '''
 

http://git-wip-us.apache.org/repos/asf/climate/blob/cce14dab/ocw/dataset_processor.py
----------------------------------------------------------------------
diff --git a/ocw/dataset_processor.py b/ocw/dataset_processor.py
index 8691534..072f771 100755
--- a/ocw/dataset_processor.py
+++ b/ocw/dataset_processor.py
@@ -395,8 +395,7 @@ def subset(target_dataset, subregion, subregion_name=None, extract=True,
user_ma
         subregion_name = target_dataset.name
 
     if hasattr(subregion, 'lat_min'):
-        #_are_bounds_contained_by_dataset(target_dataset, subregion)   
-        # this boundary check is not necessary with the updated Bounds and subset
+        _are_bounds_contained_by_dataset(target_dataset, subregion)   
 
         if target_dataset.lats.ndim == 2 and target_dataset.lons.ndim == 2:
             start_time_index = np.where(
@@ -1424,28 +1423,14 @@ def _are_bounds_contained_by_dataset(dataset, bounds):
 
     # TODO:  THIS IS TERRIBLY inefficent and we need to use a geometry
     # lib instead in the future
-    if not (np.round(lat_min, 3) <= np.round(bounds.lat_min, 3) <=
-            np.round(lat_max, 3)):
-        error = ("bounds.lat_min: %s is not between lat_min: %s and"
-                 " lat_max: %s" % (bounds.lat_min, lat_min, lat_max))
-        errors.append(error)
-
-    if not (np.round(lat_min, 3) <= np.round(bounds.lat_max, 3) <=
-            np.round(lat_max, 3)):
+    if (lat_min > bounds.lat_max):
         error = ("bounds.lat_max: %s is not between lat_min: %s and"
-                 "lat_max: %s" % (bounds.lat_max, lat_min, lat_max))
-        errors.append(error)
-
-    if not (np.round(lon_min, 3) <= np.round(bounds.lon_min, 3) <=
-            np.round(lon_max, 3)):
-        error = ("bounds.lon_min: %s is not between lon_min: %s and"
-                 "lon_max: %s" % (bounds.lon_min, lon_min, lon_max))
+                 " lat_max: %s" % (bounds.lat_max, lat_min, lat_max))
         errors.append(error)
 
-    if not (np.round(lon_min, 3) <= np.round(bounds.lon_max, 3) <=
-            np.round(lon_max, 3)):
+    if (lon_min > bounds.lon_max):
         error = ("bounds.lon_max: %s is not between lon_min: %s and"
-                 "lon_max: %s" % (bounds.lon_max, lon_min, lon_max))
+                 " lon_max: %s" % (bounds.lon_max, lon_min, lon_max))
         errors.append(error)
 
     if not start <= bounds.start <= end:

http://git-wip-us.apache.org/repos/asf/climate/blob/cce14dab/ocw/tests/test_dataset_processor.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_dataset_processor.py b/ocw/tests/test_dataset_processor.py
index 0f8131e..42bdc2c 100644
--- a/ocw/tests/test_dataset_processor.py
+++ b/ocw/tests/test_dataset_processor.py
@@ -592,22 +592,22 @@ class TestFailingSubset(unittest.TestCase):
         )
 
     def test_out_of_dataset_bounds_lat_min(self):
-        self.subregion.lat_min = -90
+        self.subregion.lat_max = -90
         with self.assertRaises(ValueError):
             dp.subset(self.target_dataset, self.subregion)
 
     def test_out_of_dataset_bounds_lat_max(self):
-        self.subregion.lat_max = 90
+        self.subregion.lat_min = 90
         with self.assertRaises(ValueError):
             dp.subset(self.target_dataset, self.subregion)
 
     def test_out_of_dataset_bounds_lon_min(self):
-        self.subregion.lon_min = -180
+        self.subregion.lon_max = -180
         with self.assertRaises(ValueError):
             dp.subset(self.target_dataset, self.subregion)
 
     def test_out_of_dataset_bounds_lon_max(self):
-        self.subregion.lon_max = 180
+        self.subregion.lon_min = 180
         with self.assertRaises(ValueError):
             dp.subset(self.target_dataset, self.subregion)
 

http://git-wip-us.apache.org/repos/asf/climate/blob/cce14dab/ocw/tests/test_local.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_local.py b/ocw/tests/test_local.py
index f3226f7..5f40466 100644
--- a/ocw/tests/test_local.py
+++ b/ocw/tests/test_local.py
@@ -117,17 +117,17 @@ class TestLoadMultipleFiles(unittest.TestCase):
         os.remove(self.file_path)
 
     def test_function_load_multiple_files_data_name(self):
-        dataset, data_name = local.load_multiple_files(self.file_path, "value")
-        self.assertEqual(data_name, ['model'])
+        dataset = local.load_multiple_files(self.file_path, "value")
+        self.assertEqual([dataset[0].name], ['model'])
 
     def test_function_load_multiple_files_lons(self):
         """To test load_multiple_file function for longitudes"""
-        dataset, data_name = local.load_multiple_files(self.file_path, "value")
+        dataset = local.load_multiple_files(self.file_path, "value")
         self.assertItemsEqual(dataset[0].lons, self.longitudes)
 
     def test_function_load_multiple_files_times(self):
         """To test load_multiple_files function for times"""
-        dataset, data_name = local.load_multiple_files(self.file_path, "value")
+        dataset = local.load_multiple_files(self.file_path, "value")
 
         newTimes = datetime.datetime(2001, 01, 01), datetime.datetime(
             2001, 02, 01), datetime.datetime(2001, 03, 01)
@@ -136,19 +136,19 @@ class TestLoadMultipleFiles(unittest.TestCase):
     def test_function_load_multiple_files_values(self):
         """To test load_multiple_files function for values"""
         new_values = self.values[:, 0, :, :]
-        dataset, data_name = local.load_multiple_files(
+        dataset = local.load_multiple_files(
             self.file_path, "value")
         self.assertTrue(numpy.allclose(dataset[0].values, new_values))
 
     def test_load_multiple_files_custom_dataset_name(self):
         """Test adding a custom name to a dataset"""
-        dataset, data_name = local.load_multiple_files(self.file_path,
-                                                       "value",
-                                                       dataset_name='foo')
+        dataset = local.load_multiple_files(self.file_path,
+                                                   "value",
+                                                   dataset_name='foo')
         self.assertEqual(dataset[0].name, 'foo')
 
     def test_dataset_origin(self):
-        dataset, data_name = local.load_multiple_files(self.file_path, 'value')
+        dataset = local.load_multiple_files(self.file_path, 'value')
         expected_keys = set(['source', 'path', 'lat_name', 'lon_name',
                              'time_name'])
         self.assertEqual(set(dataset[0].origin.keys()), expected_keys)


Mime
View raw message