arrow-commits mailing list archives

From: w...@apache.org
Subject: [arrow] branch master updated: ARROW-1711: [Python] Fix flake8 calls to lint the right directories
Date: Tue, 24 Oct 2017 16:57:28 GMT
This is an automated email from the ASF dual-hosted git repository.

wesm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new b08f7e3  ARROW-1711: [Python] Fix flake8 calls to lint the right directories
b08f7e3 is described below

commit b08f7e326b8fc1a000a277eef3bc84a0106d6c3c
Author: Wes McKinney <wes.mckinney@twosigma.com>
AuthorDate: Tue Oct 24 12:57:23 2017 -0400

    ARROW-1711: [Python] Fix flake8 calls to lint the right directories
    
    This got messed up during one of the patches in which these files were
    refactored. Once the build fails, I will fix the lint errors.
    
    Author: Wes McKinney <wes.mckinney@twosigma.com>
    
    Closes #1242 from wesm/ARROW-1711 and squashes the following commits:
    
    cd4b655d [Wes McKinney] Fix more flake8 warnings
    2eb8bf49 [Wes McKinney] Fix flake8 issues
    cef7a7c9 [Wes McKinney] Fix flake8 calls to lint the right directories
---
 ci/travis_lint.sh                          |  6 ++---
 python/pyarrow/array.pxi                   |  2 +-
 python/pyarrow/feather.py                  |  2 +-
 python/pyarrow/io-hdfs.pxi                 |  4 +--
 python/pyarrow/parquet.py                  |  2 +-
 python/pyarrow/serialization.py            | 16 +++---------
 python/pyarrow/tests/test_feather.py       |  2 +-
 python/pyarrow/tests/test_serialization.py | 41 ++++++++++++++++--------------
 8 files changed, 34 insertions(+), 41 deletions(-)

diff --git a/ci/travis_lint.sh b/ci/travis_lint.sh
index 9b7b474..8c95664 100755
--- a/ci/travis_lint.sh
+++ b/ci/travis_lint.sh
@@ -31,10 +31,10 @@ popd
 # Fail fast on style checks
 sudo pip install flake8
 
-PYARROW_DIR=$TRAVIS_BUILD_DIR/python
+PYARROW_DIR=$TRAVIS_BUILD_DIR/python/pyarrow
 
-flake8 --count $PYTHON_DIR/pyarrow
+flake8 --count $PYARROW_DIR
 
 # Check Cython files with some checks turned off
 flake8 --count --config=$PYTHON_DIR/.flake8.cython \
-       $PYTHON_DIR/pyarrow
+       $PYARROW_DIR
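
The script change above defines PYARROW_DIR as python/pyarrow and lints that
path directly, rather than composing it from the mismatched $PYTHON_DIR
variable. To reproduce the lint run locally without the CI script, flake8
also exposes a small programmatic API; a minimal sketch, assuming flake8
3.x's legacy API (not part of this commit):

    # Lint the pyarrow package the same way the CI step does.
    from flake8.api import legacy as flake8

    style_guide = flake8.get_style_guide()
    report = style_guide.check_files(['python/pyarrow'])
    print(report.total_errors)  # non-zero means lint failures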
diff --git a/python/pyarrow/array.pxi b/python/pyarrow/array.pxi
index c5f28a9..c596d2a 100644
--- a/python/pyarrow/array.pxi
+++ b/python/pyarrow/array.pxi
@@ -47,7 +47,7 @@ cdef _is_array_like(obj):
     try:
         import pandas
         return isinstance(obj, (np.ndarray, pd.Series, pd.Index, Categorical))
-    except:
+    except ImportError:
         return isinstance(obj, np.ndarray)
 
 
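The change above narrows a bare `except:` to `except ImportError:`. A bare
except also swallows unrelated failures such as typos, SystemExit, and
KeyboardInterrupt, which can mask real bugs. A standalone illustration of the
corrected idiom (simplified, not the actual Arrow code):

    import numpy as np

    def is_array_like(obj):
        try:
            import pandas as pd
            # With pandas available, accept pandas containers too.
            return isinstance(obj, (np.ndarray, pd.Series, pd.Index))
        except ImportError:
            # Only "pandas is not installed" lands here; any other error
            # (including Ctrl-C) still propagates, unlike a bare except.
            return isinstance(obj, np.ndarray)
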
diff --git a/python/pyarrow/feather.py b/python/pyarrow/feather.py
index aba76a0..2091c91 100644
--- a/python/pyarrow/feather.py
+++ b/python/pyarrow/feather.py
@@ -116,7 +116,7 @@ def write_feather(df, dest):
     writer = FeatherWriter(dest)
     try:
         writer.write(df)
-    except:
+    except Exception:
         # Try to make sure the resource is closed
         import gc
         writer = None
diff --git a/python/pyarrow/io-hdfs.pxi b/python/pyarrow/io-hdfs.pxi
index e6285e4..e653813 100644
--- a/python/pyarrow/io-hdfs.pxi
+++ b/python/pyarrow/io-hdfs.pxi
@@ -32,7 +32,7 @@ def have_libhdfs():
         with nogil:
             check_status(HaveLibHdfs())
         return True
-    except:
+    except Exception:
         return False
 
 
@@ -41,7 +41,7 @@ def have_libhdfs3():
         with nogil:
             check_status(HaveLibHdfs3())
         return True
-    except:
+    except Exception:
         return False
 
 
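have_libhdfs and have_libhdfs3 use a probe-and-catch idiom: attempt the
native call and report availability as a boolean. A generic sketch of the
same idiom using ctypes (a hypothetical helper, not Arrow's implementation):

    import ctypes

    def have_shared_library(name):
        # Probe for a native library by attempting to load it, converting
        # the loader error into a boolean instead of letting it escape.
        try:
            ctypes.CDLL(name)
            return True
        except OSError:
            return False

    # e.g. have_shared_library('libhdfs3.so') on Linux
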
diff --git a/python/pyarrow/parquet.py b/python/pyarrow/parquet.py
index b6a7b12..0a40f5f 100644
--- a/python/pyarrow/parquet.py
+++ b/python/pyarrow/parquet.py
@@ -915,7 +915,7 @@ def write_table(table, where, row_group_size=None, version='1.0',
             use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
             **kwargs)
         writer.write_table(table, row_group_size=row_group_size)
-    except:
+    except Exception:
         if writer is not None:
             writer.close()
         if isinstance(where, six.string_types):
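
The write_table change above sits inside a clean-up-then-re-raise block: on
any failure the writer is closed before the original exception propagates
(the `isinstance(where, six.string_types)` check suggests path targets get
additional cleanup beyond this hunk). A condensed sketch of the idiom, with
hypothetical names:

    import os

    def write_safely(write, path):
        # `write` is any callable that writes to the open file object.
        f = open(path, 'wb')
        try:
            write(f)
        except Exception:
            f.close()
            os.remove(path)  # drop the partial output
            raise            # clean up, but never hide the error
        f.close()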
diff --git a/python/pyarrow/serialization.py b/python/pyarrow/serialization.py
index eed6aae..9dc8ee6 100644
--- a/python/pyarrow/serialization.py
+++ b/python/pyarrow/serialization.py
@@ -20,10 +20,10 @@ import sys
 
 import numpy as np
 
-import pyarrow as pa
 from pyarrow import serialize_pandas, deserialize_pandas
 from pyarrow.lib import _default_serialization_context
 
+
 def register_default_serialization_handlers(serialization_context):
 
     # ----------------------------------------------------------------------
@@ -43,58 +43,48 @@ def register_default_serialization_handlers(serialization_context):
             custom_serializer=lambda obj: str(obj),
             custom_deserializer=lambda data: long(data))  # noqa: F821
 
-
     def _serialize_ordered_dict(obj):
         return list(obj.keys()), list(obj.values())
 
-
     def _deserialize_ordered_dict(data):
         return OrderedDict(zip(data[0], data[1]))
 
-
     serialization_context.register_type(
         OrderedDict, "OrderedDict",
         custom_serializer=_serialize_ordered_dict,
         custom_deserializer=_deserialize_ordered_dict)
 
-
     def _serialize_default_dict(obj):
         return list(obj.keys()), list(obj.values()), obj.default_factory
 
-
     def _deserialize_default_dict(data):
         return defaultdict(data[2], zip(data[0], data[1]))
 
-
     serialization_context.register_type(
         defaultdict, "defaultdict",
         custom_serializer=_serialize_default_dict,
         custom_deserializer=_deserialize_default_dict)
 
-
     serialization_context.register_type(
         type(lambda: 0), "function",
         pickle=True)
 
     # ----------------------------------------------------------------------
     # Set up serialization for numpy with dtype object (primitive types are
-    # handled efficiently with Arrow's Tensor facilities, see python_to_arrow.cc)
-
+    # handled efficiently with Arrow's Tensor facilities, see
+    # python_to_arrow.cc)
 
     def _serialize_numpy_array(obj):
         return obj.tolist(), obj.dtype.str
 
-
     def _deserialize_numpy_array(data):
         return np.array(data[0], dtype=np.dtype(data[1]))
 
-
     serialization_context.register_type(
         np.ndarray, 'np.array',
         custom_serializer=_serialize_numpy_array,
         custom_deserializer=_deserialize_numpy_array)
 
-
     # ----------------------------------------------------------------------
     # Set up serialization for pandas Series and DataFrame
 
diff --git a/python/pyarrow/tests/test_feather.py b/python/pyarrow/tests/test_feather.py
index 76f0844..810ee3c 100644
--- a/python/pyarrow/tests/test_feather.py
+++ b/python/pyarrow/tests/test_feather.py
@@ -289,7 +289,7 @@ class TestFeatherReader(unittest.TestCase):
         path = random_path()
         try:
             write_feather(df, path)
-        except:
+        except Exception:
             pass
 
         assert not os.path.exists(path)
diff --git a/python/pyarrow/tests/test_serialization.py b/python/pyarrow/tests/test_serialization.py
index 67798ac..7878a09 100644
--- a/python/pyarrow/tests/test_serialization.py
+++ b/python/pyarrow/tests/test_serialization.py
@@ -62,7 +62,7 @@ def assert_equal(obj1, obj2):
             # Workaround to make comparison of OrderedDicts work on Python 2.7
             if obj1 == obj2:
                 return
-        except:
+        except Exception:
             pass
         if obj1.__dict__ == {}:
             print("WARNING: Empty dict in ", obj1)
@@ -300,6 +300,7 @@ def test_datetime_serialization(large_memory_map):
         for d in data:
             serialization_roundtrip(d, mmap)
 
+
 def test_torch_serialization(large_memory_map):
     pytest.importorskip("torch")
     import torch
@@ -311,6 +312,7 @@ def test_torch_serialization(large_memory_map):
             obj = torch.from_numpy(np.random.randn(1000).astype(t))
             serialization_roundtrip(obj, mmap)
 
+
 def test_numpy_immutable(large_memory_map):
     with pa.memory_map(large_memory_map, mode="r+") as mmap:
         obj = np.zeros([10])
@@ -342,6 +344,7 @@ def test_serialization_callback_numpy():
 
     pa.serialize(DummyClass())
 
+
 def test_buffer_serialization():
 
     class BufferClass(object):
@@ -371,24 +374,24 @@ def test_arrow_limits(self):
         # Test that objects that are too large for Arrow throw a Python
         # exception. These tests give out of memory errors on Travis and need
         # to be run on a machine with lots of RAM.
-        l = 2 ** 29 * [1.0]
-        serialization_roundtrip(l, mmap)
-        del l
-        l = 2 ** 29 * ["s"]
-        serialization_roundtrip(l, mmap)
-        del l
-        l = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
-        serialization_roundtrip(l, mmap)
-        del l
-        l = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
-        serialization_roundtrip(l, mmap)
-        del l
-        l = np.zeros(2 ** 25)
-        serialization_roundtrip(l, mmap)
-        del l
-        l = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
-        serialization_roundtrip(l, mmap)
-        del l
+        x = 2 ** 29 * [1.0]
+        serialization_roundtrip(x, mmap)
+        del x
+        x = 2 ** 29 * ["s"]
+        serialization_roundtrip(x, mmap)
+        del x
+        x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
+        serialization_roundtrip(x, mmap)
+        del x
+        x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
+        serialization_roundtrip(x, mmap)
+        del x
+        x = np.zeros(2 ** 25)
+        serialization_roundtrip(x, mmap)
+        del x
+        x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
+        serialization_roundtrip(x, mmap)
+        del x
 
 
 def test_serialization_callback_error():
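
The `l` -> `x` renames in test_arrow_limits above clear flake8's E741 check,
which flags single-character names that are easily confused with digits or
one another (`l`, `O`, `I`):

    l = 2 ** 29 * [1.0]   # E741: ambiguous variable name 'l'
    x = 2 ** 29 * [1.0]   # unambiguous spelling used after this commit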

-- 
To stop receiving notification emails like this one, please contact
"commits@arrow.apache.org" <commits@arrow.apache.org>.
