superset-commits mailing list archives

From maximebeauche...@apache.org
Subject [incubator-superset] branch master updated: [flake8] Resolve E1?? errors (#3805)
Date Fri, 10 Nov 2017 20:06:24 GMT
This is an automated email from the ASF dual-hosted git repository.

maximebeauchemin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-superset.git


The following commit(s) were added to refs/heads/master by this push:
     new 690de86  [flake8] Resolve E1?? errors (#3805)
690de86 is described below

commit 690de862e8e2ee7904aff0b77ebf631e2df0c4ca
Author: John Bodley <4567245+john-bodley@users.noreply.github.com>
AuthorDate: Fri Nov 10 12:06:22 2017 -0800

    [flake8] Resolve E1?? errors (#3805)
---
 superset/cli.py                           | 12 ++--
 superset/connectors/base/models.py        |  6 +-
 superset/connectors/connector_registry.py |  2 +-
 superset/connectors/druid/models.py       | 20 ++++---
 superset/db_engine_specs.py               |  8 ++-
 superset/models/helpers.py                |  2 +-
 superset/security.py                      |  6 +-
 superset/sql_lab.py                       |  3 +-
 superset/stats_logger.py                  |  5 +-
 superset/viz.py                           |  8 +--
 tests/access_tests.py                     | 10 ++--
 tests/base_tests.py                       | 20 +++----
 tests/core_tests.py                       | 11 ++--
 tests/druid_tests.py                      | 97 +++++++++++++++++--------------
 tests/email_tests.py                      |  8 +--
 tests/utils_tests.py                      |  6 +-
 tests/viz_tests.py                        |  3 +-
 tox.ini                                   | 10 ----
 18 files changed, 127 insertions(+), 110 deletions(-)
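
The E1?? codes in the subject are pycodestyle's indentation checks, mostly the E12x continuation-line rules. The recurring fix in the hunks below replaces ad-hoc alignment (over-indented hanging indents, continuations lined up under an opening bracket) with a plain four-space hanging indent and a dedented closing bracket. A minimal before/after sketch of that pattern, using invented names (`aggregate`, `records`, `group_by` are illustrative, not from this commit):

# Invented stub so the sketch is self-contained and runs as-is.
def aggregate(rows, group_by=None):
    return rows

records = [{'region': 'CA'}, {'region': 'US'}]

# Before: an over-indented hanging indent, the kind of layout the
# E12x checks (e.g. E126) reject once enforced.
totals = aggregate(
            records,
            group_by='region')

# After: a four-space hanging indent with the closing bracket on its
# own line, the shape this commit normalizes to.
totals = aggregate(
    records,
    group_by='region',
)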

diff --git a/superset/cli.py b/superset/cli.py
index dbf7b9f..37a0f63 100755
--- a/superset/cli.py
+++ b/superset/cli.py
@@ -141,13 +141,17 @@ def load_examples(load_test_data):
 @manager.option(
     '-d', '--datasource',
     help=(
-            "Specify which datasource name to load, if omitted, all "
-            "datasources will be refreshed"))
+        "Specify which datasource name to load, if omitted, all "
+        "datasources will be refreshed"
+    ),
+)
 @manager.option(
     '-m', '--merge',
     help=(
-            "Specify using 'merge' property during operation. "
-            "Default value is False "))
+        "Specify using 'merge' property during operation. "
+        "Default value is False "
+    ),
+)
 def refresh_druid(datasource, merge):
     """Refresh druid datasources"""
     session = db.session()
diff --git a/superset/connectors/base/models.py b/superset/connectors/base/models.py
index 3808391..0a03366 100644
--- a/superset/connectors/base/models.py
+++ b/superset/connectors/base/models.py
@@ -47,8 +47,10 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
         return relationship(
             'Slice',
             primaryjoin=lambda: and_(
-              foreign(Slice.datasource_id) == self.id,
-              foreign(Slice.datasource_type) == self.type))
+                foreign(Slice.datasource_id) == self.id,
+                foreign(Slice.datasource_type) == self.type,
+            ),
+        )
 
     # placeholder for a relationship to a derivative of BaseColumn
     columns = []
diff --git a/superset/connectors/connector_registry.py b/superset/connectors/connector_registry.py
index 5924797..ffcf5ad 100644
--- a/superset/connectors/connector_registry.py
+++ b/superset/connectors/connector_registry.py
@@ -72,4 +72,4 @@ class ConnectorRegistry(object):
             cls, session, database, datasource_name, schema=None):
         datasource_class = ConnectorRegistry.sources[database.type]
         return datasource_class.query_datasources_by_name(
-                session, database, datasource_name, schema=None)
+            session, database, datasource_name, schema=None)
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index 3415d92..afbfb26 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -1,4 +1,4 @@
- # pylint: disable=invalid-unary-operand-type
+# pylint: disable=invalid-unary-operand-type
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime, timedelta
@@ -919,8 +919,8 @@ class DruidDatasource(Model, BaseDatasource):
         columns_dict = {c.column_name: c for c in self.columns}
 
         all_metrics, post_aggs = self._metrics_and_post_aggs(
-                                      metrics,
-                                      metrics_dict)
+            metrics,
+            metrics_dict)
 
         aggregations = OrderedDict()
         for m in self.metrics:
@@ -996,16 +996,16 @@ class DruidDatasource(Model, BaseDatasource):
             client.topn(**pre_qry)
             query_str += "// Two phase query\n// Phase 1\n"
             query_str += json.dumps(
-              client.query_builder.last_query.query_dict, indent=2)
+                client.query_builder.last_query.query_dict, indent=2)
             query_str += "\n"
             if phase == 1:
                 return query_str
             query_str += (
-              "// Phase 2 (built based on phase one's results)\n")
+                "// Phase 2 (built based on phase one's results)\n")
             df = client.export_pandas()
             qry['filter'] = self._add_filter_from_pre_query_data(
-                                df,
-                                qry['dimensions'], filters)
+                df,
+                qry['dimensions'], filters)
             qry['threshold'] = timeseries_limit or 1000
             if row_limit and granularity == 'all':
                 qry['threshold'] = row_limit
@@ -1046,8 +1046,10 @@ class DruidDatasource(Model, BaseDatasource):
                     "// Phase 2 (built based on phase one's results)\n")
                 df = client.export_pandas()
                 qry['filter'] = self._add_filter_from_pre_query_data(
-                                    df,
-                                    qry['dimensions'], filters)
+                    df,
+                    qry['dimensions'],
+                    filters,
+                )
                 qry['limit_spec'] = None
             if row_limit:
                 qry['limit_spec'] = {
diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py
index 2c2ad0b..421ca03 100644
--- a/superset/db_engine_specs.py
+++ b/superset/db_engine_specs.py
@@ -461,7 +461,9 @@ class PrestoEngineSpec(BaseEngineSpec):
         result_set_df = db.get_df(
             """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
                ORDER BY concat(table_schema, '.', table_name)""".format(
-                   datasource_type.upper()), None)
+                datasource_type.upper(),
+            ),
+            None)
         result_sets = defaultdict(list)
         for unused, row in result_set_df.iterrows():
             result_sets[row['table_schema']].append(row['table_name'])
@@ -879,8 +881,8 @@ class HiveEngineSpec(PrestoEngineSpec):
         backend_name = url.get_backend_name()
 
         # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
-        if backend_name == "hive" and "auth" in url.query.keys() and \
-                        impersonate_user is True and username is not None:
+        if (backend_name == "hive" and "auth" in url.query.keys() and
+                impersonate_user is True and username is not None):
             configuration["hive.server2.proxy.user"] = username
         return configuration
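
A second pattern, visible in the HiveEngineSpec hunk just above: backslash continuations in long conditions give way to a parenthesized expression whose continuation lines take an eight-space indent, keeping the condition visually distinct from the four-space body. A self-contained sketch with invented values (`backend`, `params`, `impersonate` are illustrative, not from the diff):

# Invented inputs so the condition below can run standalone.
backend, params = 'hive', {'auth': 'LDAP'}
impersonate, username = True, 'alice'

# The parentheses carry the line break that a trailing backslash
# used to; the extra indent separates condition from body.
if (backend == 'hive' and 'auth' in params and
        impersonate and username is not None):
    print('proxying as', username)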
 
diff --git a/superset/models/helpers.py b/superset/models/helpers.py
index 6cc21fc..f179dbe 100644
--- a/superset/models/helpers.py
+++ b/superset/models/helpers.py
@@ -156,7 +156,7 @@ def merge_perm(sm, permission_name, view_menu_name, connection):
             .values(
                 permission_id=permission.id,
                 view_menu_id=view_menu.id,
-                ),
+            ),
         )
 
 
diff --git a/superset/security.py b/superset/security.py
index 8f96acb..ebb6246 100644
--- a/superset/security.py
+++ b/superset/security.py
@@ -107,8 +107,10 @@ def is_admin_only(pvm):
     if (pvm.view_menu.name in READ_ONLY_MODEL_VIEWS and
             pvm.permission.name not in READ_ONLY_PERMISSION):
         return True
-    return (pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
-        pvm.permission.name in ADMIN_ONLY_PERMISSIONS)
+    return (
+        pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
+        pvm.permission.name in ADMIN_ONLY_PERMISSIONS
+    )
 
 
 def is_alpha_only(pvm):
diff --git a/superset/sql_lab.py b/superset/sql_lab.py
index dffa85c..937f222 100644
--- a/superset/sql_lab.py
+++ b/superset/sql_lab.py
@@ -105,7 +105,8 @@ def get_sql_results(
 
 
 def execute_sql(
-    ctask, query_id, return_results=True, store_results=False, user_name=None):
+    ctask, query_id, return_results=True, store_results=False, user_name=None,
+):
     """Executes the sql query returns the results."""
     session = get_session(not ctask.request.called_directly)
 
diff --git a/superset/stats_logger.py b/superset/stats_logger.py
index 470f2e2..bbeadd7 100644
--- a/superset/stats_logger.py
+++ b/superset/stats_logger.py
@@ -33,8 +33,9 @@ class DummyStatsLogger(BaseStatsLogger):
             Fore.CYAN + "[stats_logger] (incr) " + key + Style.RESET_ALL)
 
     def decr(self, key):
-        logging.debug(Fore.CYAN + "[stats_logger] (decr) " + key +
-                     Style.RESET_ALL)
+        logging.debug((
+            Fore.CYAN + "[stats_logger] (decr) " + key +
+            Style.RESET_ALL))
 
     def gauge(self, key, value):
         logging.debug((
diff --git a/superset/viz.py b/superset/viz.py
index 4a5bc1f..ee6c2fc 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -1208,9 +1208,9 @@ class DistributionBarViz(DistributionPieViz):
         d = super(DistributionBarViz, self).query_obj()  # noqa
         fd = self.form_data
         if (
-                len(d['groupby']) <
-                len(fd.get('groupby') or []) + len(fd.get('columns') or [])
-                ):
+            len(d['groupby']) <
+            len(fd.get('groupby') or []) + len(fd.get('columns') or [])
+        ):
             raise Exception(
                 _("Can't have overlap between Series and Breakdowns"))
         if not fd.get('metrics'):
@@ -1523,7 +1523,7 @@ class IFrameViz(BaseViz):
     is_timeseries = False
 
     def get_df(self):
-       return None
+        return None
 
 
 class ParallelCoordinatesViz(BaseViz):
diff --git a/tests/access_tests.py b/tests/access_tests.py
index 8aa4c26..5a8e8ee 100644
--- a/tests/access_tests.py
+++ b/tests/access_tests.py
@@ -547,11 +547,11 @@ class RequestAccessTests(SupersetTestCase):
             '/superset/update_role/',
             data=json.dumps({
                 'users': [{
-                        'username': 'gamma',
-                        'first_name': 'Gamma',
-                        'last_name': 'Gamma',
-                        'email': 'gamma@superset.com',
-                    }],
+                    'username': 'gamma',
+                    'first_name': 'Gamma',
+                    'last_name': 'Gamma',
+                    'email': 'gamma@superset.com',
+                }],
                 'role_name': update_role_str,
             }),
             follow_redirects=True,
diff --git a/tests/base_tests.py b/tests/base_tests.py
index af94596..4ba5f81 100644
--- a/tests/base_tests.py
+++ b/tests/base_tests.py
@@ -28,9 +28,9 @@ class SupersetTestCase(unittest.TestCase):
 
     def __init__(self, *args, **kwargs):
         if (
-                        self.requires_examples and
-                        not os.environ.get('SOLO_TEST') and
-                        not os.environ.get('examples_loaded')
+            self.requires_examples and
+            not os.environ.get('SOLO_TEST') and
+            not os.environ.get('examples_loaded')
         ):
             logging.info("Loading examples")
             cli.load_examples(load_test_data=True)
@@ -133,8 +133,8 @@ class SupersetTestCase(unittest.TestCase):
     def get_slice(self, slice_name, session):
         slc = (
             session.query(models.Slice)
-                .filter_by(slice_name=slice_name)
-                .one()
+            .filter_by(slice_name=slice_name)
+            .one()
         )
         session.expunge_all()
         return slc
@@ -169,20 +169,20 @@ class SupersetTestCase(unittest.TestCase):
     def get_main_database(self, session):
         return (
             db.session.query(models.Database)
-                .filter_by(database_name='main')
-                .first()
+            .filter_by(database_name='main')
+            .first()
         )
 
     def get_access_requests(self, username, ds_type, ds_id):
         DAR = models.DatasourceAccessRequest
         return (
             db.session.query(DAR)
-                .filter(
+            .filter(
                 DAR.created_by == sm.find_user(username=username),
                 DAR.datasource_type == ds_type,
                 DAR.datasource_id == ds_id,
-                )
-                .first()
+            )
+            .first()
         )
 
     def logout(self):
diff --git a/tests/core_tests.py b/tests/core_tests.py
index 5236fdf..6c3bae1 100644
--- a/tests/core_tests.py
+++ b/tests/core_tests.py
@@ -365,7 +365,8 @@ class CoreTests(SupersetTestCase):
 
         resp = self.client.get('/kv/{}/'.format(kv.id))
         self.assertEqual(resp.status_code, 200)
-        self.assertEqual(json.loads(value),
+        self.assertEqual(
+            json.loads(value),
             json.loads(resp.data.decode('utf-8')))
 
         try:
@@ -436,8 +437,8 @@ class CoreTests(SupersetTestCase):
         self.login(username=username)
         dash = (
             db.session.query(models.Dashboard)
-                .filter_by(slug="births")
-                .first()
+            .filter_by(slug="births")
+            .first()
         )
         origin_title = dash.dashboard_title
         positions = []
@@ -459,8 +460,8 @@ class CoreTests(SupersetTestCase):
         self.get_resp(url, data=dict(data=json.dumps(data)))
         updatedDash = (
             db.session.query(models.Dashboard)
-                .filter_by(slug="births")
-                .first()
+            .filter_by(slug="births")
+            .first()
         )
         self.assertEqual(updatedDash.dashboard_title, 'new title')
         # # bring back dashboard original title
diff --git a/tests/druid_tests.py b/tests/druid_tests.py
index 7ac3829..09ecc87 100644
--- a/tests/druid_tests.py
+++ b/tests/druid_tests.py
@@ -21,49 +21,49 @@ class PickableMock(Mock):
         return (Mock, ())
 
 SEGMENT_METADATA = [{
-  "id": "some_id",
-  "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
-  "columns": {
-    "__time": {
-        "type": "LONG", "hasMultipleValues": False,
-        "size": 407240380, "cardinality": None, "errorMessage": None},
-    "dim1": {
-        "type": "STRING", "hasMultipleValues": False,
-        "size": 100000, "cardinality": 1944, "errorMessage": None},
-    "dim2": {
-        "type": "STRING", "hasMultipleValues": True,
-        "size": 100000, "cardinality": 1504, "errorMessage": None},
-    "metric1": {
-        "type": "FLOAT", "hasMultipleValues": False,
-        "size": 100000, "cardinality": None, "errorMessage": None},
-  },
-  "aggregators": {
-    "metric1": {
-        "type": "longSum",
-        "name": "metric1",
-        "fieldName": "metric1"},
-  },
-  "size": 300000,
-  "numRows": 5000000,
+    "id": "some_id",
+    "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
+    "columns": {
+        "__time": {
+            "type": "LONG", "hasMultipleValues": False,
+            "size": 407240380, "cardinality": None, "errorMessage": None},
+        "dim1": {
+            "type": "STRING", "hasMultipleValues": False,
+            "size": 100000, "cardinality": 1944, "errorMessage": None},
+        "dim2": {
+            "type": "STRING", "hasMultipleValues": True,
+            "size": 100000, "cardinality": 1504, "errorMessage": None},
+        "metric1": {
+            "type": "FLOAT", "hasMultipleValues": False,
+            "size": 100000, "cardinality": None, "errorMessage": None},
+    },
+    "aggregators": {
+        "metric1": {
+            "type": "longSum",
+            "name": "metric1",
+            "fieldName": "metric1"},
+    },
+    "size": 300000,
+    "numRows": 5000000,
 }]
 
 GB_RESULT_SET = [
-  {
-    "version": "v1",
-    "timestamp": "2012-01-01T00:00:00.000Z",
-    "event": {
-      "dim1": 'Canada',
-      "metric1": 12345678,
+    {
+        "version": "v1",
+        "timestamp": "2012-01-01T00:00:00.000Z",
+        "event": {
+            "dim1": 'Canada',
+            "metric1": 12345678,
+        },
     },
-  },
-  {
-    "version": "v1",
-    "timestamp": "2012-01-01T00:00:00.000Z",
-    "event": {
-      "dim1": 'USA',
-      "metric1": 12345678 / 2,
+    {
+        "version": "v1",
+        "timestamp": "2012-01-01T00:00:00.000Z",
+        "event": {
+            "dim1": 'USA',
+            "metric1": 12345678 / 2,
+        },
     },
-  },
 ]
 
 
@@ -337,26 +337,30 @@ class DruidTests(SupersetTestCase):
                 metric_name='unused_count',
                 verbose_name='COUNT(*)',
                 metric_type='count',
-                json=json.dumps({'type': 'count', 'name': 'unused_count'})),
+                json=json.dumps({'type': 'count', 'name': 'unused_count'}),
+            ),
             'some_sum': DruidMetric(
                 metric_name='some_sum',
                 verbose_name='SUM(*)',
                 metric_type='sum',
-                json=json.dumps({'type': 'sum', 'name': 'sum'})),
+                json=json.dumps({'type': 'sum', 'name': 'sum'}),
+            ),
             'a_histogram': DruidMetric(
                 metric_name='a_histogram',
                 verbose_name='APPROXIMATE_HISTOGRAM(*)',
                 metric_type='approxHistogramFold',
                 json=json.dumps(
-                    {'type': 'approxHistogramFold', 'name': 'a_histogram'}),
+                    {'type': 'approxHistogramFold', 'name': 'a_histogram'},
                 ),
+            ),
             'aCustomMetric': DruidMetric(
                 metric_name='aCustomMetric',
                 verbose_name='MY_AWESOME_METRIC(*)',
                 metric_type='aCustomType',
                 json=json.dumps(
-                    {'type': 'customMetric', 'name': 'aCustomMetric'}),
+                    {'type': 'customMetric', 'name': 'aCustomMetric'},
                 ),
+            ),
             'quantile_p95': DruidMetric(
                 metric_name='quantile_p95',
                 verbose_name='P95(*)',
@@ -365,7 +369,9 @@ class DruidTests(SupersetTestCase):
                     'type': 'quantile',
                     'probability': 0.95,
                     'name': 'p95',
-                    'fieldName': 'a_histogram'})),
+                    'fieldName': 'a_histogram',
+                }),
+            ),
             'aCustomPostAgg': DruidMetric(
                 metric_name='aCustomPostAgg',
                 verbose_name='CUSTOM_POST_AGG(*)',
@@ -375,7 +381,10 @@ class DruidTests(SupersetTestCase):
                     'name': 'aCustomPostAgg',
                     'field': {
                         'type': 'fieldAccess',
-                        'fieldName': 'aCustomMetric'}})),
+                        'fieldName': 'aCustomMetric',
+                    },
+                }),
+            ),
         }
 
         metrics = ['some_sum']
diff --git a/tests/email_tests.py b/tests/email_tests.py
index bdb56ca..8213a6f 100644
--- a/tests/email_tests.py
+++ b/tests/email_tests.py
@@ -89,8 +89,8 @@ class EmailSmtpTest(unittest.TestCase):
             'from', 'to', MIMEMultipart(), app.config, dryrun=False)
         assert not mock_smtp.called
         mock_smtp_ssl.assert_called_with(
-             app.config.get('SMTP_HOST'),
-             app.config.get('SMTP_PORT'),
+            app.config.get('SMTP_HOST'),
+            app.config.get('SMTP_PORT'),
         )
 
     @mock.patch('smtplib.SMTP_SSL')
@@ -104,8 +104,8 @@ class EmailSmtpTest(unittest.TestCase):
             'from', 'to', MIMEMultipart(), app.config, dryrun=False)
         assert not mock_smtp_ssl.called
         mock_smtp.assert_called_with(
-             app.config.get('SMTP_HOST'),
-             app.config.get('SMTP_PORT'),
+            app.config.get('SMTP_HOST'),
+            app.config.get('SMTP_PORT'),
         )
         assert not mock_smtp.login.called
 
diff --git a/tests/utils_tests.py b/tests/utils_tests.py
index 0ed1b71..22623aa 100644
--- a/tests/utils_tests.py
+++ b/tests/utils_tests.py
@@ -110,8 +110,10 @@ class UtilsTestCase(unittest.TestCase):
         self.assertEquals(form_data, expected)
 
     def test_datetime_f(self):
-        self.assertEquals(datetime_f(datetime(1990, 9, 21, 19, 11, 19, 626096)),
-            '<nobr>1990-09-21T19:11:19.626096</nobr>')
+        self.assertEquals(
+            datetime_f(datetime(1990, 9, 21, 19, 11, 19, 626096)),
+            '<nobr>1990-09-21T19:11:19.626096</nobr>',
+        )
         self.assertEquals(len(datetime_f(datetime.now())), 28)
         self.assertEquals(datetime_f(None), '<nobr>None</nobr>')
         iso = datetime.now().isoformat()[:10].split('-')
diff --git a/tests/viz_tests.py b/tests/viz_tests.py
index f21434d..06096e9 100644
--- a/tests/viz_tests.py
+++ b/tests/viz_tests.py
@@ -545,7 +545,8 @@ class PartitionVizTestCase(unittest.TestCase):
         self.assertEqual(3, len(nest[0]['children']))
         self.assertEqual(3, len(nest[0]['children'][0]['children']))
         self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children']))
-        self.assertEqual(1,
+        self.assertEqual(
+            1,
             len(nest[0]['children']
                 [0]['children']
                 [0]['children']
diff --git a/tox.ini b/tox.ini
index a8b9050..ffdce84 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,16 +17,6 @@ exclude =
     superset/migrations
     superset/templates
 ignore =
-    E111
-    E114
-    E116
-    E121
-    E123
-    E125
-    E126
-    E127
-    E128
-    E131
     E302
     E303
     E305
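
The tox.ini hunk is what makes the rest of the diff stick: the ten E1xx codes come off flake8's ignore list, so new violations now fail the lint run instead of being silently skipped. As a rough illustration, a hypothetical module like the one below (file name and contents invented) would be flagged on both commented lines when checked with `flake8` under the updated config:

# lint_demo.py -- hypothetical; the comments name real pycodestyle codes.
def scale(n, factor=2):
   return n * factor  # E111: indentation is not a multiple of four

result = scale(21,
    factor=10)  # E128: continuation line under-indented for visual indent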

-- 
To stop receiving notification emails like this one, please contact
commits@superset.apache.org.
