From commits-return-2060-archive-asf-public=cust-asf.ponee.io@superset.incubator.apache.org Fri Jan 11 03:22:18 2019
Return-Path:
X-Original-To: archive-asf-public@cust-asf.ponee.io
Delivered-To: archive-asf-public@cust-asf.ponee.io
Received: from mail.apache.org (hermes.apache.org [140.211.11.3])
    by mx-eu-01.ponee.io (Postfix) with SMTP id C61CA180778
    for ; Fri, 11 Jan 2019 03:22:16 +0100 (CET)
Received: (qmail 6422 invoked by uid 500); 11 Jan 2019 02:22:07 -0000
Mailing-List: contact commits-help@superset.incubator.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Reply-To: dev@superset.incubator.apache.org
Delivered-To: mailing list commits@superset.incubator.apache.org
Received: (qmail 6352 invoked by uid 99); 11 Jan 2019 02:22:07 -0000
Received: from ec2-52-202-80-70.compute-1.amazonaws.com (HELO gitbox.apache.org) (52.202.80.70)
    by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 11 Jan 2019 02:22:07 +0000
Received: by gitbox.apache.org (ASF Mail Server at gitbox.apache.org, from userid 33)
    id 73C7187045; Fri, 11 Jan 2019 02:22:07 +0000 (UTC)
Date: Fri, 11 Jan 2019 02:22:40 +0000
To: "commits@superset.apache.org"
Subject: [incubator-superset] 36/43: fixed more tests
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
From: johnbodley@apache.org
In-Reply-To: <154717332450.605.1618987592169557933@gitbox.apache.org>
References: <154717332450.605.1618987592169557933@gitbox.apache.org>
X-Git-Host: gitbox.apache.org
X-Git-Repo: incubator-superset
X-Git-Refname: refs/heads/feature--embeddable-charts-pilot
X-Git-Reftype: branch
X-Git-Rev: 7f1ad570fbeecaa5d49a49bb506a847a3424337d
X-Git-NotificationType: diff
X-Git-Multimail-Version: 1.5.dev
Auto-Submitted: auto-generated
Message-Id: <20190111022207.73C7187045@gitbox.apache.org>

This is an automated email from the ASF dual-hosted git repository.
johnbodley pushed a commit to branch feature--embeddable-charts-pilot
in repository https://gitbox.apache.org/repos/asf/incubator-superset.git

commit 7f1ad570fbeecaa5d49a49bb506a847a3424337d
Author: Conglei Shi
AuthorDate: Mon Dec 17 11:54:28 2018 -0800

    fixed more tests
---
 tests/druid_func_tests.py | 85 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 65 insertions(+), 20 deletions(-)

diff --git a/tests/druid_func_tests.py b/tests/druid_func_tests.py
index 9be24d9..79f52d8 100644
--- a/tests/druid_func_tests.py
+++ b/tests/druid_func_tests.py
@@ -255,7 +255,10 @@ class DruidFuncTestCase(unittest.TestCase):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = []
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder = Mock()
         client.query_builder.last_query = Mock()
@@ -341,7 +344,10 @@ class DruidFuncTestCase(unittest.TestCase):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = ['col1']
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder.last_query.query_dict = {'mock': 0}
         # client.topn is called twice
@@ -415,7 +421,10 @@ class DruidFuncTestCase(unittest.TestCase):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = ['col1', 'col2']
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder = Mock()
         client.query_builder.last_query = Mock()
@@ -692,11 +701,17 @@ class DruidFuncTestCase(unittest.TestCase):
             'label': 'My Adhoc Metric',
         }
 
-        metrics = ['some_sum']
+        some_sum = {
+            'label': 'some_sum',
+            'expressionType': 'BUILTIN',
+        }
+
+        metrics = [some_sum]
+
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        assert set(saved_metrics.keys()) == {'some_sum'}
+        assert set(saved_metrics.keys()) == {some_sum['label']}
         assert post_aggs == {}
 
         metrics = [adhoc_metric]
@@ -706,26 +721,36 @@ class DruidFuncTestCase(unittest.TestCase):
         assert set(saved_metrics.keys()) == set([adhoc_metric['label']])
         assert post_aggs == {}
 
-        metrics = ['some_sum', adhoc_metric]
+        metrics = [some_sum, adhoc_metric]
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        assert set(saved_metrics.keys()) == {'some_sum', adhoc_metric['label']}
+        assert set(saved_metrics.keys()) == {some_sum['label'], adhoc_metric['label']}
         assert post_aggs == {}
 
-        metrics = ['quantile_p95']
+        quantile_p95 = {
+            'label': 'quantile_p95',
+            'expressionType': 'BUILTIN',
+        }
+
+        metrics = [quantile_p95]
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        result_postaggs = set(['quantile_p95'])
+        result_postaggs = set([quantile_p95['label']])
         assert set(saved_metrics.keys()) == {'a_histogram'}
         assert set(post_aggs.keys()) == result_postaggs
 
-        metrics = ['aCustomPostAgg']
+        aCustomPostAgg = {
+            'label': 'aCustomPostAgg',
+            'expressionType': 'BUILTIN',
+        }
+        metrics = [aCustomPostAgg]
+
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        result_postaggs = set(['aCustomPostAgg'])
+        result_postaggs = set([aCustomPostAgg['label']])
         assert set(saved_metrics.keys()) == {'aCustomMetric'}
         assert set(post_aggs.keys()) == result_postaggs
 
@@ -808,31 +833,43 @@ class DruidFuncTestCase(unittest.TestCase):
         ds.metrics = list(metrics_dict.values())
 
         groupby = ['dim1']
-        metrics = ['count1']
+        metrics = [{
+            'label': 'count1',
+            'expressionType': 'BUILTIN',
+        }]
+
+        timeseries_limit_metric = {
+            'label': 'sum1',
+            'expressionType': 'BUILTIN',
+        }
         granularity = 'all'
         # get the counts of the top 5 'dim1's, order by 'sum1'
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='sum1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.topn.call_args_list[0][1]
         self.assertEqual('dim1', qry_obj['dimension'])
-        self.assertEqual('sum1', qry_obj['metric'])
+        self.assertEqual('sum1', qry_obj['metric']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1'}, set(aggregations.keys()))
         self.assertEqual(set(), set(post_aggregations.keys()))
 
         # get the counts of the top 5 'dim1's, order by 'div1'
+        timeseries_limit_metric = {
+            'label': 'div1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='div1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.topn.call_args_list[1][1]
         self.assertEqual('dim1', qry_obj['dimension'])
-        self.assertEqual('div1', qry_obj['metric'])
+        self.assertEqual('div1', qry_obj['metric']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1', 'sum2'}, set(aggregations.keys()))
@@ -840,28 +877,36 @@ class DruidFuncTestCase(unittest.TestCase):
 
         groupby = ['dim1', 'dim2']
         # get the counts of the top 5 ['dim1', 'dim2']s, order by 'sum1'
+        timeseries_limit_metric = {
+            'label': 'sum1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='sum1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.groupby.call_args_list[0][1]
         self.assertEqual({'dim1', 'dim2'}, set(qry_obj['dimensions']))
-        self.assertEqual('sum1', qry_obj['limit_spec']['columns'][0]['dimension'])
+        self.assertEqual('sum1', qry_obj['limit_spec']['columns'][0]['dimension']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1'}, set(aggregations.keys()))
         self.assertEqual(set(), set(post_aggregations.keys()))
 
         # get the counts of the top 5 ['dim1', 'dim2']s, order by 'div1'
+        timeseries_limit_metric = {
+            'label': 'div1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='div1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
            client=client, order_desc=True, filter=[],
         )
         qry_obj = client.groupby.call_args_list[1][1]
         self.assertEqual({'dim1', 'dim2'}, set(qry_obj['dimensions']))
-        self.assertEqual('div1', qry_obj['limit_spec']['columns'][0]['dimension'])
+        self.assertEqual('div1', qry_obj['limit_spec']['columns'][0]['dimension']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1', 'sum2'}, set(aggregations.keys()))
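For context on the pattern above: the commit converts the test fixtures from bare metric-name strings (e.g. 'metric1', 'sum1') to dicts carrying a 'label' and an 'expressionType', and the assertions now read the label back out of the query object (e.g. qry_obj['metric']['label']). The short sketch below only illustrates the two metric shapes these tests now exercise; the get_label helper is hypothetical, written for illustration, and is not Superset code.

    # Illustration only: a metric may arrive as a plain name or as a dict with a 'label'.
    def get_label(metric):
        """Return the display label of a metric given as a dict or a plain string."""
        if isinstance(metric, dict):
            return metric['label']
        return metric

    # Shapes mirrored from the updated tests above.
    some_sum = {'label': 'some_sum', 'expressionType': 'BUILTIN'}
    adhoc_metric = {
        'expressionType': 'SIMPLE',
        'column': {'type': 'DOUBLE', 'column_name': 'value'},
        'aggregate': 'SUM',
        'label': 'My Adhoc Metric',
    }
    assert get_label(some_sum) == 'some_sum'
    assert get_label(adhoc_metric) == 'My Adhoc Metric'
    assert get_label('legacy_metric_name') == 'legacy_metric_name'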