zookeeper-commits mailing list archives

From ph...@apache.org
Subject [06/15] zookeeper git commit: fix pylint warnings.
Date Wed, 20 Dec 2017 21:30:30 GMT
fix pylint warnings.


Project: http://git-wip-us.apache.org/repos/asf/zookeeper/repo
Commit: http://git-wip-us.apache.org/repos/asf/zookeeper/commit/3f6d6771
Tree: http://git-wip-us.apache.org/repos/asf/zookeeper/tree/3f6d6771
Diff: http://git-wip-us.apache.org/repos/asf/zookeeper/diff/3f6d6771

Branch: refs/heads/jenkins-tools
Commit: 3f6d6771880354d97d6c277c2dea56143c3ee1bf
Parents: 687c14a
Author: Michael Han <hanm@apache.org>
Authored: Tue Apr 11 13:49:54 2017 -0700
Committer: Michael Han <hanm@apache.org>
Committed: Tue Apr 11 13:49:54 2017 -0700

----------------------------------------------------------------------
 zk-test-report/.idea/workspace.xml     |  69 ++++---
 zk-test-report/report.html             |   6 +-
 zk-test-report/zk-unittest-reporter.py | 259 --------------------------
 zk-test-report/zk_test_analyzer.py     | 272 ++++++++++++++++++++++++++++
 4 files changed, 307 insertions(+), 299 deletions(-)
----------------------------------------------------------------------
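For orientation, the pylint cleanups visible in the new zk_test_analyzer.py follow a few recurring patterns: module-level names become UPPER_CASE constants (invalid-name), the magic build limit becomes MAX_NUM_OF_BUILDS, a deprecated dict.has_key() call becomes an "in" membership test, and functions gain docstrings (missing-docstring). The sketch below is illustrative only and not taken from the diff; the helper name parse_response is hypothetical, while the constant names mirror the real ones in the new file.

    import logging
    from collections import defaultdict

    # invalid-name: module-level names are constants to pylint, so the new file
    # spells them in UPPER_CASE.
    LOG = logging.getLogger(__name__)        # was: logger = logging.getLogger(__name__)
    ALL_FAILED_TESTS = defaultdict(int)      # was: all_failed_tests = dict()
    MAX_NUM_OF_BUILDS = 10000                # was: a bare 10000 inside parse_cli_args()

    def parse_response(response):
        """missing-docstring: every function in the new file carries a docstring."""
        # Deprecated dict.has_key() is replaced with a membership test.
        if "activeConfigurations" in response:   # was: response.has_key("activeConfigurations")
            return response["activeConfigurations"]
        return []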


http://git-wip-us.apache.org/repos/asf/zookeeper/blob/3f6d6771/zk-test-report/.idea/workspace.xml
----------------------------------------------------------------------
diff --git a/zk-test-report/.idea/workspace.xml b/zk-test-report/.idea/workspace.xml
index 4f22ea0..54fe45f 100644
--- a/zk-test-report/.idea/workspace.xml
+++ b/zk-test-report/.idea/workspace.xml
@@ -21,18 +21,6 @@
   </component>
   <component name="FileEditorManager">
     <leaf>
-      <file leaf-file-name="zk-unittest-reporter.py" pinned="false" current-in-tab="true">
-        <entry file="file://$PROJECT_DIR$/zk-unittest-reporter.py">
-          <provider selected="true" editor-type-id="text-editor">
-            <state vertical-scroll-proportion="0.6998951">
-              <caret line="23" column="3" selection-start-line="23" selection-start-column="3" selection-end-line="23" selection-end-column="3" />
-              <folding>
-                <element signature="e#1207#1222#0" expanded="true" />
-              </folding>
-            </state>
-          </provider>
-        </entry>
-      </file>
       <file leaf-file-name="pydevd.py" pinned="false" current-in-tab="false">
         <entry file="file://$APPLICATION_HOME_DIR$/helpers/pydev/pydevd.py">
           <provider selected="true" editor-type-id="text-editor">
@@ -43,11 +31,23 @@
           </provider>
         </entry>
       </file>
+      <file leaf-file-name="zk_test_analyzer.py" pinned="false" current-in-tab="true">
+        <entry file="file://$PROJECT_DIR$/zk_test_analyzer.py">
+          <provider selected="true" editor-type-id="text-editor">
+            <state vertical-scroll-proportion="0.29591838">
+              <caret line="10" column="48" selection-start-line="10" selection-start-column="48" selection-end-line="10" selection-end-column="48" />
+              <folding>
+                <element signature="e#1040#1088#0" expanded="true" />
+              </folding>
+            </state>
+          </provider>
+        </entry>
+      </file>
       <file leaf-file-name="report_template.html" pinned="false" current-in-tab="false">
         <entry file="file://$PROJECT_DIR$/report_template.html">
           <provider selected="true" editor-type-id="text-editor">
-            <state vertical-scroll-proportion="45.38889">
-              <caret line="43" column="3" selection-start-line="43" selection-start-column="3" selection-end-line="43" selection-end-column="3" />
+            <state vertical-scroll-proportion="-17.722221">
+              <caret line="22" column="15" selection-start-line="22" selection-start-column="15" selection-end-line="22" selection-end-column="15" />
               <folding>
                <element signature="n#style#0;n#span#2;n#body#0;n#html#0;n#!!top" expanded="true" />
                <element signature="n#style#0;n#span#3;n#body#0;n#html#0;n#!!top" expanded="true" />
@@ -72,6 +72,7 @@
     <option name="CHANGED_PATHS">
       <list>
         <option value="$PROJECT_DIR$/zk-unittest-reporter.py" />
+        <option value="$PROJECT_DIR$/zk_test_analyzer.py" />
       </list>
     </option>
   </component>
@@ -132,7 +133,7 @@
   <component name="PropertiesComponent">
    <property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
     <property name="settings.editor.splitter.proportion" value="0.2" />
-    <property name="last_opened_file_path" value="$PROJECT_DIR$/zk-unittest-reporter.py" />
+    <property name="last_opened_file_path" value="$PROJECT_DIR$/zk_test_analyzer.py" />
   </component>
   <component name="PyConsoleOptionsProvider">
     <option name="myPythonConsoleState">
@@ -276,8 +277,8 @@
       <option name="ADD_CONTENT_ROOTS" value="true" />
       <option name="ADD_SOURCE_ROOTS" value="true" />
       <module name="zk-test-report" />
-      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/zk-unittest-reporter.py" />
-      <option name="PARAMETERS" value="--urls https://builds.apache.org/job/ZooKeeper-trunk-openjdk7/ --urls=https://builds.apache.org/job/ZooKeeper_branch35_jdk8/ --max-builds 3 --max-builds 3" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/zk_test_analyzer.py" />
+      <option name="PARAMETERS" value="--urls=https://builds.apache.org/job/ZooKeeper-trunk-openjdk7/ --urls=https://builds.apache.org/job/ZooKeeper-trunk-jdk8/ --urls=https://builds.apache.org/job/ZooKeeper_branch35_openjdk7/ --urls=https://builds.apache.org/job/ZooKeeper_branch35_jdk7/ --urls=https://builds.apache.org/job/ZooKeeper_branch35_jdk8/ --urls=https://builds.apache.org/job/ZooKeeper_branch34/ --urls=https://builds.apache.org/job/ZooKeeper_branch34_jdk7/ --urls=https://builds.apache.org/job/ZooKeeper_branch34_jdk8/ --max-builds=1 --max-builds=1 --max-builds=1 --max-builds=1 --max-builds=1 --max-builds=1 --max-builds=1 --max-builds=1" />
       <option name="SHOW_COMMAND_LINE" value="false" />
       <method />
     </configuration>
@@ -323,12 +324,6 @@
   </component>
   <component name="XDebuggerManager">
     <breakpoint-manager>
-      <breakpoints>
-        <line-breakpoint enabled="true" type="python-line">
-          <url>file://$PROJECT_DIR$/zk-unittest-reporter.py</url>
-          <line>180</line>
-        </line-breakpoint>
-      </breakpoints>
       <option name="time" value="1" />
     </breakpoint-manager>
     <watches-manager />
@@ -342,10 +337,18 @@
         </state>
       </provider>
     </entry>
+    <entry file="file://$APPLICATION_HOME_DIR$/helpers/pydev/pydevd.py">
+      <provider selected="true" editor-type-id="text-editor">
+        <state vertical-scroll-proportion="0.0">
+          <caret line="2281" column="0" selection-start-line="2281" selection-start-column="0" selection-end-line="2281" selection-end-column="0" />
+          <folding />
+        </state>
+      </provider>
+    </entry>
     <entry file="file://$PROJECT_DIR$/report_template.html">
       <provider selected="true" editor-type-id="text-editor">
-        <state vertical-scroll-proportion="45.38889">
-          <caret line="43" column="3" selection-start-line="43" selection-start-column="3" selection-end-line="43" selection-end-column="3" />
+        <state vertical-scroll-proportion="-17.722221">
+          <caret line="22" column="15" selection-start-line="22" selection-start-column="15" selection-end-line="22" selection-end-column="15" />
           <folding>
             <element signature="n#style#0;n#span#2;n#body#0;n#html#0;n#!!top" expanded="true" />
             <element signature="n#style#0;n#span#3;n#body#0;n#html#0;n#!!top" expanded="true" />
@@ -353,20 +356,12 @@
         </state>
       </provider>
     </entry>
-    <entry file="file://$APPLICATION_HOME_DIR$/helpers/pydev/pydevd.py">
-      <provider selected="true" editor-type-id="text-editor">
-        <state vertical-scroll-proportion="0.0">
-          <caret line="2281" column="0" selection-start-line="2281" selection-start-column="0" selection-end-line="2281" selection-end-column="0" />
-          <folding />
-        </state>
-      </provider>
-    </entry>
-    <entry file="file://$PROJECT_DIR$/zk-unittest-reporter.py">
+    <entry file="file://$PROJECT_DIR$/zk_test_analyzer.py">
       <provider selected="true" editor-type-id="text-editor">
-        <state vertical-scroll-proportion="0.6998951">
-          <caret line="23" column="3" selection-start-line="23" selection-start-column="3" selection-end-line="23" selection-end-column="3" />
+        <state vertical-scroll-proportion="0.29591838">
+          <caret line="10" column="48" selection-start-line="10" selection-start-column="48" selection-end-line="10" selection-end-column="48" />
           <folding>
-            <element signature="e#1207#1222#0" expanded="true" />
+            <element signature="e#1040#1088#0" expanded="true" />
           </folding>
         </state>
       </provider>

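The updated run configuration above supplies one --max-builds value for every --urls value, and parse_cli_args in the new script pairs them by position. Below is a minimal, hypothetical argparse sketch of that convention (not part of the diff); the two job URLs are taken from the PARAMETERS value above.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--urls', metavar='URL', action='append', required=True)
    parser.add_argument('--max-builds', metavar='n', action='append', type=int)

    # action='append' collects repeated flags into parallel lists, so the i-th
    # --max-builds applies to the i-th --urls.
    args = parser.parse_args([
        '--urls=https://builds.apache.org/job/ZooKeeper-trunk-openjdk7/', '--max-builds=1',
        '--urls=https://builds.apache.org/job/ZooKeeper_branch34/', '--max-builds=1',
    ])
    for url, max_builds in zip(args.urls, args.max_builds):
        print("analyze at most %d build(s) of %s" % (max_builds, url))
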
http://git-wip-us.apache.org/repos/asf/zookeeper/blob/3f6d6771/zk-test-report/report.html
----------------------------------------------------------------------
diff --git a/zk-test-report/report.html b/zk-test-report/report.html
index ae08d1a..846d31b 100644
--- a/zk-test-report/report.html
+++ b/zk-test-report/report.html
@@ -49,7 +49,7 @@
                   Apache Zookeeper Flaky Tests Dashboard
               </span>
 </p>
-<span>Last updated: <b>04/11/2017 12:19:47</b></span><br>
+<span>Last updated: <b>04/11/2017 13:26:01</b></span><br>
 <span>Count of flaky tests (cumulated from all jobs):
     <b>1</b></span><br>
 <br><br>
@@ -116,9 +116,9 @@
                  style="display: none; width:500px; white-space: normal">
                 
                 Failed : 
-                <a href="https://builds.apache.org/job/ZooKeeper-trunk-openjdk7//1431">1431</a>&nbsp;
-                
                 <a href="https://builds.apache.org/job/ZooKeeper-trunk-openjdk7//1433">1433</a>&nbsp;
+                
+                <a href="https://builds.apache.org/job/ZooKeeper-trunk-openjdk7//1434">1434</a>&nbsp;
                 <br/>
                 Timed Out : <br/>
                 Hanging : <br/>

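The report.html changes above are generated output: the reporting script loads report_template.html, renders it with Jinja2, and overwrites report.html on each run. The following sketch shows that render step under a simplifying assumption, replacing the real report_template.html with a tiny inline template; it is illustrative and not part of the diff.

    import time
    from collections import OrderedDict
    from jinja2 import Template

    # Stand-in for report_template.html; the real template renders the full
    # flaky-test dashboard shown above.
    template = Template(
        "<span>Last updated: <b>{{ datetime }}</b></span>\n"
        "<span>Count of flaky tests (cumulated from all jobs): <b>{{ bad_tests_count }}</b></span>\n"
    )

    html = template.render(datetime=time.strftime("%m/%d/%Y %H:%M:%S"),
                           bad_tests_count=1,
                           results=OrderedDict())  # URL_TO_BAD_TEST_RESULTS in the script

    with open("report.html", "w") as report:
        report.write(html)
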
http://git-wip-us.apache.org/repos/asf/zookeeper/blob/3f6d6771/zk-test-report/zk-unittest-reporter.py
----------------------------------------------------------------------
diff --git a/zk-test-report/zk-unittest-reporter.py b/zk-test-report/zk-unittest-reporter.py
deleted file mode 100644
index 4ccfac3..0000000
--- a/zk-test-report/zk-unittest-reporter.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# pylint: disable=invalid-name
-# To disable 'invalid constant name' warnings.
-# pylint: disable=import-error
-# Testing environment may not have all dependencies.
-
-"""
-This script uses Jenkins REST api to collect test results of given builds and generate test report.
-It is primarily used to monitor build health and flaky tests across different builds.
-Print help: zk-unittest-reporter.py -h
-"""
-
-import argparse
-import logging
-import os
-import time
-import re
-import requests
-from collections import OrderedDict
-from collections import defaultdict
-from jinja2 import Template
-
-# If any of these strings appear in the console output, it's a build one should probably ignore
-# for analyzing failed/hanging tests.
-BAD_RUN_STRINGS = [
-    "Slave went offline during the build",  # Machine went down, can't do anything about it.
-    "The forked VM terminated without properly saying goodbye",  # JVM crashed.
-]
-
-PATTERN_RUNNING_TEST = re.compile('(.*) RUNNING TEST METHOD (.*)')
-PATTERN_FAILED_TEST = re.compile('(.*)- FAILED (.*)')
-PATTERN_SUCCEED_TEST = re.compile('(.*)- SUCCEEDED (.*)')
-
-logging.basicConfig()
-logger = logging.getLogger(__name__)
-
-# Set of timeout/failed tests across all given urls.
-all_failed_tests = dict()
-# Contains { <url> : { <bad_test> : { 'all': [<build ids>], 'failed': [<build ids>]} } }
-url_to_bad_test_results = OrderedDict()
-
-
-def classify_tests(build_url):
-    response = requests.get(build_url)
-    if response.status_code != 200:
-        print "Error getting consoleText. Response = {} {}".format(
-            response.status_code, response.reason)
-        return
-
-    all_tests = defaultdict(int)
-    failed_tests = defaultdict(int)
-
-    print len(response.content)
-
-    for line in response.content.splitlines():
-        ans = PATTERN_RUNNING_TEST.match(line)
-        if ans:
-            test_case = ans.group(2)
-            all_tests[test_case] += 1
-        ans = PATTERN_FAILED_TEST.match(line)
-        if ans:
-            test_case = ans.group(2)
-            failed_tests[test_case] += 1
-        for bad_string in BAD_RUN_STRINGS:
-            if re.match(".*" + bad_string + ".*", line):
-                print "Bad string found in build:\n > {}".format(line)
-                return
-    print "Result > total tests: {:4}   failed : {:4}".format(
-        sum(all_tests.values()), sum(failed_tests.values()))
-    return [all_tests, failed_tests]
-
-
-def generate_report(build_url):
-    """
-    Given build_url of an executed build, analyzes its console text, and returns
-    [list of all tests, list of failed tests].
-    Returns None if can't get console text or if there is any other error.
-    """
-    logger.info("Analyzing %s", build_url)
-    response = requests.get(build_url + "/api/json").json()
-    if response["building"]:
-        logger.info("Skipping this build since it is in progress.")
-        return {}
-    build_result = classify_tests(build_url + "/consoleText")
-    if not build_result:
-        logger.info("Ignoring build %s", build_url)
-        return
-    return build_result
-
-
-def parse_cli_args(cli_args):
-    job_urls = cli_args.urls
-    excluded_builds_arg = cli_args.excluded_builds
-    max_builds_arg = cli_args.max_builds
-    if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
-        raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
-                        "since values are matched.")
-    if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
-        raise Exception("Number of --max-builds arguments should be same as that of --urls "
-                        "since values are matched.")
-    final_expanded_urls = []
-    for (i, job_url) in enumerate(job_urls):
-        max_builds = 10000  # Some high number
-        if max_builds_arg is not None and max_builds_arg[i] != 0:
-            max_builds = int(max_builds_arg[i])
-        excluded_builds = []
-        if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
-            excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
-        response = requests.get(job_url + "/api/json").json()
-        if response.has_key("activeConfigurations"):
-            for config in response["activeConfigurations"]:
-                final_expanded_urls.append({'url':config["url"], 'max_builds': max_builds,
-                                            'excludes': excluded_builds})
-        else:
-            final_expanded_urls.append({'url':job_url, 'max_builds': max_builds,
-                                        'excludes': excluded_builds})
-    return final_expanded_urls
-
-
-def analyze_build(args):
-    # Iterates over each url, gets test results and prints flaky tests.
-    expanded_urls = parse_cli_args(args)
-    for url_max_build in expanded_urls:
-        url = url_max_build["url"]
-        excludes = url_max_build["excludes"]
-        json_response = requests.get(url + "/api/json").json()
-        if json_response.has_key("builds"):
-            builds = json_response["builds"]
-            logger.info("Analyzing job: %s", url)
-        else:
-            builds = [{'number' : json_response["id"], 'url': url}]
-            logger.info("Analyzing build : %s", url)
-        build_id_to_results = {}
-        num_builds = 0
-        build_ids = []
-        build_ids_without_tests_run = []
-        for build in builds:
-            build_id = build["number"]
-            if build_id in excludes:
-                continue
-            result = generate_report(build["url"])
-            if not result:
-                continue
-            if len(result[0]) > 0:
-                build_id_to_results[build_id] = result
-            else:
-                build_ids_without_tests_run.append(build_id)
-            num_builds += 1
-            build_ids.append(build_id)
-            if num_builds == url_max_build["max_builds"]:
-                break
-
-        # Collect list of bad tests.
-        bad_tests = dict()
-        for build in build_id_to_results:
-            [_, failed_tests] = build_id_to_results[build]
-            all_failed_tests.update(failed_tests)
-            bad_tests.update(failed_tests)
-
-        # For each bad test, get build ids where it ran, timed out, failed or hanged.
-        test_to_build_ids = {key: {'all': dict(), 'timeout': dict(), 'failed': dict(),
-                                   'hanging': dict(), 'good': dict(), 'bad_count': 0}
-                             for key in bad_tests}
-        for build in build_id_to_results:
-            [all_tests, failed_tests] = build_id_to_results[build]
-            for bad_test in test_to_build_ids:
-                is_bad = False
-                if bad_test in all_tests:
-                    test_to_build_ids[bad_test]["all"].setdefault(build, 0)
-                if bad_test in failed_tests:
-                    test_to_build_ids[bad_test]['failed'].setdefault(build, 0)
-                    is_bad = True
-                if is_bad:
-                    test_to_build_ids[bad_test]['bad_count'] += 1
-                else:
-                    test_to_build_ids[bad_test]['good'].setdefault(build, 0)
-
-        # Calculate flakyness % and successful builds for each test. Also sort build ids.
-        for bad_test in test_to_build_ids:
-            test_result = test_to_build_ids[bad_test]
-            test_result['flakyness'] = test_result['bad_count'] * 100.0 / len(test_result['all'])
-            test_result['success'] = test_result['good']
-            for key in ['all', 'timeout', 'failed', 'hanging', 'success']:
-                test_result[key] = sorted(test_result[key])
-
-        # Sort tests in descending order by flakyness.
-        sorted_test_to_build_ids = OrderedDict(
-            sorted(test_to_build_ids.iteritems(), key=lambda x: x[1]['flakyness'], reverse=True))
-        url_to_bad_test_results[url] = sorted_test_to_build_ids
-
-        if len(sorted_test_to_build_ids) > 0:
-            print "URL: {}".format(url)
-            print "{:>60}  {:10}  {:25}  {}".format(
-                "Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness")
-            for bad_test in sorted_test_to_build_ids:
-                test_status = sorted_test_to_build_ids[bad_test]
-                print "{:>60}  {:10}  {:7} ( {:4} / {:5} / {:5} )  {:2.0f}%".format(
-                    bad_test, len(test_status['all']), test_status['bad_count'],
-                    len(test_status['failed']), len(test_status['timeout']),
-                    len(test_status['hanging']), test_status['flakyness'])
-        else:
-            print "No flaky tests founds."
-            if len(build_ids) == len(build_ids_without_tests_run):
-                print "None of the analyzed builds have test result."
-
-        print "Builds analyzed: {}".format(build_ids)
-        print "Builds without any test runs: {}".format(build_ids_without_tests_run)
-        print ""
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--urls', metavar='URL', action='append', required=True,
-                        help='Urls to analyze')
-    parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
-                        help='List of build numbers to exclude (or "None"). Not required, '
-                        'but if specified, number of uses should be same as that of --urls '
-                        'since the values are matched.')
-    parser.add_argument('--max-builds', metavar='n', action='append', type=int,
-                        help='The maximum number of builds to use (if available on jenkins). Specify '
-                        'should be same as that of --urls since the values are matched.')
-    parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
-    args = parser.parse_args()
-    if args.verbose:
-        logger.setLevel(logging.INFO)
-
-    analyze_build(args)
-
-    all_bad_tests = all_failed_tests
-    dev_support_dir = os.path.dirname(os.path.abspath(__file__))
-    with open(os.path.join(dev_support_dir, "report_template.html"), "r") as f:
-        template = Template(f.read())
-
-    with open("report.html", "w") as f:
-        datetime = time.strftime("%m/%d/%Y %H:%M:%S")
-        f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests),
-                                results=url_to_bad_test_results))
-
-if __name__ == "__main__":
-    main()
-
-

http://git-wip-us.apache.org/repos/asf/zookeeper/blob/3f6d6771/zk-test-report/zk_test_analyzer.py
----------------------------------------------------------------------
diff --git a/zk-test-report/zk_test_analyzer.py b/zk-test-report/zk_test_analyzer.py
new file mode 100644
index 0000000..fcd6038
--- /dev/null
+++ b/zk-test-report/zk_test_analyzer.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script uses Jenkins REST api to collect test results of given builds and generate test report.
+It is primarily used to monitor build health and flaky tests across different builds.
+Print help: zk_test_analyzer.py -h
+"""
+
+from collections import OrderedDict, defaultdict
+import argparse
+import logging
+import os
+import time
+import re
+import requests
+from jinja2 import Template
+
+# If any of these strings appear in the console output, it's a build one should probably ignore
+# for analyzing failed/hanging tests.
+BAD_RUN_STRINGS = [
+    "Slave went offline during the build",  # Machine went down, can't do anything about it.
+    "Forked Java VM exited abnormally",  # JVM crashed.
+]
+
+PATTERN_RUNNING_TEST = re.compile('(.*) RUNNING TEST METHOD (.*)')
+PATTERN_FAILED_TEST = re.compile('(.*)- FAILED (.*)')
+PATTERN_SUCCEED_TEST = re.compile('(.*)- SUCCEEDED (.*)')
+MAX_NUM_OF_BUILDS = 10000
+
+logging.basicConfig()
+LOG = logging.getLogger(__name__)
+
+ALL_FAILED_TESTS = defaultdict(int)
+# Contains { <url> : { <bad_test> : { 'all': [<build ids>], 'failed': [<build ids>]} } }
+URL_TO_BAD_TEST_RESULTS = OrderedDict()
+
+
+def classify_tests(build_url):
+    """
+    Given a build url, pick up all test cases and failed test cases by name.
+
+    :param build_url: Jenkins job url.
+    :return: [all_tests, failed_tests]
+    """
+    response = requests.get(build_url)
+    if response.status_code != 200:
+        print "Error getting consoleText. Response = {} {}".format(
+            response.status_code, response.reason)
+        return
+
+    all_tests = defaultdict(int)
+    failed_tests = defaultdict(int)
+
+    for line in response.content.splitlines():
+        ans = PATTERN_RUNNING_TEST.match(line)
+        if ans:
+            test_case = ans.group(2)
+            all_tests[test_case] += 1
+        ans = PATTERN_FAILED_TEST.match(line)
+        if ans:
+            test_case = ans.group(2)
+            failed_tests[test_case] += 1
+        for bad_string in BAD_RUN_STRINGS:
+            if re.match(".*" + bad_string + ".*", line):
+                print "Bad string found in build:\n > {}".format(line)
+                return
+    print "Result > total tests: {:4}   failed : {:4}".format(
+        sum(all_tests.values()), sum(failed_tests.values()))
+    return [all_tests, failed_tests]
+
+
+def generate_report(build_url):
+    """
+    Given a build url, retrieve the content of the build through
+    Jenkins API and then classify test cases to prepare final
+    test report.
+    :param build_url: Jenkins build url.
+    :return: classified test results.
+    """
+    LOG.info("Analyzing %s", build_url)
+    response = requests.get(build_url + "/api/json").json()
+    if response["building"]:
+        LOG.info("Skipping this build since it is in progress.")
+        return {}
+    build_result = classify_tests(build_url + "/consoleText")
+    if not build_result:
+        LOG.info("Ignoring build %s", build_url)
+        return
+    return build_result
+
+
+def parse_cli_args(cli_args):
+    """
+    Parse command line arguments.
+
+    :param cli_args: command line arguments.
+    :return: None
+    """
+    job_urls = cli_args.urls
+    excluded_builds_arg = cli_args.excluded_builds
+    max_builds_arg = cli_args.max_builds
+    if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
+        raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
+                        "since values are matched.")
+    if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
+        raise Exception("Number of --max-builds arguments should be same as that of --urls "
+                        "since values are matched.")
+    final_expanded_urls = []
+    for (i, job_url) in enumerate(job_urls):
+        max_builds = MAX_NUM_OF_BUILDS
+        if max_builds_arg is not None and max_builds_arg[i] != 0:
+            max_builds = int(max_builds_arg[i])
+        excluded_builds = []
+        if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
+            excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
+        response = requests.get(job_url + "/api/json").json()
+        if "activeConfigurations" in response:
+            for config in response["activeConfigurations"]:
+                final_expanded_urls.append({'url': config["url"], 'max_builds': max_builds,
+                                            'excludes': excluded_builds})
+        else:
+            final_expanded_urls.append({'url': job_url, 'max_builds': max_builds,
+                                        'excludes': excluded_builds})
+    return final_expanded_urls
+
+
+def analyze_build(args):
+    """
+    Given a set of command line arguments, analyze the build and populate
+    various data structures used to generate final test report.
+    :param args: arguments
+    :return: None
+    """
+    expanded_urls = parse_cli_args(args)
+    for url_max_build in expanded_urls:
+        url = url_max_build["url"]
+        excludes = url_max_build["excludes"]
+        json_response = requests.get(url + "/api/json").json()
+        if json_response.has_key("builds"):
+            builds = json_response["builds"]
+            LOG.info("Analyzing job: %s", url)
+        else:
+            builds = [{'number': json_response["id"], 'url': url}]
+            LOG.info("Analyzing build : %s", url)
+        build_id_to_results = {}
+        num_builds = 0
+        build_ids = []
+        build_ids_without_tests_run = []
+        for build in builds:
+            build_id = build["number"]
+            if build_id in excludes:
+                continue
+            result = generate_report(build["url"])
+            if not result:
+                continue
+            if len(result[0]) > 0:
+                build_id_to_results[build_id] = result
+            else:
+                build_ids_without_tests_run.append(build_id)
+            num_builds += 1
+            build_ids.append(build_id)
+            if num_builds == url_max_build["max_builds"]:
+                break
+
+        bad_tests = dict()
+        for build in build_id_to_results:
+            [_, failed_tests] = build_id_to_results[build]
+            ALL_FAILED_TESTS.update(failed_tests)
+            bad_tests.update(failed_tests)
+
+        # For each bad test, get build ids where it ran, timed out, failed or hanged.
+        test_to_build_ids = {key: {'all': dict(), 'timeout': dict(), 'failed': dict(),
+                                   'hanging': dict(), 'good': dict(), 'bad_count': 0}
+                             for key in bad_tests}
+        for build in build_id_to_results:
+            [all_tests, failed_tests] = build_id_to_results[build]
+            for bad_test in test_to_build_ids:
+                is_bad = False
+                if bad_test in all_tests:
+                    test_to_build_ids[bad_test]["all"].setdefault(build, 0)
+                if bad_test in failed_tests:
+                    test_to_build_ids[bad_test]['failed'].setdefault(build, 0)
+                    is_bad = True
+                if is_bad:
+                    test_to_build_ids[bad_test]['bad_count'] += 1
+                else:
+                    test_to_build_ids[bad_test]['good'].setdefault(build, 0)
+
+        # Calculate flakyness % and successful builds for each test. Also sort build ids.
+        for bad_test in test_to_build_ids:
+            test_result = test_to_build_ids[bad_test]
+            test_result['flakyness'] = test_result['bad_count'] * 100.0 / len(test_result['all'])
+            test_result['success'] = test_result['good']
+            for key in ['all', 'timeout', 'failed', 'hanging', 'success']:
+                test_result[key] = sorted(test_result[key])
+
+        # Sort tests in descending order by flakyness.
+        sorted_test_to_build_ids = OrderedDict(
+            sorted(test_to_build_ids.iteritems(), key=lambda x: x[1]['flakyness'], reverse=True))
+        URL_TO_BAD_TEST_RESULTS[url] = sorted_test_to_build_ids
+
+        if len(sorted_test_to_build_ids) > 0:
+            print "URL: {}".format(url)
+            print "{:>60}  {:10}  {:25}  {}".format(
+                "Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness")
+            for bad_test in sorted_test_to_build_ids:
+                test_status = sorted_test_to_build_ids[bad_test]
+                print "{:>60}  {:10}  {:7} ( {:4} / {:5} / {:5} )  {:2.0f}%".format(
+                    bad_test, len(test_status['all']), test_status['bad_count'],
+                    len(test_status['failed']), len(test_status['timeout']),
+                    len(test_status['hanging']), test_status['flakyness'])
+        else:
+            print "No flaky tests founds."
+            if len(build_ids) == len(build_ids_without_tests_run):
+                print "None of the analyzed builds have test result."
+
+        print "Builds analyzed: {}".format(build_ids)
+        print "Builds without any test runs: {}".format(build_ids_without_tests_run)
+        print ""
+
+
+def main():
+    """
+    Main entry of the module if used as an executable.
+    :return: None
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--urls', metavar='URL', action='append', required=True,
+                        help='Urls to analyze')
+    parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
+                        help='List of build numbers to exclude (or "None"). Not required, '
+                        'but if specified, number of uses should be same as that of --urls '
+                        'since the values are matched.')
+    parser.add_argument('--max-builds', metavar='n', action='append', type=int,
+                        help='The maximum number of builds to use (if available on jenkins).'
+                             'Specify should be same as that of --urls since the values '
+                             'are matched.')
+    parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
+    args = parser.parse_args()
+    if args.verbose:
+        LOG.setLevel(logging.INFO)
+
+    analyze_build(args)
+
+    all_bad_tests = ALL_FAILED_TESTS
+    dev_support_dir = os.path.dirname(os.path.abspath(__file__))
+    with open(os.path.join(dev_support_dir, "report_template.html"), "r") as report_template:
+        template = Template(report_template.read())
+
+    with open("report.html", "w") as report:
+        datetime = time.strftime("%m/%d/%Y %H:%M:%S")
+        report.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests),
+                                     results=URL_TO_BAD_TEST_RESULTS))
+
+if __name__ == "__main__":
+    main()

