hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-15651 Script to report flaky tests. (Apekshit)
Date Thu, 21 Apr 2016 23:16:14 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 4c0587134 -> 57e1dbc8a


HBASE-15651 Script to report flaky tests. (Apekshit)

Change-Id: I5cd5c23985b8c3f928d7ab44e57606b0a5478f15

Signed-off-by: stack <stack@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/57e1dbc8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/57e1dbc8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/57e1dbc8

Branch: refs/heads/master
Commit: 57e1dbc8a65071df3ccd4e0c1f7d124d6a03f0ed
Parents: 4c05871
Author: Apekshit <apeksharma@gmail.com>
Authored: Wed Apr 13 23:02:17 2016 -0700
Committer: stack <stack@apache.org>
Committed: Thu Apr 21 16:16:06 2016 -0700

----------------------------------------------------------------------
 dev-support/report-flakies.py | 146 +++++++++++++++++++++++++++++++++++++
 1 file changed, 146 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/57e1dbc8/dev-support/report-flakies.py
----------------------------------------------------------------------
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
new file mode 100755
index 0000000..65faa7c
--- /dev/null
+++ b/dev-support/report-flakies.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script uses the Jenkins REST API to collect test results of given builds and generates
+# flakiness data about unit tests.
+# Print help: ./report-flakies.py -h
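+#
+# Example invocation (the job URL here is hypothetical):
+#   ./report-flakies.py --max-builds 10 "https://builds.apache.org/job/HBase-Trunk_matrix/"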
+import argparse
+import logging
+import re
+import requests
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--max-builds", type=int, metavar="n",
+    help="Number of builds to analyze for each job (if available in jenkins). Default: all
"
+        + "available builds.")
+parser.add_argument("--mvn", action="store_true",
+    help="Writes two strings for including/excluding these flaky tests using maven flags.
These "
+        + "strings are written to files so they can be saved as artifacts and easily imported
in "
+        + "other projects.")
+parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
+parser.add_argument(
+    "urls", help="Space separated list of urls (single/multi-configuration project) to analyze")
+args = parser.parse_args()
+
+logging.basicConfig()
+logger = logging.getLogger("org.apache.hadoop.hbase.report-flakies")
+if args.verbose:
+    logger.setLevel(logging.INFO)
+
+# Given the url of an executed build, fetches its test report and returns a dictionary from test
+# name to pass/skip/fail status.
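+# A returned mapping might look like the following (test names and statuses illustrative):
+#   {"client.TestAdmin#testCreateTable": "PASSED", "regionserver.TestHRegion#testFlush": "REGRESSION"}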
+def get_build_results(build_url):
+    logger.info("Getting test results for %s", build_url)
+    url = build_url + "testReport/api/json?tree=suites[cases[className,name,status]]"
+    response = requests.get(url)
+    if response.status_code == 404:
+        logger.info("No test results for %s", build_url)
+        return {}
+    json_response = response.json()
+
+    tests = {}
+    for test_cases in json_response["suites"]:
+        for test in test_cases["cases"]:
+            # Truncate initial "org.apache.hadoop.hbase." from all tests.
+            test_name = (test["className"] + "#" + test["name"])[24:]
+            tests[test_name] = test["status"]
+    return tests
+
+# If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
+# get urls for individual jobs.
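+# A matrix job's /api/json response would then contain something like (shape inferred from the
+# keys used below; values illustrative):
+#   {"activeConfigurations": [{"url": "https://.../job/SomeJob/config=one/"}, ...]}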
+jobs_list = []
+for url in args.urls.split():
+    json_response = requests.get(url + "/api/json").json()
+    if "activeConfigurations" in json_response:
+        for config in json_response["activeConfigurations"]:
+            jobs_list.append(config["url"])
+    elif "builds" in json_response:
+        jobs_list.append(url)
+    else:
+        raise Exception("Bad url ({0}).".format(url))
+
+global_bad_tests = set()
+# Iterates over each job, gets its test results and prints flaky tests.
+for job_url in jobs_list:
+    logger.info("Analyzing job: %s", job_url)
+    build_id_to_results = {}
+    builds = requests.get(job_url + "/api/json").json()["builds"]
+    num_builds = 0
+    build_ids = []
+    build_ids_without_result = []
+    for build in builds:
+        build_id = build["number"]
+        build_ids.append(build_id)
+        build_result = get_build_results(build["url"])
+        if len(build_result) > 0:
+            build_id_to_results[build_id] = build_result
+        else:
+            build_ids_without_result.append(build_id)
+        num_builds += 1
+        if num_builds == args.max_builds:
+            break
+
+    # Collect list of bad tests.
+    bad_tests = set()
+    for build in build_id_to_results:
+        for test in build_id_to_results[build]:
+            if build_id_to_results[build][test] == "REGRESSION":
+                bad_tests.add(test)
+                global_bad_tests.add(test)
+
+    # Get the total runs and failure counts for each bad test.
+    build_counts = {key: {'total': 0, 'failed': 0} for key in bad_tests}
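+    # e.g. build_counts["client.TestAdmin#testCreateTable"] == {'total': 3, 'failed': 1}
+    # (test name and numbers illustrative).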
+    for build in build_id_to_results:
+        build_results = build_id_to_results[build]
+        for bad_test in bad_tests:
+            if bad_test in build_results:
+                if build_results[bad_test] != "SKIPPED":  # Ignore the test if it's skipped.
+                    build_counts[bad_test]['total'] += 1
+                if build_results[bad_test] == "REGRESSION":
+                    build_counts[bad_test]['failed'] += 1
+
+    if len(bad_tests) > 0:
+        print "Job: {}".format(job_url)
+        print "{:>100}  {:6}  {:10}  {}".format("Test Name", "Failed", "Total Runs", "Flakyness")
+        for bad_test in bad_tests:
+            fail = build_counts[bad_test]['failed']
+            total = build_counts[bad_test]['total']
+            print "{:>100}  {:6}  {:10}  {:2.0f}%".format(bad_test, fail, total, fail*100.0/total)
+    else:
+        print "No flaky tests founds."
+        if len(builds_ids) == len(build_ids_without_result):
+            print "None of the analyzed builds have test result."
+
+    print "Builds analyzed: " + str(build_ids)
+    print "Builds with no results: " + str(build_ids_without_result)
+    print ""
+
+if args.mvn:
+    includes = ""
+    excludes = ""
+    for test in global_bad_tests:
+        test = re.sub(".*\.", "", test)  # Remove package name prefix.
+        test = re.sub("#.*", "", test)  # Remove individual unittest's name
+        includes += test + ","
+        excludes += "**/" + test + ".java,"
+    with open("./includes", "w") as inc_file:
+        inc_file.write(includes)
+        inc_file.close()
+    with open("./excludes", "w") as exc_file:
+        exc_file.write(excludes)
+        exc_file.close()
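+
+# The generated ./includes and ./excludes files can then be fed to maven; a sketch, assuming
+# surefire's -Dtest flag is given the comma-separated list written above:
+#   mvn test -Dtest="$(cat includes)"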

