hbase-commits mailing list archives

From st...@apache.org
Subject hbase git commit: HBASE-15896 Add timeout tests to flaky list from report-flakies.py - Adds timed-out tests to flaky list. Dumps two new files for reference, 'timeout' and 'failed' for corresponding list of bad tests. - Set --max-builds for different urls separately. This is needed so that we can turn the knobs for post-commit job and flaky-tests job separately. (Apekshit)
Date Fri, 27 May 2016 14:34:32 GMT
Repository: hbase
Updated Branches:
  refs/heads/master da0d74cd2 -> aa016c78a


HBASE-15896 Add timeout tests to flaky list from report-flakies.py - Adds timed-out tests
to flaky list. Dumps two new files for reference, 'timeout' and 'failed' for corresponding
list of bad tests. - Set --max-builds for different urls separately. This is needed so that
we can turn the knobs for post-commit job and flaky-tests job separately. (Apekshit)
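
For illustration only (the job URLs below are hypothetical), each --urls value carries an
optional per-url build limit after a space, and the flag may be repeated, so the post-commit
job and the flaky-tests job can be capped independently:

    ./report-flakies.py --mvn \
        --urls "https://builds.example.org/job/post-commit-job 5" \
        --urls "https://builds.example.org/job/flaky-tests-job 30"

Omitting the number analyzes all builds available on jenkins for that url.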

Change-Id: I88e1f9a8924eed1b5010106e73edede3aff34b0b

Signed-off-by: stack <stack@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa016c78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa016c78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa016c78

Branch: refs/heads/master
Commit: aa016c78a72515a0ebed5adb38cc34207c7d8013
Parents: da0d74c
Author: Apekshit <apeksharma@gmail.com>
Authored: Tue May 24 20:39:54 2016 -0700
Committer: stack <stack@apache.org>
Committed: Fri May 27 07:34:10 2016 -0700

----------------------------------------------------------------------
 dev-support/findHangingTests.py | 103 +++++++++-----------
 dev-support/report-flakies.py   | 178 ++++++++++++++++++-----------------
 2 files changed, 136 insertions(+), 145 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa016c78/dev-support/findHangingTests.py
----------------------------------------------------------------------
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
index deccc8b..ce49f48 100755
--- a/dev-support/findHangingTests.py
+++ b/dev-support/findHangingTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -19,64 +19,47 @@
 # script to find hanging test from Jenkins build output
 # usage: ./findHangingTests.py <url of Jenkins build console>
 #
-import urllib2
+import re
+import requests
 import sys
-import string
-if len(sys.argv) != 2 :
-  print "ERROR : Provide the jenkins job console URL as the only argument."
-  exit(1)
-print "Fetching " + sys.argv[1]
-response = urllib2.urlopen(sys.argv[1])
-i = 0;
-tests = {}
-failed_tests = {}
-summary = 0
-host = False
-patch = False
-branch = False
-while True:
-  n = response.readline()
-  if n == "" :
-    break
-  if not host and n.find("Building remotely on") >= 0:
-    host = True
-    print n.strip()    
-    continue
-  if not patch and n.find("Testing patch for ") >= 0:
-    patch = True
-    print n.strip()    
-    continue
-  if not branch and n.find("Testing patch on branch ") >= 0:
-    branch = True
-    print n.strip()    
-    continue
-  if n.find("PATCH APPLICATION FAILED") >= 0:
-    print "PATCH APPLICATION FAILED"
-    sys.exit(1) 
-  if summary == 0 and n.find("Running tests.") >= 0:
-    summary = summary + 1
-    continue
-  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
-    summary = summary + 1
-    continue
-  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
-    sys.stdout.write(n)
-    continue
-  if n.find("org.apache.hadoop.hbase") < 0:
-    continue 
-  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
-  if n.find("Running org.apache.hadoop.hbase") > -1 :
-    tests[test_name] = False
-  if n.find("Tests run:") > -1 :
-    if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
-      failed_tests[test_name] = True
-    tests[test_name] = True
-response.close()
 
-print "Printing hanging tests"
-for key, value in tests.iteritems():
-  if value == False:
-    print "Hanging test : " + key
-print "Printing Failing tests"
-for key, value in failed_tests.iteritems():
-  print "Failing test : " + key
+def get_hanging_tests(console_url):
+    response = requests.get(console_url)
+    if response.status_code != 200:
+        print "Error getting consoleText. Response = {} {}".format(
+            response.status_code, response.reason)
+        return {}
+
+    all_tests = set()
+    hanging_tests = set()
+    failed_tests = set()
+    for line in response.content.splitlines():
+        result1 = re.match("^Running org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
+        if result1:
+            test_case = result1.group(2)
+            hanging_tests.add(test_case)
+            all_tests.add(test_case)
+        result2 = re.match("^Tests run:.*- in org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
+        if result2:
+            test_case = result2.group(2)
+            hanging_tests.remove(test_case)
+            if "FAILURE!" in line:
+                failed_tests.add(test_case)
+    print "Result > total tests: {:4}   hanging : {:4}   failed : {:4}".format(
+        len(all_tests), len(hanging_tests), len(failed_tests))
+    return [all_tests, hanging_tests, failed_tests]
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2 :
+        print "ERROR : Provide the jenkins job console URL as the only argument."
+        sys.exit(1)
+
+    print "Fetching {}".format(sys.argv[1])
+    [all_tests, hanging_tests, failed_tests] = get_hanging_tests(sys.argv[1])
+    print "Found {} hanging tests:".format(len(hanging_tests))
+    for test in hanging_tests:
+        print test
+    print "\n"
+    print "Found {} failed tests:".format(len(failed_tests))
+    for test in failed_tests:
+        print test
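
As an aside (not part of the patch; the sample lines and test names are invented), a minimal
sketch of how the two regular expressions in get_hanging_tests() classify Surefire console
output:

    import re

    # Invented console fragment in the Surefire format parsed above.
    sample = [
        "Running org.apache.hadoop.hbase.client.TestAdmin",
        "Running org.apache.hadoop.hbase.regionserver.TestHRegion",
        "Tests run: 10, Failures: 1, Errors: 0, Skipped: 0 <<< FAILURE! "
        "- in org.apache.hadoop.hbase.client.TestAdmin",
    ]
    hanging, failed = set(), set()
    for line in sample:
        started = re.match("^Running org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
        if started:
            hanging.add(started.group(2))      # "Running" seen, no summary line yet
        finished = re.match("^Tests run:.*- in org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
        if finished:
            hanging.remove(finished.group(2))  # summary seen, so the test did not hang
            if "FAILURE!" in line:
                failed.add(finished.group(2))
    # hanging == set(["TestHRegion"]); failed == set(["TestAdmin"])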

http://git-wip-us.apache.org/repos/asf/hbase/blob/aa016c78/dev-support/report-flakies.py
----------------------------------------------------------------------
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
index e5e66cc..a97591d 100755
--- a/dev-support/report-flakies.py
+++ b/dev-support/report-flakies.py
@@ -16,137 +16,145 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# This script uses Jenkins REST api to collect test results of given builds and generates flakyness
-# data about unittests.
-# Print help: ./report-flakies.py -h
+# This script uses Jenkins REST api to collect test result(s) of given build/builds and generates
+# flakyness data about unittests.
+# Print help: report-flakies.py -h
 import argparse
+import findHangingTests
 import logging
-import re
 import requests
+import sys
 
 parser = argparse.ArgumentParser()
-parser.add_argument("--max-builds", type=int, metavar="n",
-    help="Number of builds to analyze for each job (if available in jenkins). Default: all
"
-        + "available builds.")
+parser.add_argument("--urls", metavar="url[ max-builds]", action="append", required=True,
+    help="Urls to analyze, which can refer to simple projects, multi-configuration projects
or "
+         "individual build run. Optionally, specify maximum builds to analyze for this url
"
+         "(if available on jenkins) using space as separator. By default, all available "
+         "builds are analyzed.")
 parser.add_argument("--mvn", action="store_true",
     help="Writes two strings for including/excluding these flaky tests using maven flags.
These "
-        + "strings are written to files so they can be saved as artifacts and easily imported
in "
-        + "other projects.")
+         "strings are written to files so they can be saved as artifacts and easily imported
in "
+         "other projects. Also writes timeout and failing tests in separate files for "
+           "reference.")
 parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
-parser.add_argument(
-    "urls", help="Space separated list of urls (single/multi-configuration project) to analyze")
 args = parser.parse_args()
 
 logging.basicConfig()
-logger = logging.getLogger("org.apache.hadoop.hbase.report-flakies")
+logger = logging.getLogger(__name__)
 if args.verbose:
-  logger.setLevel(logging.INFO)
+    logger.setLevel(logging.INFO)
 
-# Given url of an executed build, fetches its test report, and returns dictionary from testname to
-# pass/skip/fail status.
-def get_build_results(build_url):
+
+# Given url of an executed build, analyzes its console text, and returns
+# [list of all tests, list of timeout tests, list of failed tests].
+def get_bad_tests(build_url):
     logger.info("Getting test results for %s", build_url)
-    url = build_url + "testReport/api/json?tree=suites[cases[className,name,status]]"
-    response = requests.get(url)
-    if response.status_code == 404:
-        logger.info("No test results for %s", build_url)
-        return {}
-    json_response = response.json()
-
-    tests = {}
-    for test_cases in json_response["suites"]:
-        for test in test_cases["cases"]:
-            # Truncate initial "org.apache.hadoop.hbase." from all tests.
-            test_name = (test["className"] + "#" + test["name"])[24:]
-            tests[test_name] = test["status"]
-    return tests
+    console_url = build_url + "/consoleText"
+    return findHangingTests.get_hanging_tests(console_url)
+
 
 # If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
 # get urls for individual jobs.
-jobs_list = []
-for url in args.urls.split():
+def expand_multi_configuration_projects(urls_list):
+    expanded_urls = []
+    for url_max_build in urls_list:
+        splits = url_max_build.split()
+        url = splits[0]
+        max_builds = 10000  # Some high value
+        if len(splits) == 2:
+            max_builds = int(splits[1])
+        json_response = requests.get(url + "/api/json").json()
+        if json_response.has_key("activeConfigurations"):
+            for config in json_response["activeConfigurations"]:
+                expanded_urls.append({'url':config["url"], 'max_builds': max_builds})
+        else:
+            expanded_urls.append({'url':url, 'max_builds': max_builds})
+    return expanded_urls
+
+
+# Set of timeout/failed tests across all given urls.
+all_timeout_tests = set()
+all_failed_tests = set()
+
+# Iterates over each url, gets test results and prints flaky tests.
+expanded_urls  = expand_multi_configuration_projects(args.urls)
+for url_max_build in expanded_urls:
+    url = url_max_build["url"]
     json_response = requests.get(url + "/api/json").json()
-    if json_response.has_key("activeConfigurations"):
-        for config in json_response["activeConfigurations"]:
-            jobs_list.append(config["url"])
-    elif json_response.has_key("builds"):
-        jobs_list.append(url)
+    if json_response.has_key("builds"):
+        builds = json_response["builds"]
+        logger.info("Analyzing job: %s", url)
     else:
-        raise Exception("Bad url ({0}).".format(url))
-
-global_bad_tests = set()
-# Iterates over each job, gets its test results and prints flaky tests.
-for job_url in jobs_list:
-    logger.info("Analyzing job: %s", job_url)
+        builds = [{'number' : json_response["id"], 'url': url}]
+        logger.info("Analyzing build : %s", url)
     build_id_to_results = {}
-    builds = requests.get(job_url + "/api/json").json()["builds"]
     num_builds = 0
     build_ids = []
-    build_ids_without_result = []
+    build_ids_without_tests_run = []
     for build in builds:
         build_id = build["number"]
         build_ids.append(build_id)
-        build_result = get_build_results(build["url"])
-        if len(build_result) > 0:
-            build_id_to_results[build_id] = build_result
+        result = get_bad_tests(build["url"])
+        if len(result[0]) > 0:
+            build_id_to_results[build_id] = result
         else:
-            build_ids_without_result.append(build_id)
+            build_ids_without_tests_run.append(build_id)
         num_builds += 1
-        if num_builds == args.max_builds:
+        if num_builds == url_max_build["max_builds"]:
             break
 
     # Collect list of bad tests.
     bad_tests = set()
     for build in build_id_to_results:
-        for test in build_id_to_results[build]:
-            if (build_id_to_results[build][test] == "REGRESSION"
-                or build_id_to_results[build][test] == "FAILED"):
-                bad_tests.add(test)
-                global_bad_tests.add(test)
-
-    # Get total and failed build times for each bad test.
-    build_counts = {key:dict([('total', 0), ('failed', 0)]) for key in bad_tests}
+        [_, timeout_tests, failed_tests] = build_id_to_results[build]
+        all_timeout_tests.update(timeout_tests)
+        all_failed_tests.update(failed_tests)
+        bad_tests.update(timeout_tests.union(failed_tests))
+
+    # Get total and failed/timeout times for each bad test.
+    build_counts = {key : {'total': 0, 'timeout': 0, 'fail': 0 } for key in bad_tests}
     for build in build_id_to_results:
-        build_results = build_id_to_results[build]
+        [all_tests, timeout_tests, failed_tests] = build_id_to_results[build]
         for bad_test in bad_tests:
-            if build_results.has_key(bad_test):
-                if build_results[bad_test] != "SKIPPED":  # Ignore the test if it's skipped.
-                    build_counts[bad_test]['total'] += 1
-                if build_results[bad_test] == "REGRESSION":
-                    build_counts[bad_test]['failed'] += 1
+            if all_tests.issuperset([bad_test]):
+                build_counts[bad_test]["total"] += 1
+            if timeout_tests.issuperset([bad_test]):
+                build_counts[bad_test]['timeout'] += 1
+            if failed_tests.issuperset([bad_test]):
+                build_counts[bad_test]['fail'] += 1
 
     if len(bad_tests) > 0:
-        print "Job: {}".format(job_url)
-        print "{:>100}  {:6}  {:10}  {}".format("Test Name", "Failed", "Total Runs", "Flakyness")
+        print "URL: {}".format(url)
+        print "{:>60}  {:25}  {:10}  {}".format(
+            "Test Name", "Bad Runs(failed/timeout)", "Total Runs", "Flakyness")
         for bad_test in bad_tests:
-            fail = build_counts[bad_test]['failed']
+            fail = build_counts[bad_test]['fail']
+            timeout = build_counts[bad_test]['timeout']
             total = build_counts[bad_test]['total']
-            print "{:>100}  {:6}  {:10}  {:2.0f}%".format(bad_test, fail, total, fail*100.0/total)
+            print "{:>60}  {:10} ({:4} / {:4})  {:10}  {:2.0f}%".format(
+                bad_test, fail + timeout, fail, timeout, total, (fail + timeout) * 100.0 / total)
     else:
         print "No flaky tests founds."
-        if len(build_ids) == len(build_ids_without_result):
+        if len(build_ids) == len(build_ids_without_tests_run):
             print "None of the analyzed builds have test result."
 
-    print "Builds analyzed: " + str(build_ids)
-    print "Builds with no results: " + str(build_ids_without_result)
+    print "Builds analyzed: {}".format(build_ids)
+    print "Builds without any test runs: {}".format(build_ids_without_tests_run)
     print ""
 
+
+all_bad_tests = all_timeout_tests.union(all_failed_tests)
 if args.mvn:
-    # There might be multiple tests failing within each TestCase, avoid duplication of TestCase names.
-    test_cases = set()
-    for test in global_bad_tests:
-        test = re.sub(".*\.", "", test)  # Remove package name prefix.
-        test = re.sub("#.*", "", test)  # Remove individual unittest's name
-        test_cases.add(test)
-
-    includes = ",".join(test_cases)
+    includes = ",".join(all_bad_tests)
     with open("./includes", "w") as inc_file:
         inc_file.write(includes)
-        inc_file.close()
 
-    excludes = ""
-    for test_case in test_cases:
-        excludes += "**/" + test_case + ".java,"
+    excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests]
     with open("./excludes", "w") as exc_file:
-        exc_file.write(excludes)
-        exc_file.close()
+        exc_file.write(",".join(excludes))
+
+    with open("./timeout", "w") as file:
+        file.write(",".join(all_timeout_tests))
+
+    with open("./failed", "w") as file:
+        file.write(",".join(all_failed_tests))

