From e872aab547b4dadef84258116e439bd95b3f386a Mon Sep 17 00:00:00 2001
From: Apekshit
Date: Wed, 13 Apr 2016 23:02:17 -0700
Subject: [PATCH] HBASE-15651 Script to report flaky tests. (Apekshit)

Change-Id: I5cd5c23985b8c3f928d7ab44e57606b0a5478f15
---
 dev-support/report-flakies.py | 125 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100755 dev-support/report-flakies.py

diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
new file mode 100755
index 0000000..af66001
--- /dev/null
+++ b/dev-support/report-flakies.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script uses the Jenkins REST API to collect test results of the given builds and
+# generates flakiness data about unit tests.
+# Print help: ./report-flakies.py -h
+import argparse
+import logging
+import re
+import requests
+
+parser = argparse.ArgumentParser()
+parser.add_argument("urls", help="Space-separated list of urls (single/multi-configuration project) to analyze")
+parser.add_argument("--max_runs", help="Number of runs to analyze for each job (if available in jenkins)", type=int)
+parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
+parser.add_argument("--mvn", action="store_true", help="Writes two strings for including/excluding these flaky tests using maven flags. These strings are written to files so they can be saved as artifacts and easily imported in other projects.")
+args = parser.parse_args()
+
+if args.verbose:
+    logging.basicConfig(level=logging.INFO)
+
+
+# Given the url of an executed build, fetches its test report and returns a dictionary from
+# test name to pass/skip/fail status.
+def get_build_results(build_url):
+    logging.info("Getting test results for " + build_url)
+    url = build_url + "testReport/api/json?tree=suites[cases[className,name,status]]"
+    response = requests.get(url)
+    if response.status_code == 404:
+        logging.info("No test results for " + build_url)
+        return {}
+    json_response = response.json()
+
+    tests = {}
+    for test_cases in json_response["suites"]:
+        for test in test_cases["cases"]:
+            # Truncate the initial "org.apache.hadoop.hbase." from all test names.
+            test_name = (test["className"] + "#" + test["name"])[len("org.apache.hadoop.hbase."):]
+            tests[test_name] = test["status"]
+    return tests
+
+
+# If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
+# get urls for individual jobs.
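+# For orientation, the /api/json responses this loop depends on look roughly like the
+# following (illustrative sketch trimmed to the fields read here, not verbatim output):
+#   multi-configuration project:  {"activeConfigurations": [{"url": "..."}, ...]}
+#   single-configuration project: {"builds": [{"number": 123, "url": "..."}, ...]}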
+jobs_list = []
+for url in args.urls.split():
+    json_response = requests.get(url + "/api/json").json()
+    if "activeConfigurations" in json_response:
+        for config in json_response["activeConfigurations"]:
+            jobs_list.append(config["url"])
+    elif "builds" in json_response:
+        jobs_list.append(url)
+    else:
+        logging.error("Error: Bad url - " + url)
+        raise Exception("Bad url - " + url)
+
+global_bad_tests = set()
+# Iterate over each job, get its test results and print flaky tests.
+for job_url in jobs_list:
+    logging.warning("Analyzing job: " + job_url)
+    run_id_to_results = {}
+    runs = requests.get(job_url + "/api/json").json()["builds"]
+    num_runs = 0
+    for run in runs:
+        run_id_to_results[run["number"]] = get_build_results(run["url"])
+        num_runs += 1
+        if num_runs == args.max_runs:
+            break
+
+    # Collect the list of bad tests.
+    bad_tests = set()
+    for run_id in run_id_to_results:
+        for test in run_id_to_results[run_id]:
+            if run_id_to_results[run_id][test] == "REGRESSION":
+                bad_tests.add(test)
+                global_bad_tests.add(test)
+
+    # Get total and failed run counts for each bad test.
+    run_counts = {key: {'total': 0, 'failed': 0} for key in bad_tests}
+    for run_id in run_id_to_results:
+        run_results = run_id_to_results[run_id]
+        for bad_test in bad_tests:
+            if bad_test in run_results:
+                if run_results[bad_test] != "SKIPPED":  # Ignore the test if it's skipped.
+                    run_counts[bad_test]['total'] += 1
+                    if run_results[bad_test] == "REGRESSION":
+                        run_counts[bad_test]['failed'] += 1
+
+    logging.warning("{:>100} {:>6} {:>10} {}".format("Test Name", "Failed", "Total Runs", "Flakiness"))
+    for bad_test in bad_tests:
+        fail = run_counts[bad_test]['failed']
+        total = run_counts[bad_test]['total']
+        logging.warning("{:>100} {:>6} {:>10} {:2.0f}%".format(bad_test, fail, total, fail * 100.0 / total))
+
+if args.mvn:
+    includes = ""
+    excludes = ""
+    for test in global_bad_tests:
+        test = re.sub(r".*\.", "", test)  # Remove the package name prefix.
+        test = re.sub(r"#.*", "", test)  # Remove the individual test method's name.
+        includes += test + ","
+        excludes += "**/" + test + ".java,"
+    with open("./includes", "w") as inc_file:
+        inc_file.write(includes)
+    with open("./excludes", "w") as exc_file:
+        exc_file.write(excludes)
-- 
2.3.2 (Apple Git-55)
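Usage sketch (the job URLs and the Maven wiring below are illustrative placeholders, not
part of the patch):

    # Analyze the last 15 runs of two jobs. Note that the space-separated url list is
    # passed as a single quoted argument, since the script splits it itself.
    ./dev-support/report-flakies.py --max_runs 15 --mvn \
        "https://builds.example.org/job/JobA https://builds.example.org/job/JobB"

With --mvn, the script leaves ./includes (comma-separated test class names) and
./excludes (**/TestFoo.java-style patterns) in the working directory. The includes list
matches the comma-separated format that surefire's -Dtest flag accepts, so rerunning only
the flaky set could look like:

    mvn test -Dtest="$(sed 's/,$//' includes)"   # strip the trailing comma first

The excludes patterns are shaped for a surefire <excludes> entry, which a consuming build
would have to wire up itself.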