diff --git a/.gitignore b/.gitignore
index 7ea23b2..0fc0baf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,8 @@ system-status.cfg
 poller.state*
 *.config
 *.org
+*.xz
+ui/contest/
+ui/static/
 __pycache__/
 results/
-*.xz
\ No newline at end of file
diff --git a/README.rst b/README.rst
index aea1c66..893b68d 100644
--- a/README.rst
+++ b/README.rst
@@ -13,6 +13,9 @@ Currently this project only includes simple checks and build
 testing, all Linux kernel-centric. Patches are not tested
 against existing kernel selftests.
 
+Please see `the wiki `_
+for how to interact with NIPA.
+
 Goals
 =====
 
@@ -33,11 +36,34 @@ Having everyone test their patches locally allows for better
 scaling (no need for big central infrastructure) and hopefully
 creates an incentive for contributing.
 
+Running locally
+===============
+
+`ingest_mdir.py` ingests patches and runs the checks locally
+(i.e. on the developer's machine). It should be pointed at a directory
+and will run all the checks on the patches that directory contains
+(patches are expected to be generated by `git format-patch`).
+
+Example:
+
+.. code-block:: bash
+
+   cd $linux
+   git format-patch HEAD~4.. -o /tmp/my-series/ --subject-prefix="PATCH net-next"
+   git checkout net-next/master -b test
+
+   cd $nipa
+   ./ingest_mdir.py --mdir /tmp/my-series/ --tree $linux
+
+Note that we need to check out the linux tree to a branch that matches the base
+on which we intend the patches to be applied. NIPA does not know what to reset
+the tree to; it will just try to apply the patches to whatever branch is
+currently checked out in the `$linux` repo.
+
 Structure
 =========
 
-The project is split into multiple programs with different
-uses.
+The project is split into multiple programs with different uses.
 
 `pw_poller.py` fetches emails from patchwork and runs tests in
 worker threads. There is one worker thread for each tree, enabling testing
@@ -49,11 +75,7 @@ and sub-dirs for each patch.
 
 Once tests are done another daemon - `pw_upload.py` uploads the results
 as checks to patchwork.
 
-`ingest_mdir.py` is supposed to serve the purpose of testing
-patches locally, it can be pointed at a directory and run all the
-checks on patches that directory contains (patches are expected to
-be generated by `git format-patch`). `ingest_mdir.py` has not been
-tested in a while so it's probably broken.
+`ingest_mdir.py` combines all the stages for local use.
 
 Configuration
 =============
@@ -199,7 +221,7 @@ netdev policy.
 
 signed
 ~~~~~~
 
-Check for patch attestation (as generated by [patatt](https://github.com/mricon/patatt)). Warn when there
+Check for patch attestation (as generated by `patatt <https://github.com/mricon/patatt>`_). Warn when there
 is no signature or if the key for a signature isn't available.
 Fail if the signature doesn't match the attestation.
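The `check_fetcher.py` hunk below starts de-duplicating patchwork checks by context, keeping only the newest result for each check and counting the older entries as updates. A minimal sketch of that idea (hypothetical helper, assuming each check is a dict with a `context` key and the list is ordered oldest-first, which is how the patchwork response is treated here):

.. code-block:: python

   def latest_checks(checks):
       """Keep only the newest check per context; count superseded ones."""
       seen = set()
       latest = []
       updates = 0
       for c in reversed(checks):       # walk the list newest-first
           if c["context"] in seen:
               updates += 1             # an older run of the same check
               continue
           seen.add(c["context"])
           latest.append(c)
       return latest, updates

   # latest_checks([{"context": "build", "state": "fail"},
   #                {"context": "build", "state": "success"}])
   # -> ([{"context": "build", "state": "success"}], 1)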
diff --git a/check_fetcher.py b/check_fetcher.py index 98e4751..7b811fc 100755 --- a/check_fetcher.py +++ b/check_fetcher.py @@ -34,8 +34,9 @@ def main(): config = configparser.ConfigParser() config.read(['nipa.config', 'pw.config', 'checks.config']) + log_dir = config.get('log', 'dir', fallback=NIPA_DIR) log_init(config.get('log', 'type', fallback='org'), - config.get('log', 'file', fallback=os.path.join(NIPA_DIR, "checks.org")), + config.get('log', 'file', fallback=os.path.join(log_dir, "checks.org")), force_single_thread=True) rdir = config.get('dirs', 'results', fallback=os.path.join(NIPA_DIR, "results")) @@ -57,6 +58,7 @@ def main(): json_resp = pw.get_patches_all(delegate=delegate, since=since) jdb = [] old_unchanged = 0 + check_updates = 0 seen_pids = set() for p in json_resp: pdate = datetime.datetime.fromisoformat(p["date"]) @@ -68,8 +70,14 @@ def main(): continue seen_pids.add(p["id"]) - checks = pw.request(p["checks"]) - for c in checks: + seen_checks = set() + checks = pw.request_all(p["checks"]) + for c in reversed(checks): + if c["context"] in seen_checks: + check_updates += 1 + continue + seen_checks.add(c["context"]) + info = { "id": p["id"], "date": p["date"], @@ -101,7 +109,7 @@ def main(): new_db.append(row) new_db += jdb print(f'Old db: {len(old_db)}, retained: {old_stayed}') - print(f'Fetching: patches: {len(json_resp)}, patches old-unchanged: {old_unchanged}, checks fetched: {len(jdb)}') + print(f'Fetching: patches: {len(json_resp)}, patches old-unchanged: {old_unchanged}, checks fetched: {len(jdb)}, checks were updates: {check_updates}') print(f'Writing: refreshed: {skipped}, new: {len(new_db) - old_stayed}, expired: {horizon_gc} new len: {len(new_db)}') with open(tgt_json, "w") as fp: diff --git a/contest/backend/query.py b/contest/backend/query.py new file mode 100644 index 0000000..9e73c5b --- /dev/null +++ b/contest/backend/query.py @@ -0,0 +1,304 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + + +from flask import Flask +from flask import Response +from flask import request +import json +import psycopg2 +import os +import re +import datetime + + +app = Flask("NIPA contest query") + +db_name = os.getenv('DB_NAME') +psql = psycopg2.connect(database=db_name) +psql.autocommit = True + +# How many branches to query to get flakes for last month +flake_cnt = 300 + + +@app.route('/') +def hello(): + return '
<h1>boo!</h1>
' + + +@app.route('/branches') +def branches(): + with psql.cursor() as cur: + cur.execute("SELECT branch, t_date, base, url FROM branches ORDER BY t_date DESC LIMIT 40") + rows = [{"branch": r[0], "date": r[1].isoformat() + "+00:00", "base": r[2], "url": r[3]} for r in cur.fetchall()] + rows.reverse() + return rows + + +def get_oldest_branch_date(br_cnt, br_pfx=None): + """ + Find the branch_date of the oldest branch that should be included + based on the requested number of branches. + Returns the cutoff date string or None if no limit should be applied. + """ + with psql.cursor() as cur: + # Slap the -2 in here as the first letter of the date, + # to avoid prefix of prefix matches + pfx_flt = f"WHERE branch LIKE '{br_pfx}-2%' " if br_pfx else "" + + order_limit = f"ORDER BY branch_date DESC LIMIT {br_cnt}" + + # Get unique branch dates from both tables, ordered by date descending + # We use UNION to combine unique branch_dates from both tables + # Make sure to limit both sides to avoid a huge merge + query = f""" + (SELECT DISTINCT branch_date FROM results {pfx_flt} {order_limit}) + UNION + (SELECT DISTINCT branch_date FROM results_pending {pfx_flt} {order_limit}) + {order_limit} + """ + + cur.execute(query) + rows = cur.fetchall() + + if len(rows) < br_cnt: + # DB doesn't have enough data, no need to limit + return None + + # Return the oldest branch_date from our limit + return rows[-1][0] # Last row is the oldest due to DESC order + + +def result_as_l2(raw): + row = json.loads(raw) + flat = [] + + for l1 in row["results"]: + if "results" not in l1: + flat.append(l1) + else: + for case in l1["results"]: + data = l1.copy() + del data["results"] + if "time" in data: + del data["time"] + # in case of retry, the subtest might not have been re-executed + if "retry" in data: + del data["retry"] + data |= case + data["test"] = l1["test"] + '.' 
+ case["test"] + flat.append(data) + row["results"] = flat + return json.dumps(row) + + +@app.route('/results') +def results(): + limit = 0 + where = [] + log = "" + + form = request.args.get('format') + pending = request.args.get('pending') in {'1', 'y', 'yes', 'true'} + remote = request.args.get('remote') + if remote and re.match(r'^[\w_ -]+$', remote) is None: + remote = None + + br_name = request.args.get('branch-name') + if br_name: + if re.match(r'^[\w_ -]+$', br_name) is None: + return {} + + br_cnt = br_name + limit = 100 + where.append(f"branch = '{br_name}'") + t1 = t2 = datetime.datetime.now() + else: + t1 = datetime.datetime.now() + + br_cnt = request.args.get('branches') + try: + br_cnt = int(br_cnt) + except (TypeError, ValueError): + br_cnt = None + if not br_cnt: + br_cnt = 10 + + br_pfx = request.args.get('br-pfx') + if br_pfx: + # Slap the -2 in here as the first letter of the date, to avoid prefix of prefix matches + where.append(f"branch LIKE '{br_pfx}-2%'") + + # Get the cutoff date for the requested number of branches + cutoff_date = get_oldest_branch_date(br_cnt, br_pfx) + if cutoff_date: + where.append(f"branch_date >= '{cutoff_date}'") + + t2 = datetime.datetime.now() + + # Set a reasonable limit to prevent runaway queries + limit = 10000 + + if remote: + where.append(f"remote = '{remote}'") + log += ', remote' + + where = "WHERE " + " AND ".join(where) if where else "" + + if not form or form == "normal": + with psql.cursor() as cur: + cur.execute(f"SELECT json_normal FROM results {where} ORDER BY branch_date DESC LIMIT {limit}") + all_rows = [r[0] for r in cur.fetchall()] + + if pending: + # Get pending results from results_pending table + cur.execute(f""" + SELECT json_build_object( + 'branch', branch, + 'remote', remote, + 'executor', executor, + 'start', (t_start AT TIME ZONE 'UTC')::text, + 'end', null, + 'results', null + )::text + FROM results_pending {where} ORDER BY branch_date DESC LIMIT {limit} + """) + all_rows += [r[0] for r in cur.fetchall()] + rows = "[" + ",".join(all_rows) + "]" + elif form == "l2": + with psql.cursor() as cur: + # Get completed results only, pending + l2 makes no sense + cur.execute(f"SELECT json_normal, json_full FROM results {where} ORDER BY branch_date DESC LIMIT {limit}") + rows = "[" + for r in cur.fetchall(): + if rows[-1] != '[': + rows += ',' + if r[1] and len(r[1]) > 50: + rows += result_as_l2(r[1]) + else: + rows += r[0] + rows += ']' + log += ', l2' + else: + rows = "[]" + + t3 = datetime.datetime.now() + print(f"Query for {br_cnt} branches, {limit} records{log} took: {str(t3-t1)} ({str(t2-t1)}+{str(t3-t2)})") + + return Response(rows, mimetype='application/json') + + +@app.route('/remotes') +def remotes(): + t1 = datetime.datetime.now() + + with psql.cursor() as cur: + cur.execute("SELECT remote FROM results GROUP BY remote LIMIT 50") + rows = [r[0] for r in cur.fetchall()] + + t2 = datetime.datetime.now() + print(f"Query for remotes: {str(t2-t1)}") + + return rows + + +@app.route('/stability') +def stability(): + # auto = query only tests which NIPA ignores based on stability + auto = request.args.get('auto') + + where = "" + if auto == "y" or auto == '1' or auto == 't': + where = "WHERE autoignore = true" + elif auto == "n" or auto == '0' or auto == 'f': + where = "WHERE autoignore = false" + + with psql.cursor() as cur: + cur.execute(f"SELECT * FROM stability {where}") + + columns = [desc[0] for desc in cur.description] + rows = cur.fetchall() + # Convert each row to a dictionary with column names as keys + data = 
[{columns[i]: value for i, value in enumerate(row)} for row in rows] + + return data + + +@app.route('/device-info') +def dev_info(): + with psql.cursor() as cur: + cur.execute("SELECT * FROM devices_info") + + columns = [desc[0] for desc in cur.description] + rows = cur.fetchall() + # Convert each row to a dictionary with column names as keys + data = [{columns[i]: value for i, value in enumerate(row)} for row in rows] + + return data + + +@app.route('/flaky-tests') +def flaky_tests(): + """ + Returns tests that are flaky (first try fails, retry passes, and no crash). + """ + global flake_cnt + limit = request.args.get('limit') + try: + limit = int(limit) + month = False + except (TypeError, ValueError): + month = True # Default to querying last month + limit = flake_cnt # Default limit + + t = datetime.datetime.now() + with psql.cursor() as cur: + # Query for tests where first try failed, retry passed, and no crash + query = f""" + SELECT remote, executor, test, branch, branch_date + FROM results, jsonb_to_recordset(json_normal::jsonb->'results') as + x(test text, result text, retry text, crashes text) + WHERE x.result = 'fail' + AND x.retry = 'pass' + AND x.crashes IS NULL + ORDER BY branch_date DESC LIMIT {limit}; + """ + + cur.execute(query) + rows = cur.fetchall() + + print(f"Query for flaky tests took: {str(datetime.datetime.now() - t)}") + + weeks_ago = [] + for weeks in range(1, 5): + target_date = datetime.datetime.now() - datetime.timedelta(weeks=weeks) + weeks_ago.append(target_date.strftime("%Y-%m-%d--%H-%M")) + + cnt = 0 + res = {} + for row in rows: + rem, exe, test, branch, br_date = row + key = (rem, exe, test) + if not month: + res[key] = res.get(key, 0) + 1 + else: + if key not in res: + res[key] = [0, 0, 0, 0] + + for i in range(len(weeks_ago)): + if br_date >= weeks_ago[i]: + res[key][i] += 1 + break + else: + break # stop looking at rows, the records are sorted by date + cnt += 1 + # JSON needs a simple array, not a dict + data = [] + for k, v in res.items(): + data.append({"remote": k[0], "executor": k[1], "test": k[2], "count": v}) + + if month: + # Overcount by 30 to account for fluctuation in flakiness + flake_cnt = cnt + 30 + return data diff --git a/contest/cidiff b/contest/cidiff new file mode 100755 index 0000000..1f18103 --- /dev/null +++ b/contest/cidiff @@ -0,0 +1,54 @@ +#!/bin/bash + +# Use, either: +# cidiff branch1 branch2 +# or +# cidiff +# to diff two "newest" branches (last two in git branch -a output). 
+ +BRANCH1=$1 +BRANCH2=$2 + +if [ x$BRANCH1$BRANCH2 == x ]; then + echo "No branches specified, using two most recent:" + branches=( $(git branch -a | tail -2) ) + BRANCH1=${branches[0]} + BRANCH2=${branches[1]} +elif [ x$BRANCH2 == x ]; then + echo "Single branch specified, using that and the previous one:" + branches=( $(git branch -a | grep -B1 "$1") ) + BRANCH1=${branches[0]} + BRANCH2=${branches[1]} +fi + +echo " " $BRANCH1 "("$(git describe $BRANCH1)")" +echo " " $BRANCH2 "("$(git describe $BRANCH2)")" +echo + +get_base() { + git log -1 --oneline \ + --grep="Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net" $1 | cut -d' ' -f1 +} + +base1=$(get_base $BRANCH1) +base2=$(get_base $BRANCH2) + +if git diff --exit-code --stat $base1 $base2 >/dev/null; then + echo "==== BASE IDENTICAL ====" +else + echo "==== BASE DIFF ====" + git --no-pager diff --stat $base1 $base2 + echo + echo +fi + +tmp1=$(mktemp) +tmp2=$(mktemp) + +git log --format="%s" $base1..$BRANCH1 > $tmp1 +git log --format="%s" $base2..$BRANCH2 > $tmp2 + +echo "==== COMMIT DIFF ====" +git --no-pager diff --no-index $tmp1 $tmp2 + +exit 0 diff --git a/contest/cidiff.py b/contest/cidiff.py new file mode 100755 index 0000000..80ee139 --- /dev/null +++ b/contest/cidiff.py @@ -0,0 +1,537 @@ +#!/usr/bin/env python3 + +import argparse +import os +import subprocess +import tempfile +import sys +import re +import urllib.parse +from datetime import datetime, timedelta + +html_template = """ + + + + + NIPA {branch2} info + + + + + + + + + +
+
+

NIPA Branch {branch2}

+
+
+ Branches +
+ + +
+
+
{branch2_html} (current)\n {branch1_html} (comparison){compare_link}
+
+ +
+
Base trees
+
{ancestor_info}\n{base_diff}
+
+ +
+
+ New patches +
+ +
+
+
{commit_diff}
+
+ +
+
+ Test results +
+
+ +
+
+
+ + +""" + + +def parse_branch_datetime(branch_name): + """Extract date and time from branch name format like 'net-next-2025-06-28--21-00'.""" + match = re.search(r'(.*?)(\d{4}-\d{2}-\d{2})--(\d{2})-(\d{2})', branch_name) + if match: + date_str = match.group(2) + hour_str = match.group(3) + minute_str = match.group(4) + try: + return match.group(1), datetime.strptime(f"{date_str} {hour_str}:{minute_str}", "%Y-%m-%d %H:%M") + except ValueError: + return None, None + return None, None + +def generate_next_branch_name(branch1, branch2): + """Generate the next branch name based on the time difference between branch1 and branch2.""" + # Parse datetime from branch names + prefix, dt1 = parse_branch_datetime(branch1) + prefix, dt2 = parse_branch_datetime(branch2) + if not prefix or not dt1 or not dt2: + return None + + # Calculate time difference + time_diff = dt2 - dt1 + next_dt = dt2 + time_diff + return f"{prefix}{next_dt.strftime('%Y-%m-%d--%H-%M')}" + +# Format branch names for display and file paths +def branch_name_clear(name): + if not name: + return None + name = name.strip() + if name.startswith('remotes/') and name.count('/') >= 2: + name = "/".join(name.split('/')[2:]) + return name + +def generate_html(args, branch1, branch2, base_diff_output, commit_diff_output, + ancestor_info=None, committed=None): + """Generate HTML output for the diff.""" + # Generate next branch name + branch1 = branch_name_clear(branch1) + branch2 = branch_name_clear(branch2) + next_branch = generate_next_branch_name(branch1, branch2) + + # URL encode branch2 for the contest results iframe + branch2_encoded = urllib.parse.quote(branch2) + + # Process diff output to add HTML styling + def process_diff(diff_text): + if not diff_text: + return "

No differences found.

" + + lines = [] + for line in diff_text.split('\n'): + if line.startswith('---') or line.startswith('+++') or line.startswith('index') or line.startswith('diff --git'): + pass + elif line.startswith('+') and not line.startswith('+++'): + lines.append(f'
[+] {line[1:]}
') + elif line.startswith('-') and not line.startswith('---'): + title = line[1:] + if title in committed: + lines.append(f'
[c] {title}
') + else: + lines.append(f'
[-] {line[1:]}
') + elif line.startswith('@@'): + lines.append(f'
{line}
') + else: + lines.append(f'
{line}
') + + return ''.join(lines) + + # Process the diff outputs + processed_ancestor_info = process_diff(ancestor_info) + processed_commit_diff = process_diff(commit_diff_output) + compare_link = "" + + github_url = args.github_url + if github_url: + # Remove trailing slash if present + if github_url.endswith('/'): + github_url = github_url[:-1] + + compare_link = f'
Compare code
' + + branch1_html = f'{branch1}' + branch2_html = f'{branch2}' + else: + branch1_html = branch1 + branch2_html = branch2 + compare_link = "" + + # Generate the HTML + html = html_template.format( + branch1=branch1, + branch2=branch2, + branch1_html=branch1_html, + branch2_html=branch2_html, + compare_link=compare_link, + ancestor_info=processed_ancestor_info, + base_diff=base_diff_output, + commit_diff=processed_commit_diff, + prev_url=f"{branch1}.html", + next_url=f"{next_branch}.html" if next_branch else '', + branch2_encoded=branch2_encoded + ) + + return html + +def text_print(args: argparse.Namespace, message: str) -> None: + """Print message to stdout only if HTML output is not requested.""" + if not args.html: + print(message) + +def run_command(cmd): + """Run a shell command and return its output.""" + result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + return result.stdout.strip() + +def get_base(branch): + """Get the base commit for a branch.""" + cmd = f"git log -1 --format='%h' --grep=\"Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net\" {branch}" + return run_command(cmd) + +def get_common_ancestor(commit1, commit2): + """Find the common ancestor of two commits.""" + cmd = f"git merge-base {commit1} {commit2}" + return run_command(cmd) + +def get_commit_list(start_commit, end_commit): + """Get a list of commits between start_commit and end_commit.""" + cmd = f"git log --format='%h#%s' {start_commit}..{end_commit}" + commits = run_command(cmd) + # Skip the first line, it's the net/main merge commit + return [x.split("#", 1) for x in reversed(commits.split('\n')[1:])] + +def get_base_diff(base1, base2): + """Get the diff between two base commits.""" + # Find common ancestor between the base commits + common_ancestor = get_common_ancestor(base1, base2) + + # Get commit lists between common ancestor and base commits + commits1 = get_commit_list(common_ancestor, base1) + commits2 = get_commit_list(common_ancestor, base2) + + committed = set() + diff_list = [] + + set1 = set([x for x, _ in commits1]) + set2 = set([x for x, _ in commits2]) + for h, s in commits1: + if h not in set2: + diff_list.append("-" + s) + for h, s in commits2: + if h not in set1: + diff_list.append("+" + s) + committed.add(s) + return "\n".join(diff_list), committed + +def main(): + parser = argparse.ArgumentParser(description='Compare two git branches.') + parser.add_argument('branch1', nargs='?', default=None, help='First branch to compare') + parser.add_argument('branch2', nargs='?', default=None, help='Second branch to compare') + parser.add_argument('--html', '-H', action='https://github.com/store_true', help='Generate HTML output') + parser.add_argument('--output', '-o', help='Output file for HTML (default: cidiff_result.html)') + parser.add_argument('--github-url', '-g', help='GitHub repository URL (to create branch links in HTML output)') + args = parser.parse_args() + + branch1 = args.branch1 + branch2 = args.branch2 + + # Determine which branches to compare + if not branch1 and not branch2: + text_print(args, "No branches specified, using two most recent:") + branches = run_command("git branch -a | tail -2").split('\n') + branch1 = branches[0].strip() + branch2 = branches[1].strip() if len(branches) > 1 else None + elif branch1 and not branch2: + text_print(args, "Single branch specified, using that and the previous one:") + branches = run_command(f"git branch -a | grep -B1 \"{branch1}\"").split('\n') + branch1 = branches[0].strip() + branch2 = 
branches[1].strip() if len(branches) > 1 else None + + if not branch2: + print("Error: Could not determine second branch.") + sys.exit(1) + + text_print(args, f" {branch1} ({run_command(f'git describe {branch1}')})") + text_print(args, f" {branch2} ({run_command(f'git describe {branch2}')})") + text_print(args, "") + + # Get base commits + base1 = get_base(branch1) + base2 = get_base(branch2) + + # Compare base commits + result = subprocess.run(f"git diff --exit-code --stat {base1} {base2}", + shell=True, capture_output=True, text=True) + + base_diff_output = "" + base_diff_status = "" + if result.returncode == 0: + base_diff_status = "==== BASE IDENTICAL ====" + base_diff_list, committed = "", set() + else: + base_diff_status = "==== BASE DIFF ====" + base_diff_output = run_command(f"git --no-pager diff --stat {base1} {base2}") + base_diff_list, committed = get_base_diff(base1, base2) + + text_print(args, base_diff_status) + if base_diff_output: + text_print(args, base_diff_output) + text_print(args, "") + text_print(args, base_diff_list + "\n") + + # Create temporary files with commit messages + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as tmp1, \ + tempfile.NamedTemporaryFile(mode='w+', delete=False) as tmp2: + + tmp1_path = tmp1.name + tmp2_path = tmp2.name + + tmp1.write(run_command(f"git log --format=\"%s\" {base1}..{branch1}")) + tmp2.write(run_command(f"git log --format=\"%s\" {base2}..{branch2}")) + tmp1.write("\n") + tmp2.write("\n") + + # Compare commit lists + if not args.html: + print("==== COMMIT DIFF ====") + subprocess.run(f"git --no-pager diff --no-index {tmp1_path} {tmp2_path}", shell=True) + else: + commit_diff_result = subprocess.run( + f"git --no-pager diff -U500 --no-index {tmp1_path} {tmp2_path}", + shell=True, capture_output=True, text=True + ) + commit_diff_output = commit_diff_result.stdout if commit_diff_result.stdout else commit_diff_result.stderr + + html_output = generate_html(args, branch1, branch2, base_diff_output, + commit_diff_output, + base_diff_list, committed) + if args.output: + with open(args.output, 'w') as f: + f.write(html_output) + print(f"HTML output written to {args.output}") + else: + print(html_output) + + # Clean up temporary files + os.unlink(tmp1_path) + os.unlink(tmp2_path) + +if __name__ == "__main__": + main() diff --git a/contest/cithreadmap b/contest/cithreadmap new file mode 100755 index 0000000..dcc2042 --- /dev/null +++ b/contest/cithreadmap @@ -0,0 +1,42 @@ +#!/bin/bash + +# expect URL to to the base dir as the only agrumnet +# base dir is the one below test outputs, where "config" is +[ -z "$1" ] && echo "Usage: $0 DIR_URL" && exit 1 +URL="$1" + +index=$(mktemp) +info=$(mktemp) + +declare -A worker_to_test + +clr() { + echo -ne " \r" +} + +curl -s $URL > $index + +i=0 +for subtest in $(cat $index | sed -n 's@ $info + + thr=$(cat $info | awk '/thr-id/ { print $2; }') + vm=$(cat $info | awk '/vm-id/ { print $2; }') + + worker_to_test["Thread$thr-VM$vm"]=${worker_to_test["Thread$thr-VM$vm"]}" "$subtest +done + +clr +echo "Fetched $i subtests" + +for key in ${!worker_to_test[@]}; do + echo $key + for value in ${worker_to_test[$key]}; do + echo -e '\t' $value + done +done + +rm $index $info diff --git a/contest/contest.js b/contest/contest.js deleted file mode 100644 index 29b2475..0000000 --- a/contest/contest.js +++ /dev/null @@ -1,179 +0,0 @@ -function colorify_str(value) -{ - if (value == "pass") { - ret = ''; - } else if (value == "skip") { - ret = ''; - } else { - ret = ''; - } - return ret + value + ''; -} - 
-function pw_filter_r(v, r, drop_reported) -{ - if (loaded_filters == null) - return false; - - var reported_exec = false; - for (const exec of loaded_filters.executors) { - if (v.executor == exec) { - reported_exec = true; - break; - } - } - - if (reported_exec == false && drop_reported == true) - return false; - - var reported_test = true; - for (const test of loaded_filters["ignore-tests"]) { - if (r.group == test.group && r.test == test.test) { - reported_test = false; - break; - } - } - if ((reported_test && reported_exec) == drop_reported) - return true; - - return false; -} - -function load_result_table(data_raw) -{ - var table = document.getElementById("results"); - var result_filter = { - "pass": document.getElementById("pass").checked, - "skip": document.getElementById("skip").checked, - "warn": document.getElementById("warn").checked, - "fail": document.getElementById("fail").checked - }; - var branch_filter = document.getElementById("branch").value; - var exec_filter = document.getElementById("executor").value; - var remote_filter = document.getElementById("remote").value; - var test_filter = document.getElementById("test").value; - var pw_n = document.getElementById("pw-n").checked; - var pw_y = document.getElementById("pw-y").checked; - - // Remove all rows but first (leave headers) - $("#results tr").slice(1).remove(); - - let warn_box = document.getElementById("fl-warn-box"); - if (!exec_filter && !test_filter && !branch_filter) { - warn_box.innerHTML = "Set an executor, branch or test filter. Otherwise this page will set your browser on fire..."; - return; - } else { - warn_box.innerHTML = ""; - } - - $.each(data_raw, function(i, v) { - if (branch_filter && - branch_filter != v.branch) - return 1; - if (exec_filter && - exec_filter != v.executor) - return 1; - if (remote_filter && - remote_filter != v.remote) - return 1; - - $.each(v.results, function(j, r) { - if (test_filter && - r.test != test_filter) - return 1; - if (result_filter[r.result] == false) - return 1; - if (pw_y == false && pw_filter_r(v, r, true)) - return 1; - if (pw_n == false && pw_filter_r(v, r, false)) - return 1; - - var row = table.insertRow(); - - var date = row.insertCell(0); - var branch = row.insertCell(1); - var remote = row.insertCell(2); - var exe = row.insertCell(3); - var group = row.insertCell(4); - var test = row.insertCell(5); - var res = row.insertCell(6); - - date.innerHTML = v.end.toLocaleString(); - branch.innerHTML = v.branch; - remote.innerHTML = v.remote; - exe.innerHTML = v.executor; - group.innerHTML = r.group; - test.innerHTML = "" + r.test + ""; - res.innerHTML = colorify_str(r.result); - }); - }); -} - -function add_option_filter(data_raw, elem_id, field) -{ - var elem = document.getElementById(elem_id); - var values = new Set(); - - $.each(data_raw, function(i, v) { - values.add(v[field]); - }); - for (const value of values) { - const opt = document.createElement('option'); - opt.value = value; - opt.innerHTML = value; - elem.appendChild(opt); - } -} - -function results_update() -{ - load_result_table(loaded_data); -} - -let xfr_todo = 2; -let loaded_data = null; -let loaded_filters = null; - -function loaded_one() -{ - if (--xfr_todo) - return; - - // We have all JSONs now, do processing. 
- add_option_filter(loaded_data, "branch", "branch"); - add_option_filter(loaded_data, "executor", "executor"); - add_option_filter(loaded_data, "remote", "remote"); - - nipa_filters_set_from_url(); - nipa_filters_enable(results_update); - - results_update(); -} - -function filters_loaded(data_raw) -{ - loaded_filters = data_raw; - loaded_one(); -} - -function results_loaded(data_raw) -{ - $.each(data_raw, function(i, v) { - v.start = new Date(v.start); - v.end = new Date(v.end); - }); - data_raw.sort(function(a, b){return b.end - a.end;}); - - loaded_data = data_raw; - loaded_one(); -} - -function do_it() -{ - $(document).ready(function() { - $.get("contest/filters.json", filters_loaded) - }); - $(document).ready(function() { - $.get("contest/all-results.json", results_loaded) - }); -} diff --git a/contest/flakes.js b/contest/flakes.js deleted file mode 100644 index e9c6df4..0000000 --- a/contest/flakes.js +++ /dev/null @@ -1,192 +0,0 @@ -function colorify(cell, value) -{ - if (value == "") { - ret = ""; - } else if (value == "pass") { - ret = "background-color:green"; - } else if (value == "skip") { - ret = "background-color:blue"; - } else { - ret = "background-color:red"; - } - cell.setAttribute("style", ret); -} - -function pw_filter_r(v, r, drop_reported) -{ - if (loaded_filters == null) - return false; - - var reported_exec = false; - for (const exec of loaded_filters.executors) { - if (v.executor == exec) { - reported_exec = true; - break; - } - } - - if (reported_exec == false && drop_reported == true) - return false; - - var reported_test = true; - for (const test of loaded_filters["ignore-tests"]) { - if (r.group == test.group && r.test == test.test) { - reported_test = false; - break; - } - } - if ((reported_test && reported_exec) == drop_reported) - return true; - - return false; -} - -function get_sort_key() -{ - if (document.getElementById("sort-streak").checked) - return "streak"; - return "cnt"; -} - -function load_result_table(data_raw) -{ - // Get all branch names - var branch_set = new Set(); - $.each(data_raw, function(i, v) { - branch_set.add(v.branch); - }); - let br_cnt = document.getElementById("br-cnt").value; - const branches = Array.from(branch_set).slice(0, br_cnt); - - // Build the result map - var pw_n = document.getElementById("pw-n").checked; - var pw_y = document.getElementById("pw-y").checked; - let needle = document.getElementById("tn-needle").value; - - var test_row = {}; - - $.each(data_raw, function(i, v) { - $.each(v.results, function(j, r) { - if (pw_y == false && pw_filter_r(v, r, true)) - return 1; - if (pw_n == false && pw_filter_r(v, r, false)) - return 1; - - const tn = v.remote + '/' + r.group + '/' + r.test; - if (needle && !tn.includes(needle)) - return 1; - - if (!(tn in test_row)) { - test_row[tn] = {}; - for (let i = 1; i <= branches.length; i++) - test_row[tn][branches[i - 1]] = ""; - } - test_row[tn][v.branch] = r.result; - }); - }); - - // Sort from most to least flaky - for (const [tn, entries] of Object.entries(test_row)) { - let count = 0, streak = 0; - let prev = "pass"; - - for (let i = 0; i < branches.length; i++) { - let current = entries[branches[i]]; - - if (current == "pass" && count == 0) - streak++; - - if (current != "" && current != prev) { - prev = current; - count++; - } - } - test_row[tn]["cnt"] = count; - test_row[tn]["streak"] = streak; - } - - // Filter out those not flaky enough to show - var min_flip = document.getElementById("min-flip").value; - let test_names = Array.from(Object.keys(test_row)); - test_names = 
test_names.filter(function(a){return test_row[a].cnt >= min_flip;}); - // Sort by the right key - var sort_key = get_sort_key(); - test_names.sort( - function(a, b) { return test_row[b][sort_key] - test_row[a][sort_key]; } - ); - - // Remove all rows but first (leave headers) - $("#results tr").remove(); - // Display - let table = document.getElementById("results"); - - let header = table.insertRow(); - header.insertCell(0); // name - for (let i = 0; i < branches.length; i++) { - let cell = header.insertCell(i + 1); - cell.innerHTML = branches[i]; - cell.setAttribute("style", "writing-mode: tb-rl; font-size: 0.8em; padding: 0px;"); - } - - for (const tn of test_names) { - let entries = test_row[tn]; - let row = table.insertRow(); - - let name = row.insertCell(0); - name.innerHTML = tn; - name.setAttribute("style", "padding: 0px"); - - for (let i = 0; i < branches.length; i++) { - let cell = row.insertCell(i + 1); - colorify(cell, entries[branches[i]]); - } - } -} - -function results_update() -{ - load_result_table(loaded_data); -} - -let xfr_todo = 2; -let loaded_data = null; -let loaded_filters = null; - -function loaded_one() -{ - if (--xfr_todo) - return; - - // We have all JSONs now, do processing. - nipa_filters_set_from_url(); - nipa_filters_enable(results_update); - results_update(); -} - -function filters_loaded(data_raw) -{ - loaded_filters = data_raw; - loaded_one(); -} - -function results_loaded(data_raw) -{ - $.each(data_raw, function(i, v) { - v.start = new Date(v.start); - v.end = new Date(v.end); - }); - data_raw.sort(function(a, b){return b.end - a.end;}); - - loaded_data = data_raw; - loaded_one(); -} - -function do_it() -{ - $(document).ready(function() { - $.get("contest/filters.json", filters_loaded) - }); - $(document).ready(function() { - $.get("contest/all-results.json", results_loaded) - }); -} diff --git a/contest/nipa.js b/contest/nipa.js deleted file mode 100644 index 64d3dd5..0000000 --- a/contest/nipa.js +++ /dev/null @@ -1,33 +0,0 @@ -function nipa_filters_enable(update_cb) -{ - let warn_box = document.getElementById("fl-warn-box"); - warn_box.innerHTML = ""; - - const fl_pw = document.querySelectorAll("[name=fl-pw]"); - for (const one of fl_pw) { - one.addEventListener("change", update_cb); - one.disabled = false; - } -} - -function nipa_filters_set_from_url() -{ - const urlParams = new URLSearchParams(window.location.search); - const filters = document.querySelectorAll("[name=fl-pw]"); - - for (const elem of filters) { - let url_val = urlParams.get(elem.id); - - if (!url_val) - continue; - - if (elem.hasAttribute("checked")) { - if (url_val == "0") - elem.checked = false; - else if (url_val == "1") - elem.checked = true; - } else { - elem.value = url_val; - } - } -} diff --git a/contest/remote/core b/contest/remote/core new file mode 120000 index 0000000..58377d5 --- /dev/null +++ b/contest/remote/core @@ -0,0 +1 @@ +../../core/ \ No newline at end of file diff --git a/contest/remote/exec.py b/contest/remote/exec.py index 2799674..47454e1 100755 --- a/contest/remote/exec.py +++ b/contest/remote/exec.py @@ -6,6 +6,7 @@ import os import subprocess +from core import NipaLifetime from lib import Fetcher @@ -18,6 +19,8 @@ test=test-name [bin] exec=./script.sh +[env] +paths=/extra/exec/PATH:/another/bin [remote] branches=https://url-to-branches-manifest [local] @@ -25,6 +28,7 @@ json_path=base-relative/path/to/json results_path=base-relative/path/to/raw/outputs tree_path=/root-path/to/kernel/git +patches_path=/root-path/to/patches/dir [www] 
url=https://url-to-reach-base-path """ @@ -42,6 +46,8 @@ def test(binfo, rinfo, config): env['BRANCH'] = binfo['branch'] env['BASE'] = binfo['base'] env['RESULTS_DIR'] = results_path + if config.get('env', 'paths', fallback=None): + env['PATH'] = config.get('env', 'paths') + ':' + env['PATH'] bin = config.get('bin', 'exec').split() process = subprocess.Popen(bin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, @@ -81,14 +87,20 @@ def main() -> None: base_dir = config.get('local', 'base_path') + life = NipaLifetime(config) + f = Fetcher(test, config, name=config.get('executor', 'name'), branches_url=config.get('remote', 'branches'), results_path=os.path.join(base_dir, config.get('local', 'json_path')), url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), - tree_path=config.get('local', 'tree_path')) + tree_path=config.get('local', 'tree_path'), + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, + first_run=config.get('executor', 'init', fallback="continue")) f.run() + life.exit() if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/contest/remote/gh.py b/contest/remote/gh.py new file mode 100755 index 0000000..69b95e8 --- /dev/null +++ b/contest/remote/gh.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import configparser +import datetime +import os +import requests +import subprocess +import sys +import time + +from core import NipaLifetime +from lib import Fetcher, CbArg, namify + +""" +[executor] +name= +group= +test= +[remote] +branches= +[local] +tree_path= +base_path= +results_path= +json_path= +[www] +url= + +[gh] +token=api-token +base=base/branch +link=https://full/link +out_remote=remote-name +out_branch=remote-branch +wait_first=secs-to-first-check +wait_poll=secs-between-rechecks +wait_max=secs-to-wait +[ci] +owner=gh-owner +repo=gh-repo +runs_ref=refs/pull/... 
+""" + +def get(url, token): + headers = {"Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28", + "Authorization": token} + return requests.get(url, headers=headers) + + +def link(runid, config): + return "https://github.com/" + \ + config.get('ci', 'owner') + "/" + \ + config.get('ci', 'repo') + "/" + \ + "actions/runs/" + str(runid) + + +def gh_namify(name): + # This may be pretty BPF specific, the test name looks like: + # x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc + name = ' / '.join(name.split(' / ')[:2]) + name = name.replace('test (test', '') + return namify(name) + + +def get_jobs_page(config, repo_url, found, token, page=1, res=None): + resp = get(repo_url + f'/actions/runs/{found["id"]}/jobs?page={page}', token) + jobs = resp.json() + + if 'jobs' not in jobs: + print("bad jobs", jobs) + return None + + if len(jobs['jobs']) == 0: + if page == 1: + print("short jobs", jobs) + return res + # Must be page 1, init res to empty array + if res is None: + res = [] + + decoder = { + 'success': 0, + 'skipped': 1, + None: 2, + 'failure': 3, + 'cancelled': 4, + 'unknown': 5, + } + encoder = { + 0: 'pass', + 1: 'pass', + 2: None, + 3: 'fail', + 4: 'fail', + 5: 'fail', + } + + url = link(found["id"], config) + for job in jobs["jobs"]: + if job["conclusion"] is None: + print("Still running, waiting for job:", job["name"]) + return None + if job["conclusion"] == 'skipped': + continue + + if job["conclusion"] in decoder: + result = encoder[decoder[job["conclusion"]]] + else: + print("Unknown result:", job["conclusion"]) + result = 'fail' + + test_link = job.get('html_url', url) + + res.append({'test': gh_namify(job["name"]), + 'group': config.get('executor', 'group'), + 'result': result, 'link': test_link}) + if not res: + print(f"Still waiting, {len(jobs['jobs'])} jobs skipped") + return get_jobs_page(config, repo_url, found, token, page=(page + 1), res=res) + + +def get_results(config, cbarg, prev_run, page=1): + token = config.get('gh', 'token') + repo_url = f"https://api.github.com/repos/%7Bconfig.get('ci', 'owner')}/{config.get('ci', 'repo')}" + ref = config.get('ci', 'runs_ref') + + resp = get(repo_url + f'/actions/runs?page={page}', token) + runs = resp.json() + found = None + for run in runs.get('workflow_runs', []): + if ref in [r['ref'] for r in run['referenced_workflows']]: + if found is None or found["id"] < run["id"]: + found = run + if found is None: + if page < 10: + return get_results(config, cbarg, prev_run, page=(page + 1)) + print(f"Run not found, tried all {page} pages!") + return None + if prev_run == found["id"]: + print("Found old run:", prev_run) + return None + cbarg.prev_runid = found["id"] + + return get_jobs_page(config, repo_url, found, token) + + +def test_run(binfo, rinfo, cbarg, config, start): + tree_path = config.get('local', 'tree_path') + base = config.get('gh', 'base') + + subprocess.run('git checkout ' + base, cwd=tree_path, shell=True, check=True) + res = subprocess.run('git merge ' + rinfo['branch-ref'], + cwd=tree_path, shell=True) + if res.returncode != 0: + # If rerere fixed it, just commit + res = subprocess.run('git diff -s --exit-code', cwd=tree_path, shell=True) + if res.returncode != 0: + return [{'test': config.get('executor', 'test'), + 'group': config.get('executor', 'group'), + 'result': 'skip', 'link': config.get('gh', 'link')}] + + subprocess.run('git commit --no-edit', cwd=tree_path, shell=True, check=True) + + out_remote = config.get('gh', 'out_remote') + out_branch 
= config.get('gh', 'out_branch') + + subprocess.run(f'git push -f {out_remote} HEAD:{out_branch}', + cwd=tree_path, shell=True, check=True) + + end = start + datetime.timedelta(seconds=config.getint('gh', 'wait_max')) + time.sleep(config.getint('gh', 'wait_first')) + + prev_runid = 0 + if hasattr(cbarg, "prev_runid"): + prev_runid = cbarg.prev_runid + + while datetime.datetime.now() < end: + res = get_results(config, cbarg, prev_runid) + if res: + print("Got result:", res) + return res + + time.sleep(config.getint('gh', 'wait_poll')) + + url = config.get('gh', 'link') + if hasattr(cbarg, "prev_runid") and cbarg.prev_runid != prev_runid: + url = link(cbarg.prev_runid, config) + + return [{'test': config.get('executor', 'test'), + 'group': config.get('executor', 'group'), + 'result': 'skip', 'link': url}] + + +def test(binfo, rinfo, cbarg): + start = datetime.datetime.now() + print("Run at", start) + + cbarg.refresh_config() + config = cbarg.config + + results_path = os.path.join(config.get('local', 'base_path'), + config.get('local', 'results_path'), + rinfo['run-cookie']) + os.makedirs(results_path) + + res = test_run(binfo, rinfo, cbarg, config, start) + + retry = [] + for one in res: + if one['result'] == 'fail': + retry = test_run(binfo, rinfo, cbarg, config, start) + break + for one2 in retry: + for one in res: + if one['test'] == one2['test']: + one['retry'] = one2['result'] + break + + return res + + +def main() -> None: + cfg_paths = ['remote.config', 'gh.config'] + if len(sys.argv) > 1: + cfg_paths += sys.argv[1:] + + cbarg = CbArg(cfg_paths) + config = cbarg.config + + base_dir = config.get('local', 'base_path') + + life = NipaLifetime(config) + + f = Fetcher(test, cbarg, + name=config.get('executor', 'name'), + branches_url=config.get('remote', 'branches'), + results_path=os.path.join(base_dir, config.get('local', 'json_path')), + url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), + tree_path=config.get('local', 'tree_path'), + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, + first_run=config.get('executor', 'init', fallback="continue")) + f.run() + life.exit() + + +if __name__ == "__main__": + main() diff --git a/contest/remote/kunit.py b/contest/remote/kunit.py index 2d9dd1b..bdfa2fd 100755 --- a/contest/remote/kunit.py +++ b/contest/remote/kunit.py @@ -5,9 +5,12 @@ import datetime import json import os +import shutil import subprocess -from lib import Fetcher +from core import NipaLifetime +from lib import Fetcher, namify +from lib import wait_loadavg """ @@ -17,6 +20,7 @@ name=executor group=test-group test=test-name +init=force / continue / next [remote] branches=https://url-to-branches-manifest [local] @@ -24,8 +28,11 @@ json_path=base-relative/path/to/json results_path=base-relative/path/to/raw/outputs tree_path=/root-path/to/kernel/git +patches_path=/root-path/to/patches/dir [www] url=https://url-to-reach-base-path +[cfg] +wait_loadavg= Expected: @@ -70,39 +77,64 @@ def load_expected(config): for l in lines: if not l: continue - words = l.split() + words = l.strip().split('|') + if len(words) != 3: + words = l.split() if words[0] not in expected: expected[words[0]] = {} grp = expected[words[0]] if words[1] not in grp: grp[words[1]] = {} grp[words[1]] = str_to_code[words[2]] - return expected + return expected -def summary_result(expected, got, link, sub_path=""): +def summary_flat(expected, got, sub_path=""): if sub_path: sub_path += '.' 
+ overall_code = 0 results = [] bad_tests = [] + for case in got["test_cases"]: + code = str_to_code[case["status"]] + + exp = expected.get(got["name"], {}).get(case["name"]) + if exp and exp == code: + continue + + name = namify(case["name"]) + overall_code = max(code, overall_code) + results.append({'test': sub_path + name, + 'result': code_to_str[code]}) + if code: + bad_tests.append(f"{got['name']} {name} {case['status']}") + for sub_group in got["sub_groups"]: - for case in sub_group["test_cases"]: - code = str_to_code[case["status"]] + ov, bt, res = summary_flat(expected, sub_group, sub_path + sub_group["name"]) + overall_code = max(ov, overall_code) + results += res + bad_tests += bt - exp = expected.get(sub_group["name"], {}).get(case["name"]) - if exp and exp == code: - continue + return overall_code, bad_tests, results + + +def summary_result(expected, got, link, sub_path=""): + results = [] + bad_tests = [] + for sub_group in got["sub_groups"]: + code, bt, res = summary_flat(expected, sub_group) + + data = { + 'test': sub_group["name"], + 'group': 'kunit', + 'result': code_to_str[code], + 'results': res, + 'link': link + } + results.append(data) - results.append({'test': case["name"], - 'group': sub_path + sub_group["name"], - 'result': code_to_str[code], 'link': link}) - if code: - bad_tests.append(f"{sub_group['name']} {case['name']} {case['status']}") - for grp in sub_group["sub_groups"]: - bt, res = summary_result(expected, grp, link, sub_path + grp["name"]) - results += res - bad_tests += bt + bad_tests += bt return bad_tests, results @@ -110,9 +142,19 @@ def summary_result(expected, got, link, sub_path=""): def test(binfo, rinfo, config): print("Run at", datetime.datetime.now()) - process = subprocess.Popen(['./tools/testing/kunit/kunit.py', 'run', '--alltests', '--json'], + tree_path = config.get('local', 'tree_path') + + load_tgt = config.getfloat("cfg", "wait_loadavg", fallback=None) + if load_tgt: + wait_loadavg(load_tgt) + + penv = os.environ.copy() + if 'PYTHONUNBUFFERED' in penv: + del penv['PYTHONUNBUFFERED'] + + process = subprocess.Popen(['./tools/testing/kunit/kunit.py', 'run', '--alltests', '--json', '--arch=x86_64'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=config.get('local', 'tree_path')) + cwd=tree_path, env=penv) stdout, stderr = process.communicate() stdout = stdout.decode("utf-8", "ignore") stderr = stderr.decode("utf-8", "ignore") @@ -132,6 +174,8 @@ def test(binfo, rinfo, config): fp.write(stdout) with open(os.path.join(results_path, 'stderr'), 'w') as fp: fp.write(stderr) + shutil.copyfile(os.path.join(tree_path, '.kunit', 'test.log'), + os.path.join(results_path, 'kunit-test.log')) try: results_json = stdout_get_json(stdout) @@ -161,14 +205,20 @@ def main() -> None: base_dir = config.get('local', 'base_path') + life = NipaLifetime(config) + f = Fetcher(test, config, name=config.get('executor', 'name'), branches_url=config.get('remote', 'branches'), results_path=os.path.join(base_dir, config.get('local', 'json_path')), url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), - tree_path=config.get('local', 'tree_path')) + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, + tree_path=config.get('local', 'tree_path'), + first_run=config.get('executor', 'init', fallback="continue")) f.run() + life.exit() if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/contest/remote/lib/__init__.py b/contest/remote/lib/__init__.py index 2067e09..2268a29 100644 --- 
a/contest/remote/lib/__init__.py +++ b/contest/remote/lib/__init__.py @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -from .fetcher import Fetcher +from .fetcher import Fetcher, namify +from .loadavg import wait_loadavg from .vm import VM, new_vm, guess_indicators from .cbarg import CbArg +from .crash import has_crash, extract_crash diff --git a/contest/remote/lib/crash.py b/contest/remote/lib/crash.py new file mode 100755 index 0000000..38fdc02 --- /dev/null +++ b/contest/remote/lib/crash.py @@ -0,0 +1,826 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import re +import unittest + + +def has_crash(output): + return output.find("] RIP: ") != -1 or \ + output.find("] Call Trace:") != -1 or \ + output.find('] ref_tracker: ') != -1 or \ + output.find('unreferenced object 0x') != -1 + + +def finger_print_skip_pfx_len(filters, needles): + # Filter may contain a list of needles we want to skip + # Assume it's well sorted, so we don't need LPM... + if filters and 'crash-prefix-skip' in filters: + for skip_pfx in filters['crash-prefix-skip']: + if len(needles) < len(skip_pfx): + continue + if needles[:len(skip_pfx)] == skip_pfx: + return len(skip_pfx) + return 0 + + +def crash_finger_print(filters, lines): + needles = [] + need_re = re.compile(r'.*( |0:|>\] )([a-z0-9_]+)\+0x[0-9a-f]+/0x[0-9a-f]+.*') + skip = 0 + for line in lines: + m = need_re.match(line) + if not m: + continue + needles.append(m.groups()[1]) + skip = finger_print_skip_pfx_len(filters, needles) + if len(needles) - skip == 5: + break + + needles = needles[skip:] + return ":".join(needles) + + +def extract_crash(outputs, prompt, get_filters): + in_crash = False + start = 0 + crash_lines = [] + finger_prints = set() + last5 = [""] * 5 + outputs = outputs.split('\n') + for line in outputs: + if in_crash: + in_crash &= '] ---[ end trace ' not in line + in_crash &= '] ' not in line + in_crash &= line[-2:] != '] ' + in_crash &= not line.startswith(prompt) + if not in_crash: + last5 = [""] * 5 + finger_prints.add(crash_finger_print(get_filters(), + crash_lines[start:])) + else: + in_crash |= '] Hardware name: ' in line + in_crash |= '] ref_tracker: ' in line + in_crash |= ' blocked for more than ' in line + in_crash |= line.startswith('unreferenced object 0x') + if in_crash: + start = len(crash_lines) + crash_lines += last5 + + # Keep last 5 to get some of the stuff before stack trace + last5 = last5[1:] + ["| " + line] + + if in_crash: + crash_lines.append(line) + + return crash_lines, finger_prints + + +############################################################# +# END OF CODE --- START OF UNIT TESTS +############################################################# + + +class TestCrashes(unittest.TestCase): + def test_memleak(self): + self.assertTrue(has_crash(TestCrashes.kmemleak)) + lines, fingers = extract_crash(TestCrashes.kmemleak, "xx__->", lambda : None) + self.assertGreater(len(lines), 8) + self.assertEqual(fingers, + {'kmalloc_trace_noprof:tcp_ao_alloc_info:do_tcp_setsockopt:do_sock_setsockopt:__sys_setsockopt'}) + + def test_bad_irq(self): + self.assertTrue(has_crash(TestCrashes.bad_irq)) + lines, fingers = extract_crash(TestCrashes.bad_irq, "xx__->", lambda : None) + self.assertGreater(len(lines), 10) + self.assertEqual(fingers, + {'dump_stack_lvl:__report_bad_irq:note_interrupt:handle_irq_event:handle_edge_irq'}) + + def test_bad_irq_trim(self): + self.assertTrue(has_crash(TestCrashes.bad_irq)) + lines, fingers = extract_crash(TestCrashes.bad_irq, "xx__->", + lambda : {'crash-prefix-skip': 
[["dump_stack_lvl","__report_bad_irq"]]}) + self.assertGreater(len(lines), 10) + self.assertEqual(fingers, + {'note_interrupt:handle_irq_event:handle_edge_irq:__common_interrupt:common_interrupt'}) + + def test_refleak(self): + self.assertTrue(has_crash(TestCrashes.refleak)) + lines, fingers = extract_crash(TestCrashes.refleak, "xx__->", lambda : None) + self.assertGreater(len(lines), 50) + self.assertEqual(fingers, + {'netdev_get_by_index:fib6_nh_init:nh_create_ipv6:nexthop_create:rtm_new_nexthop', + 'ipv6_add_dev:addrconf_notify:notifier_call_chain:register_netdevice:veth_newlink', + 'dst_init:dst_alloc:ip6_dst_alloc:ip6_rt_pcpu_alloc:ip6_pol_route'}) + + def test_hung_task(self): + self.assertTrue(has_crash(TestCrashes.hung_task)) + lines, fingers = extract_crash(TestCrashes.hung_task, "xx__->", lambda : None) + self.assertGreater(len(lines), 10) + self.assertEqual(fingers, + {'__schedule:schedule:__wait_on_freeing_inode:find_inode_fast:iget_locked', + '__schedule:schedule:d_alloc_parallel:__lookup_slow:walk_component'}) + + ######################################################### + ### Sample outputs + ######################################################### + kmemleak = """xx__-> echo $? +0 +xx__-> echo scan > /sys/kernel/debug/kmemleak && cat /sys/kernel/debug/kmemleak +unreferenced object 0xffff888003692380 (size 128): + comm "unsigned-md5_ip", pid 762, jiffies 4294831244 + hex dump (first 32 bytes): + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + backtrace (crc 2128895f): + [] kmalloc_trace_noprof+0x236/0x290 + [] tcp_ao_alloc_info+0x44/0xf0 + [] tcp_ao_info_cmd.constprop.0+0x423/0x8e0 + [] do_tcp_setsockopt+0xa64/0x2320 + [] do_sock_setsockopt+0x149/0x3a0 + [] __sys_setsockopt+0x104/0x1a0 + [] __x64_sys_setsockopt+0xbd/0x160 + [] do_syscall_64+0xc1/0x1d0 + [] entry_SYSCALL_64_after_hwframe+0x77/0x7f +xx__-> + """ + + bad_irq = """[ 1000.092583][ T3849] tc (3849) used greatest stack depth: 23216 bytes left +[ 1081.418714][ C3] irq 4: nobody cared (try booting with the "irqpoll" option) +[ 1081.419111][ C3] CPU: 3 PID: 3703 Comm: perl Not tainted 6.10.0-rc3-virtme #1 +[ 1081.419389][ C3] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014 +[ 1081.419773][ C3] Call Trace: +[ 1081.419909][ C3] +[ 1081.420008][ C3] dump_stack_lvl+0x82/0xd0 +[ 1081.420197][ C3] __report_bad_irq+0x5f/0x180 +[ 1081.420371][ C3] note_interrupt+0x6b3/0x860 +[ 1081.420556][ C3] handle_irq_event+0x16d/0x1c0 +[ 1081.420728][ C3] handle_edge_irq+0x1fa/0xb60 +[ 1081.420912][ C3] __common_interrupt+0x82/0x170 +[ 1081.421128][ C3] common_interrupt+0x7e/0x90 +[ 1081.421330][ C3] +[ 1081.421430][ C3] +[ 1081.421526][ C3] asm_common_interrupt+0x26/0x40 +[ 1081.421711][ C3] RIP: 0010:_raw_spin_unlock_irqrestore+0x43/0x70 +[ 1081.421951][ C3] Code: 10 e8 d1 1a 92 fd 48 89 ef e8 49 8b 92 fd 81 e3 00 02 00 00 75 1d 9c 58 f6 c4 02 75 29 48 85 db 74 01 fb 65 ff 0d 95 7a 06 54 <74> 0e 5b 5d c3 cc cc cc cc e8 7f 01 b6 fd eb dc 0f 1f 44 00 00 5b +[ 1081.422616][ C3] RSP: 0018:ffffc90000bdfac0 EFLAGS: 00000286 +[ 1081.422862][ C3] RAX: 0000000000000006 RBX: 0000000000000200 RCX: 1ffffffff5e2ff1a +[ 1081.423147][ C3] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffffabfd4d81 +[ 1081.423422][ C3] RBP: ffffffffafa41060 R08: 0000000000000001 R09: fffffbfff5e2b0a8 +[ 1081.423701][ C3] R10: ffffffffaf158547 R11: 0000000000000000 R12: 0000000000000001 +[ 1081.423991][ C3] 
R13: 0000000000000286 R14: ffffffffafa41060 R15: ffff888006683800 +[ 1081.424296][ C3] ? _raw_spin_unlock_irqrestore+0x51/0x70 +[ 1081.424542][ C3] uart_write+0x13d/0x330 +[ 1081.424695][ C3] process_output_block+0x13e/0x790 +[ 1081.424885][ C3] ? lockdep_hardirqs_on_prepare+0x275/0x410 +[ 1081.425144][ C3] n_tty_write+0x412/0x7a0 +[ 1081.425344][ C3] ? __pfx_n_tty_write+0x10/0x10 +[ 1081.425535][ C3] ? trace_lock_acquire+0x14d/0x1f0 +[ 1081.425722][ C3] ? __pfx_woken_wake_function+0x10/0x10 +[ 1081.425909][ C3] ? iterate_tty_write+0x95/0x540 +[ 1081.426098][ C3] ? lock_acquire+0x32/0xc0 +[ 1081.426285][ C3] ? iterate_tty_write+0x95/0x540 +[ 1081.426473][ C3] iterate_tty_write+0x228/0x540 +[ 1081.426660][ C3] ? tty_ldisc_ref_wait+0x28/0x80 +[ 1081.426850][ C3] file_tty_write.constprop.0+0x1db/0x370 +[ 1081.427037][ C3] vfs_write+0xa18/0x10b0 +[ 1081.427184][ C3] ? __pfx_lock_acquire.part.0+0x10/0x10 +[ 1081.427369][ C3] ? __pfx_vfs_write+0x10/0x10 +[ 1081.427557][ C3] ? clockevents_program_event+0xf6/0x300 +[ 1081.427750][ C3] ? __fget_light+0x53/0x1e0 +[ 1081.427938][ C3] ? clockevents_program_event+0x1ea/0x300 +[ 1081.428170][ C3] ksys_write+0xf5/0x1e0 +[ 1081.428319][ C3] ? __pfx_ksys_write+0x10/0x10 +[ 1081.428515][ C3] do_syscall_64+0xc1/0x1d0 +[ 1081.428696][ C3] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 1081.428915][ C3] RIP: 0033:0x7f3d90a53957 +[ 1081.429131][ C3] Code: 0b 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24 +[ 1081.429726][ C3] RSP: 002b:00007ffe774784d8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 +[ 1081.429987][ C3] RAX: ffffffffffffffda RBX: 00005596b8d2a1d0 RCX: 00007f3d90a53957 +[ 1081.430242][ C3] RDX: 0000000000000001 RSI: 00005596b8d2a1d0 RDI: 0000000000000001 +[ 1081.430494][ C3] RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000002000 +[ 1081.430753][ C3] R10: 0000000000000001 R11: 0000000000000246 R12: 00005596b8d165c0 +[ 1081.431012][ C3] R13: 00005596b8cf72a0 R14: 0000000000000001 R15: 00005596b8d165c0 +[ 1081.431290][ C3] +[ 1081.431421][ C3] handlers: +[ 1081.431553][ C3] [] serial8250_interrupt +[ 1081.432206][ C3] Disabling IRQ #4 +""" + + refleak = """ +[ 1055.837009][ T75] veth_A-C: left allmulticast mode +[ 1055.837273][ T75] veth_A-C: left promiscuous mode +[ 1055.837697][ T75] br0: port 1(veth_A-C) entered disabled state +[ 1619.761346][T10781] Initializing XFRM netlink socket +[ 1868.101248][T12484] unregister_netdevice: waiting for veth_A-R1 to become free. 
Usage count = 5 +[ 1868.101753][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1868.101753][T12484] dst_init+0x84/0x4a0 +[ 1868.101753][T12484] dst_alloc+0x97/0x150 +[ 1868.101753][T12484] ip6_dst_alloc+0x23/0x90 +[ 1868.101753][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1868.101753][T12484] ip6_pol_route+0x56f/0x840 +[ 1868.101753][T12484] fib6_rule_lookup+0x334/0x630 +[ 1868.101753][T12484] ip6_route_output_flags+0x259/0x480 +[ 1868.101753][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1868.101753][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1868.101753][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1868.101753][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1868.101753][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1868.101753][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1868.101753][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1868.101753][T12484] arp_solicit+0x4aa/0xe20 +[ 1868.101753][T12484] neigh_probe+0xaa/0xf0 +[ 1868.101753][T12484] +[ 1868.104788][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1868.104788][T12484] dst_init+0x84/0x4a0 +[ 1868.104788][T12484] dst_alloc+0x97/0x150 +[ 1868.104788][T12484] ip6_dst_alloc+0x23/0x90 +[ 1868.104788][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1868.104788][T12484] ip6_pol_route+0x56f/0x840 +[ 1868.104788][T12484] fib6_rule_lookup+0x334/0x630 +[ 1868.104788][T12484] ip6_route_output_flags+0x259/0x480 +[ 1868.104788][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1868.104788][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1868.104788][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1868.104788][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1868.104788][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1868.104788][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1868.104788][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1868.104788][T12484] ip6_finish_output2+0x59b/0xff0 +[ 1868.104788][T12484] ip6_finish_output+0x553/0xdf0 +[ 1868.104788][T12484] +[ 1868.107473][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1868.107473][T12484] netdev_get_by_index+0x5e/0x80 +[ 1868.107473][T12484] fib6_nh_init+0x3dd/0x15c0 +[ 1868.107473][T12484] nh_create_ipv6+0x377/0x600 +[ 1868.107473][T12484] nexthop_create+0x311/0x650 +[ 1868.107473][T12484] rtm_new_nexthop+0x3f0/0x5c0 +[ 1868.107473][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1868.107473][T12484] netlink_rcv_skb+0x130/0x360 +[ 1868.107473][T12484] netlink_unicast+0x449/0x710 +[ 1868.107473][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1868.107473][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1868.107473][T12484] ___sys_sendmsg+0xee/0x170 +[ 1868.107473][T12484] __sys_sendmsg+0xc2/0x150 +[ 1868.107473][T12484] do_syscall_64+0xc1/0x1d0 +[ 1868.107473][T12484] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 1868.107473][T12484] +[ 1868.109800][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1868.109800][T12484] ipv6_add_dev+0x3b9/0x11c0 +[ 1868.109800][T12484] addrconf_notify+0x344/0xd60 +[ 1868.109800][T12484] notifier_call_chain+0xcd/0x150 +[ 1868.109800][T12484] register_netdevice+0x1091/0x1690 +[ 1868.109800][T12484] veth_newlink+0x401/0x830 +[ 1868.109800][T12484] rtnl_newlink_create+0x341/0x850 +[ 1868.109800][T12484] __rtnl_newlink+0xac9/0xd80 +[ 1868.109800][T12484] rtnl_newlink+0x63/0xa0 +[ 1868.109800][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1868.109800][T12484] netlink_rcv_skb+0x130/0x360 +[ 1868.109800][T12484] netlink_unicast+0x449/0x710 +[ 1868.109800][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1868.109800][T12484] ____sys_sendmsg+0x800/0xa90 +[ 
1868.109800][T12484] ___sys_sendmsg+0xee/0x170 +[ 1868.109800][T12484] __sys_sendmsg+0xc2/0x150 +[ 1868.109800][T12484] do_syscall_64+0xc1/0x1d0 +[ 1868.109800][T12484] +[ 1878.221231][T12484] unregister_netdevice: waiting for veth_A-R1 to become free. Usage count = 5 +[ 1878.221630][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1878.221630][T12484] dst_init+0x84/0x4a0 +[ 1878.221630][T12484] dst_alloc+0x97/0x150 +[ 1878.221630][T12484] ip6_dst_alloc+0x23/0x90 +[ 1878.221630][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1878.221630][T12484] ip6_pol_route+0x56f/0x840 +[ 1878.221630][T12484] fib6_rule_lookup+0x334/0x630 +[ 1878.221630][T12484] ip6_route_output_flags+0x259/0x480 +[ 1878.221630][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1878.221630][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1878.221630][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1878.221630][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1878.221630][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1878.221630][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1878.221630][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1878.221630][T12484] arp_solicit+0x4aa/0xe20 +[ 1878.221630][T12484] neigh_probe+0xaa/0xf0 +[ 1878.221630][T12484] +[ 1878.223972][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1878.223972][T12484] dst_init+0x84/0x4a0 +[ 1878.223972][T12484] dst_alloc+0x97/0x150 +[ 1878.223972][T12484] ip6_dst_alloc+0x23/0x90 +[ 1878.223972][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1878.223972][T12484] ip6_pol_route+0x56f/0x840 +[ 1878.223972][T12484] fib6_rule_lookup+0x334/0x630 +[ 1878.223972][T12484] ip6_route_output_flags+0x259/0x480 +[ 1878.223972][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1878.223972][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1878.223972][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1878.223972][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1878.223972][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1878.223972][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1878.223972][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1878.223972][T12484] ip6_finish_output2+0x59b/0xff0 +[ 1878.223972][T12484] ip6_finish_output+0x553/0xdf0 +[ 1878.223972][T12484] +[ 1878.226285][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1878.226285][T12484] netdev_get_by_index+0x5e/0x80 +[ 1878.226285][T12484] fib6_nh_init+0x3dd/0x15c0 +[ 1878.226285][T12484] nh_create_ipv6+0x377/0x600 +[ 1878.226285][T12484] nexthop_create+0x311/0x650 +[ 1878.226285][T12484] rtm_new_nexthop+0x3f0/0x5c0 +[ 1878.226285][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1878.226285][T12484] netlink_rcv_skb+0x130/0x360 +[ 1878.226285][T12484] netlink_unicast+0x449/0x710 +[ 1878.226285][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1878.226285][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1878.226285][T12484] ___sys_sendmsg+0xee/0x170 +[ 1878.226285][T12484] __sys_sendmsg+0xc2/0x150 +[ 1878.226285][T12484] do_syscall_64+0xc1/0x1d0 +[ 1878.226285][T12484] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 1878.226285][T12484] +[ 1878.228262][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1878.228262][T12484] ipv6_add_dev+0x3b9/0x11c0 +[ 1878.228262][T12484] addrconf_notify+0x344/0xd60 +[ 1878.228262][T12484] notifier_call_chain+0xcd/0x150 +[ 1878.228262][T12484] register_netdevice+0x1091/0x1690 +[ 1878.228262][T12484] veth_newlink+0x401/0x830 +[ 1878.228262][T12484] rtnl_newlink_create+0x341/0x850 +[ 1878.228262][T12484] __rtnl_newlink+0xac9/0xd80 +[ 1878.228262][T12484] rtnl_newlink+0x63/0xa0 +[ 
1878.228262][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1878.228262][T12484] netlink_rcv_skb+0x130/0x360 +[ 1878.228262][T12484] netlink_unicast+0x449/0x710 +[ 1878.228262][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1878.228262][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1878.228262][T12484] ___sys_sendmsg+0xee/0x170 +[ 1878.228262][T12484] __sys_sendmsg+0xc2/0x150 +[ 1878.228262][T12484] do_syscall_64+0xc1/0x1d0 +[ 1878.228262][T12484] +[ 1888.397169][T12484] unregister_netdevice: waiting for veth_A-R1 to become free. Usage count = 5 +[ 1888.397586][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1888.397586][T12484] dst_init+0x84/0x4a0 +[ 1888.397586][T12484] dst_alloc+0x97/0x150 +[ 1888.397586][T12484] ip6_dst_alloc+0x23/0x90 +[ 1888.397586][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1888.397586][T12484] ip6_pol_route+0x56f/0x840 +[ 1888.397586][T12484] fib6_rule_lookup+0x334/0x630 +[ 1888.397586][T12484] ip6_route_output_flags+0x259/0x480 +[ 1888.397586][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1888.397586][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1888.397586][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1888.397586][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1888.397586][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1888.397586][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1888.397586][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1888.397586][T12484] arp_solicit+0x4aa/0xe20 +[ 1888.397586][T12484] neigh_probe+0xaa/0xf0 +[ 1888.397586][T12484] +[ 1888.400065][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1888.400065][T12484] dst_init+0x84/0x4a0 +[ 1888.400065][T12484] dst_alloc+0x97/0x150 +[ 1888.400065][T12484] ip6_dst_alloc+0x23/0x90 +[ 1888.400065][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1888.400065][T12484] ip6_pol_route+0x56f/0x840 +[ 1888.400065][T12484] fib6_rule_lookup+0x334/0x630 +[ 1888.400065][T12484] ip6_route_output_flags+0x259/0x480 +[ 1888.400065][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1888.400065][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1888.400065][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1888.400065][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1888.400065][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1888.400065][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1888.400065][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1888.400065][T12484] ip6_finish_output2+0x59b/0xff0 +[ 1888.400065][T12484] ip6_finish_output+0x553/0xdf0 +[ 1888.400065][T12484] +[ 1888.402504][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1888.402504][T12484] netdev_get_by_index+0x5e/0x80 +[ 1888.402504][T12484] fib6_nh_init+0x3dd/0x15c0 +[ 1888.402504][T12484] nh_create_ipv6+0x377/0x600 +[ 1888.402504][T12484] nexthop_create+0x311/0x650 +[ 1888.402504][T12484] rtm_new_nexthop+0x3f0/0x5c0 +[ 1888.402504][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1888.402504][T12484] netlink_rcv_skb+0x130/0x360 +[ 1888.402504][T12484] netlink_unicast+0x449/0x710 +[ 1888.402504][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1888.402504][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1888.402504][T12484] ___sys_sendmsg+0xee/0x170 +[ 1888.402504][T12484] __sys_sendmsg+0xc2/0x150 +[ 1888.402504][T12484] do_syscall_64+0xc1/0x1d0 +[ 1888.402504][T12484] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 1888.402504][T12484] +[ 1888.404580][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1888.404580][T12484] ipv6_add_dev+0x3b9/0x11c0 +[ 1888.404580][T12484] addrconf_notify+0x344/0xd60 +[ 1888.404580][T12484] notifier_call_chain+0xcd/0x150 +[ 
1888.404580][T12484] register_netdevice+0x1091/0x1690 +[ 1888.404580][T12484] veth_newlink+0x401/0x830 +[ 1888.404580][T12484] rtnl_newlink_create+0x341/0x850 +[ 1888.404580][T12484] __rtnl_newlink+0xac9/0xd80 +[ 1888.404580][T12484] rtnl_newlink+0x63/0xa0 +[ 1888.404580][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1888.404580][T12484] netlink_rcv_skb+0x130/0x360 +[ 1888.404580][T12484] netlink_unicast+0x449/0x710 +[ 1888.404580][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1888.404580][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1888.404580][T12484] ___sys_sendmsg+0xee/0x170 +[ 1888.404580][T12484] __sys_sendmsg+0xc2/0x150 +[ 1888.404580][T12484] do_syscall_64+0xc1/0x1d0 +[ 1888.404580][T12484] +[ 1898.589197][T12484] unregister_netdevice: waiting for veth_A-R1 to become free. Usage count = 5 +[ 1898.589575][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1898.589575][T12484] dst_init+0x84/0x4a0 +[ 1898.589575][T12484] dst_alloc+0x97/0x150 +[ 1898.589575][T12484] ip6_dst_alloc+0x23/0x90 +[ 1898.589575][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1898.589575][T12484] ip6_pol_route+0x56f/0x840 +[ 1898.589575][T12484] fib6_rule_lookup+0x334/0x630 +[ 1898.589575][T12484] ip6_route_output_flags+0x259/0x480 +[ 1898.589575][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1898.589575][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1898.589575][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1898.589575][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1898.589575][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1898.589575][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1898.589575][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1898.589575][T12484] arp_solicit+0x4aa/0xe20 +[ 1898.589575][T12484] neigh_probe+0xaa/0xf0 +[ 1898.589575][T12484] +[ 1898.591877][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1898.591877][T12484] dst_init+0x84/0x4a0 +[ 1898.591877][T12484] dst_alloc+0x97/0x150 +[ 1898.591877][T12484] ip6_dst_alloc+0x23/0x90 +[ 1898.591877][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1898.591877][T12484] ip6_pol_route+0x56f/0x840 +[ 1898.591877][T12484] fib6_rule_lookup+0x334/0x630 +[ 1898.591877][T12484] ip6_route_output_flags+0x259/0x480 +[ 1898.591877][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1898.591877][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1898.591877][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1898.591877][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1898.591877][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1898.591877][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1898.591877][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1898.591877][T12484] ip6_finish_output2+0x59b/0xff0 +[ 1898.591877][T12484] ip6_finish_output+0x553/0xdf0 +[ 1898.591877][T12484] +[ 1898.594146][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1898.594146][T12484] netdev_get_by_index+0x5e/0x80 +[ 1898.594146][T12484] fib6_nh_init+0x3dd/0x15c0 +[ 1898.594146][T12484] nh_create_ipv6+0x377/0x600 +[ 1898.594146][T12484] nexthop_create+0x311/0x650 +[ 1898.594146][T12484] rtm_new_nexthop+0x3f0/0x5c0 +[ 1898.594146][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1898.594146][T12484] netlink_rcv_skb+0x130/0x360 +[ 1898.594146][T12484] netlink_unicast+0x449/0x710 +[ 1898.594146][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1898.594146][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1898.594146][T12484] ___sys_sendmsg+0xee/0x170 +[ 1898.594146][T12484] __sys_sendmsg+0xc2/0x150 +[ 1898.594146][T12484] do_syscall_64+0xc1/0x1d0 +[ 1898.594146][T12484] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 
1898.594146][T12484] +[ 1898.596102][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1898.596102][T12484] ipv6_add_dev+0x3b9/0x11c0 +[ 1898.596102][T12484] addrconf_notify+0x344/0xd60 +[ 1898.596102][T12484] notifier_call_chain+0xcd/0x150 +[ 1898.596102][T12484] register_netdevice+0x1091/0x1690 +[ 1898.596102][T12484] veth_newlink+0x401/0x830 +[ 1898.596102][T12484] rtnl_newlink_create+0x341/0x850 +[ 1898.596102][T12484] __rtnl_newlink+0xac9/0xd80 +[ 1898.596102][T12484] rtnl_newlink+0x63/0xa0 +[ 1898.596102][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1898.596102][T12484] netlink_rcv_skb+0x130/0x360 +[ 1898.596102][T12484] netlink_unicast+0x449/0x710 +[ 1898.596102][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1898.596102][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1898.596102][T12484] ___sys_sendmsg+0xee/0x170 +[ 1898.596102][T12484] __sys_sendmsg+0xc2/0x150 +[ 1898.596102][T12484] do_syscall_64+0xc1/0x1d0 +[ 1898.596102][T12484] +[ 1908.670144][T12484] unregister_netdevice: waiting for veth_A-R1 to become free. Usage count = 5 +[ 1908.670561][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1908.670561][T12484] dst_init+0x84/0x4a0 +[ 1908.670561][T12484] dst_alloc+0x97/0x150 +[ 1908.670561][T12484] ip6_dst_alloc+0x23/0x90 +[ 1908.670561][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1908.670561][T12484] ip6_pol_route+0x56f/0x840 +[ 1908.670561][T12484] fib6_rule_lookup+0x334/0x630 +[ 1908.670561][T12484] ip6_route_output_flags+0x259/0x480 +[ 1908.670561][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1908.670561][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1908.670561][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1908.670561][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1908.670561][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1908.670561][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1908.670561][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1908.670561][T12484] arp_solicit+0x4aa/0xe20 +[ 1908.670561][T12484] neigh_probe+0xaa/0xf0 +[ 1908.670561][T12484] +[ 1908.673046][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1908.673046][T12484] dst_init+0x84/0x4a0 +[ 1908.673046][T12484] dst_alloc+0x97/0x150 +[ 1908.673046][T12484] ip6_dst_alloc+0x23/0x90 +[ 1908.673046][T12484] ip6_rt_pcpu_alloc+0x1e6/0x520 +[ 1908.673046][T12484] ip6_pol_route+0x56f/0x840 +[ 1908.673046][T12484] fib6_rule_lookup+0x334/0x630 +[ 1908.673046][T12484] ip6_route_output_flags+0x259/0x480 +[ 1908.673046][T12484] ip6_dst_lookup_tail.constprop.0+0x700/0xb60 +[ 1908.673046][T12484] ip6_dst_lookup_flow+0x88/0x190 +[ 1908.673046][T12484] udp_tunnel6_dst_lookup+0x2b0/0x4d0 +[ 1908.673046][T12484] vxlan_xmit_one+0xd41/0x4500 [vxlan] +[ 1908.673046][T12484] vxlan_xmit+0x9b6/0xf10 [vxlan] +[ 1908.673046][T12484] dev_hard_start_xmit+0x10e/0x360 +[ 1908.673046][T12484] __dev_queue_xmit+0xe76/0x1740 +[ 1908.673046][T12484] ip6_finish_output2+0x59b/0xff0 +[ 1908.673046][T12484] ip6_finish_output+0x553/0xdf0 +[ 1908.673046][T12484] +[ 1908.675506][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1908.675506][T12484] netdev_get_by_index+0x5e/0x80 +[ 1908.675506][T12484] fib6_nh_init+0x3dd/0x15c0 +[ 1908.675506][T12484] nh_create_ipv6+0x377/0x600 +[ 1908.675506][T12484] nexthop_create+0x311/0x650 +[ 1908.675506][T12484] rtm_new_nexthop+0x3f0/0x5c0 +[ 1908.675506][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1908.675506][T12484] netlink_rcv_skb+0x130/0x360 +[ 1908.675506][T12484] netlink_unicast+0x449/0x710 +[ 1908.675506][T12484] netlink_sendmsg+0x723/0xbe0 +[ 
1908.675506][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1908.675506][T12484] ___sys_sendmsg+0xee/0x170 +[ 1908.675506][T12484] __sys_sendmsg+0xc2/0x150 +[ 1908.675506][T12484] do_syscall_64+0xc1/0x1d0 +[ 1908.675506][T12484] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 1908.675506][T12484] +[ 1908.677622][T12484] ref_tracker: veth_A-R1@ffff8880060c45e0 has 1/4 users at +[ 1908.677622][T12484] ipv6_add_dev+0x3b9/0x11c0 +[ 1908.677622][T12484] addrconf_notify+0x344/0xd60 +[ 1908.677622][T12484] notifier_call_chain+0xcd/0x150 +[ 1908.677622][T12484] register_netdevice+0x1091/0x1690 +[ 1908.677622][T12484] veth_newlink+0x401/0x830 +[ 1908.677622][T12484] rtnl_newlink_create+0x341/0x850 +[ 1908.677622][T12484] __rtnl_newlink+0xac9/0xd80 +[ 1908.677622][T12484] rtnl_newlink+0x63/0xa0 +[ 1908.677622][T12484] rtnetlink_rcv_msg+0x2fb/0xc10 +[ 1908.677622][T12484] netlink_rcv_skb+0x130/0x360 +[ 1908.677622][T12484] netlink_unicast+0x449/0x710 +[ 1908.677622][T12484] netlink_sendmsg+0x723/0xbe0 +[ 1908.677622][T12484] ____sys_sendmsg+0x800/0xa90 +[ 1908.677622][T12484] ___sys_sendmsg+0xee/0x170 +[ 1908.677622][T12484] __sys_sendmsg+0xc2/0x150 +[ 1908.677622][T12484] do_syscall_64+0xc1/0x1d0 +[ 1908.677622][T12484] +""" + + hung_task = """ +[ 1863.157993][ T9043] br0: port 1(vx0) entered forwarding state +[ 2090.392704][ T43] INFO: task tc:9091 blocked for more than 122 seconds. +[ 2090.393146][ T43] Not tainted 6.10.0-virtme #1 +[ 2090.393327][ T43] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. +[ 2090.393598][ T43] task:tc state:D stack:26464 pid:9091 tgid:9091 ppid:9090 flags:0x00000000 +[ 2090.393857][ T43] Call Trace: +[ 2090.393956][ T43] +[ 2090.394033][ T43] __schedule+0x6e0/0x17e0 +[ 2090.394184][ T43] ? __pfx___schedule+0x10/0x10 +[ 2090.394318][ T43] ? schedule+0x1a5/0x210 +[ 2090.394420][ T43] ? __pfx_lock_acquire.part.0+0x10/0x10 +[ 2090.394562][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2090.394701][ T43] ? schedule+0x1a5/0x210 +[ 2090.394800][ T43] schedule+0xdf/0x210 +[ 2090.395240][ T43] d_alloc_parallel+0xaef/0xed0 +[ 2090.395379][ T43] ? __pfx_d_alloc_parallel+0x10/0x10 +[ 2090.395505][ T43] ? __pfx_default_wake_function+0x10/0x10 +[ 2090.395676][ T43] ? lockdep_init_map_type+0x2cb/0x7c0 +[ 2090.395809][ T43] __lookup_slow+0x17f/0x3c0 +[ 2090.395942][ T43] ? __pfx___lookup_slow+0x10/0x10 +[ 2090.396075][ T43] ? walk_component+0x29e/0x4f0 +[ 2090.396219][ T43] walk_component+0x2ab/0x4f0 +[ 2090.396350][ T43] link_path_walk.part.0.constprop.0+0x416/0x940 +[ 2090.396517][ T43] ? __pfx_link_path_walk.part.0.constprop.0+0x10/0x10 +[ 2090.396706][ T43] path_lookupat+0x72/0x660 +[ 2090.396832][ T43] filename_lookup+0x19e/0x420 +[ 2090.396958][ T43] ? __pfx_filename_lookup+0x10/0x10 +[ 2090.397090][ T43] ? find_held_lock+0x2c/0x110 +[ 2090.397213][ T43] ? __lock_release+0x103/0x460 +[ 2090.397335][ T43] ? __pfx___lock_release+0x10/0x10 +[ 2090.397456][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2090.397590][ T43] ? __might_fault+0xc3/0x170 +[ 2090.397720][ T43] ? lock_acquire+0x32/0xc0 +[ 2090.397838][ T43] ? __might_fault+0xc3/0x170 +[ 2090.397966][ T43] vfs_statx+0xbf/0x140 +[ 2090.398060][ T43] ? __pfx_vfs_statx+0x10/0x10 +[ 2090.398183][ T43] ? getname_flags+0xb3/0x410 +[ 2090.398307][ T43] vfs_fstatat+0x80/0xc0 +[ 2090.398400][ T43] __do_sys_newfstatat+0x75/0xd0 +[ 2090.398548][ T43] ? __pfx___do_sys_newfstatat+0x10/0x10 +[ 2090.398669][ T43] ? user_path_at+0x45/0x60 +[ 2090.398802][ T43] ? __x64_sys_openat+0x123/0x1e0 +[ 2090.398929][ T43] ? 
__pfx___x64_sys_openat+0x10/0x10 +[ 2090.399052][ T43] ? __pfx_do_faccessat+0x10/0x10 +[ 2090.399179][ T43] ? lockdep_hardirqs_on_prepare+0x275/0x410 +[ 2090.399327][ T43] do_syscall_64+0xc1/0x1d0 +[ 2090.399451][ T43] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 2090.399612][ T43] RIP: 0033:0x7fef39aaceae +[ 2090.399746][ T43] RSP: 002b:00007ffc38865528 EFLAGS: 00000246 ORIG_RAX: 0000000000000106 +[ 2090.399969][ T43] RAX: ffffffffffffffda RBX: 0000000000000004 RCX: 00007fef39aaceae +[ 2090.400180][ T43] RDX: 00007ffc38865600 RSI: 00007ffc38865530 RDI: 00000000ffffff9c +[ 2090.400371][ T43] RBP: 00007ffc388656c0 R08: 00000000ffffffff R09: 00007ffc38865530 +[ 2090.400563][ T43] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc38865537 +[ 2090.400745][ T43] R13: 00007ffc38865530 R14: 00007fef39abc220 R15: 00007fef39a7e000 +[ 2090.400949][ T43] +[ 2090.401044][ T43] INFO: task jq:9092 blocked for more than 122 seconds. +[ 2090.401211][ T43] Not tainted 6.10.0-virtme #1 +[ 2090.401326][ T43] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. +[ 2090.401539][ T43] task:jq state:D stack:26464 pid:9092 tgid:9092 ppid:9090 flags:0x00004000 +[ 2090.401808][ T43] Call Trace: +[ 2090.401901][ T43] +[ 2090.401968][ T43] __schedule+0x6e0/0x17e0 +[ 2090.402124][ T43] ? __pfx___schedule+0x10/0x10 +[ 2090.402243][ T43] ? schedule+0x1a5/0x210 +[ 2090.402338][ T43] ? __pfx_lock_acquire.part.0+0x10/0x10 +[ 2090.402477][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2090.402626][ T43] ? schedule+0x1a5/0x210 +[ 2090.402731][ T43] schedule+0xdf/0x210 +[ 2090.402824][ T43] __wait_on_freeing_inode+0x115/0x280 +[ 2090.402982][ T43] ? __pfx___wait_on_freeing_inode+0x10/0x10 +[ 2090.403171][ T43] ? __pfx_wake_bit_function+0x10/0x10 +[ 2090.403329][ T43] ? lock_acquire+0x32/0xc0 +[ 2090.403450][ T43] ? find_inode_fast+0x158/0x450 +[ 2090.403605][ T43] find_inode_fast+0x18d/0x450 +[ 2090.403742][ T43] iget_locked+0x7d/0x390 +[ 2090.403834][ T43] ? hlock_class+0x4e/0x130 +[ 2090.403972][ T43] v9fs_fid_iget_dotl+0x78/0x2d0 +[ 2090.404117][ T43] v9fs_vfs_lookup.part.0+0x1ed/0x390 +[ 2090.404263][ T43] ? __pfx_v9fs_vfs_lookup.part.0+0x10/0x10 +[ 2090.404417][ T43] ? lockdep_init_map_type+0x2cb/0x7c0 +[ 2090.404589][ T43] __lookup_slow+0x209/0x3c0 +[ 2090.404723][ T43] ? __pfx___lookup_slow+0x10/0x10 +[ 2090.404854][ T43] ? walk_component+0x29e/0x4f0 +[ 2090.405009][ T43] walk_component+0x2ab/0x4f0 +[ 2090.405137][ T43] link_path_walk.part.0.constprop.0+0x416/0x940 +[ 2090.405312][ T43] ? __pfx_link_path_walk.part.0.constprop.0+0x10/0x10 +[ 2090.405474][ T43] path_openat+0x1be/0x440 +[ 2090.405608][ T43] ? __pfx_path_openat+0x10/0x10 +[ 2090.405756][ T43] ? __lock_acquire+0xaf0/0x1570 +[ 2090.405883][ T43] do_filp_open+0x1b3/0x3e0 +[ 2090.406008][ T43] ? __pfx_do_filp_open+0x10/0x10 +[ 2090.406162][ T43] ? find_held_lock+0x2c/0x110 +[ 2090.406294][ T43] ? do_raw_spin_lock+0x131/0x270 +[ 2090.406442][ T43] ? __pfx_do_raw_spin_lock+0x10/0x10 +[ 2090.406589][ T43] ? alloc_fd+0x1f5/0x650 +[ 2090.406693][ T43] ? do_raw_spin_unlock+0x58/0x220 +[ 2090.406815][ T43] ? _raw_spin_unlock+0x23/0x40 +[ 2090.406954][ T43] ? alloc_fd+0x1f5/0x650 +[ 2090.407075][ T43] do_sys_openat2+0x122/0x160 +[ 2090.407208][ T43] ? __pfx_do_sys_openat2+0x10/0x10 +[ 2090.407332][ T43] ? user_path_at+0x45/0x60 +[ 2090.407490][ T43] __x64_sys_openat+0x123/0x1e0 +[ 2090.407635][ T43] ? __pfx___x64_sys_openat+0x10/0x10 +[ 2090.407764][ T43] ? 
__pfx_do_faccessat+0x10/0x10 +[ 2090.407950][ T43] do_syscall_64+0xc1/0x1d0 +[ 2090.408091][ T43] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 2090.408251][ T43] RIP: 0033:0x7f7b9086b0e8 +[ 2090.408406][ T43] RSP: 002b:00007fff468c5918 EFLAGS: 00000287 ORIG_RAX: 0000000000000101 +[ 2090.408637][ T43] RAX: ffffffffffffffda RBX: 00007fff468c5b9f RCX: 00007f7b9086b0e8 +[ 2090.408838][ T43] RDX: 0000000000080000 RSI: 00007fff468c5990 RDI: 00000000ffffff9c +[ 2090.409040][ T43] RBP: 00007fff468c5980 R08: 0000000000080000 R09: 00007fff468c5990 +[ 2090.409220][ T43] R10: 0000000000000000 R11: 0000000000000287 R12: 00007fff468c5997 +[ 2090.409439][ T43] R13: 00007fff468c5bb0 R14: 00007fff468c5990 R15: 00007f7b9083c000 +[ 2090.409652][ T43] +[ 2090.409788][ T43] +[ 2090.409788][ T43] Showing all locks held in the system: +[ 2090.409977][ T43] 1 lock held by khungtaskd/43: +[ 2090.410101][ T43] #0: ffffffffb9368c00 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x70/0x3a0 +[ 2090.410350][ T43] 1 lock held by tc/9091: +[ 2090.410439][ T43] #0: ffff888001720148 (&type->i_mutex_dir_key#3){++++}-{3:3}, at: walk_component+0x29e/0x4f0 +[ 2090.410714][ T43] 1 lock held by jq/9092: +[ 2090.410814][ T43] #0: ffff888001720148 (&type->i_mutex_dir_key#3){++++}-{3:3}, at: walk_component+0x29e/0x4f0 +[ 2090.411085][ T43] +[ 2090.411150][ T43] ============================================= +[ 2090.411150][ T43] +[ 2213.272644][ T43] INFO: task tc:9091 blocked for more than 245 seconds. +[ 2213.272927][ T43] Not tainted 6.10.0-virtme #1 +[ 2213.273055][ T43] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. +[ 2213.273269][ T43] task:tc state:D stack:26464 pid:9091 tgid:9091 ppid:9090 flags:0x00000000 +[ 2213.273565][ T43] Call Trace: +[ 2213.273667][ T43] +[ 2213.273738][ T43] __schedule+0x6e0/0x17e0 +[ 2213.273875][ T43] ? __pfx___schedule+0x10/0x10 +[ 2213.274005][ T43] ? schedule+0x1a5/0x210 +[ 2213.274109][ T43] ? __pfx_lock_acquire.part.0+0x10/0x10 +[ 2213.274250][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2213.274387][ T43] ? schedule+0x1a5/0x210 +[ 2213.274486][ T43] schedule+0xdf/0x210 +[ 2213.274603][ T43] d_alloc_parallel+0xaef/0xed0 +[ 2213.274757][ T43] ? __pfx_d_alloc_parallel+0x10/0x10 +[ 2213.274882][ T43] ? __pfx_default_wake_function+0x10/0x10 +[ 2213.275038][ T43] ? lockdep_init_map_type+0x2cb/0x7c0 +[ 2213.275175][ T43] __lookup_slow+0x17f/0x3c0 +[ 2213.275305][ T43] ? __pfx___lookup_slow+0x10/0x10 +[ 2213.275438][ T43] ? walk_component+0x29e/0x4f0 +[ 2213.275597][ T43] walk_component+0x2ab/0x4f0 +[ 2213.275749][ T43] link_path_walk.part.0.constprop.0+0x416/0x940 +[ 2213.275908][ T43] ? __pfx_link_path_walk.part.0.constprop.0+0x10/0x10 +[ 2213.276067][ T43] path_lookupat+0x72/0x660 +[ 2213.276193][ T43] filename_lookup+0x19e/0x420 +[ 2213.276317][ T43] ? __pfx_filename_lookup+0x10/0x10 +[ 2213.276454][ T43] ? find_held_lock+0x2c/0x110 +[ 2213.276596][ T43] ? __lock_release+0x103/0x460 +[ 2213.276723][ T43] ? __pfx___lock_release+0x10/0x10 +[ 2213.276850][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2213.276984][ T43] ? __might_fault+0xc3/0x170 +[ 2213.277110][ T43] ? lock_acquire+0x32/0xc0 +[ 2213.277230][ T43] ? __might_fault+0xc3/0x170 +[ 2213.277356][ T43] vfs_statx+0xbf/0x140 +[ 2213.277457][ T43] ? __pfx_vfs_statx+0x10/0x10 +[ 2213.277617][ T43] ? getname_flags+0xb3/0x410 +[ 2213.277748][ T43] vfs_fstatat+0x80/0xc0 +[ 2213.277850][ T43] __do_sys_newfstatat+0x75/0xd0 +[ 2213.277982][ T43] ? __pfx___do_sys_newfstatat+0x10/0x10 +[ 2213.278105][ T43] ? 
user_path_at+0x45/0x60 +[ 2213.278239][ T43] ? __x64_sys_openat+0x123/0x1e0 +[ 2213.278363][ T43] ? __pfx___x64_sys_openat+0x10/0x10 +[ 2213.278483][ T43] ? __pfx_do_faccessat+0x10/0x10 +[ 2213.278627][ T43] ? lockdep_hardirqs_on_prepare+0x275/0x410 +[ 2213.278779][ T43] do_syscall_64+0xc1/0x1d0 +[ 2213.278921][ T43] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 2213.279083][ T43] RIP: 0033:0x7fef39aaceae +[ 2213.279221][ T43] RSP: 002b:00007ffc38865528 EFLAGS: 00000246 ORIG_RAX: 0000000000000106 +[ 2213.279415][ T43] RAX: ffffffffffffffda RBX: 0000000000000004 RCX: 00007fef39aaceae +[ 2213.279615][ T43] RDX: 00007ffc38865600 RSI: 00007ffc38865530 RDI: 00000000ffffff9c +[ 2213.279801][ T43] RBP: 00007ffc388656c0 R08: 00000000ffffffff R09: 00007ffc38865530 +[ 2213.279989][ T43] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc38865537 +[ 2213.280184][ T43] R13: 00007ffc38865530 R14: 00007fef39abc220 R15: 00007fef39a7e000 +[ 2213.280377][ T43] +[ 2213.280470][ T43] INFO: task jq:9092 blocked for more than 245 seconds. +[ 2213.280628][ T43] Not tainted 6.10.0-virtme #1 +[ 2213.280743][ T43] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. +[ 2213.280955][ T43] task:jq state:D stack:26464 pid:9092 tgid:9092 ppid:9090 flags:0x00004000 +[ 2213.281199][ T43] Call Trace: +[ 2213.281296][ T43] +[ 2213.281364][ T43] __schedule+0x6e0/0x17e0 +[ 2213.281495][ T43] ? __pfx___schedule+0x10/0x10 +[ 2213.281642][ T43] ? schedule+0x1a5/0x210 +[ 2213.281737][ T43] ? __pfx_lock_acquire.part.0+0x10/0x10 +[ 2213.281868][ T43] ? trace_lock_acquire+0x14d/0x1f0 +[ 2213.282003][ T43] ? schedule+0x1a5/0x210 +[ 2213.282109][ T43] schedule+0xdf/0x210 +[ 2213.282219][ T43] __wait_on_freeing_inode+0x115/0x280 +[ 2213.282350][ T43] ? __pfx___wait_on_freeing_inode+0x10/0x10 +[ 2213.282505][ T43] ? __pfx_wake_bit_function+0x10/0x10 +[ 2213.282650][ T43] ? lock_acquire+0x32/0xc0 +[ 2213.282775][ T43] ? find_inode_fast+0x158/0x450 +[ 2213.282903][ T43] find_inode_fast+0x18d/0x450 +[ 2213.283054][ T43] iget_locked+0x7d/0x390 +[ 2213.283149][ T43] ? hlock_class+0x4e/0x130 +[ 2213.283280][ T43] v9fs_fid_iget_dotl+0x78/0x2d0 +[ 2213.283410][ T43] v9fs_vfs_lookup.part.0+0x1ed/0x390 +[ 2213.283539][ T43] ? __pfx_v9fs_vfs_lookup.part.0+0x10/0x10 +[ 2213.283705][ T43] ? lockdep_init_map_type+0x2cb/0x7c0 +[ 2213.283835][ T43] __lookup_slow+0x209/0x3c0 +[ 2213.283959][ T43] ? __pfx___lookup_slow+0x10/0x10 +[ 2213.284091][ T43] ? walk_component+0x29e/0x4f0 +[ 2213.284232][ T43] walk_component+0x2ab/0x4f0 +[ 2213.284356][ T43] link_path_walk.part.0.constprop.0+0x416/0x940 +[ 2213.284510][ T43] ? __pfx_link_path_walk.part.0.constprop.0+0x10/0x10 +[ 2213.284680][ T43] path_openat+0x1be/0x440 +[ 2213.284804][ T43] ? __pfx_path_openat+0x10/0x10 +[ 2213.284926][ T43] ? __lock_acquire+0xaf0/0x1570 +[ 2213.285051][ T43] do_filp_open+0x1b3/0x3e0 +[ 2213.285180][ T43] ? __pfx_do_filp_open+0x10/0x10 +[ 2213.285321][ T43] ? find_held_lock+0x2c/0x110 +[ 2213.285455][ T43] ? do_raw_spin_lock+0x131/0x270 +[ 2213.285599][ T43] ? __pfx_do_raw_spin_lock+0x10/0x10 +[ 2213.285735][ T43] ? alloc_fd+0x1f5/0x650 +[ 2213.285834][ T43] ? do_raw_spin_unlock+0x58/0x220 +[ 2213.285964][ T43] ? _raw_spin_unlock+0x23/0x40 +[ 2213.286086][ T43] ? alloc_fd+0x1f5/0x650 +[ 2213.286188][ T43] do_sys_openat2+0x122/0x160 +[ 2213.286320][ T43] ? __pfx_do_sys_openat2+0x10/0x10 +[ 2213.286446][ T43] ? user_path_at+0x45/0x60 +[ 2213.286586][ T43] __x64_sys_openat+0x123/0x1e0 +[ 2213.286709][ T43] ? 
__pfx___x64_sys_openat+0x10/0x10 +[ 2213.286829][ T43] ? __pfx_do_faccessat+0x10/0x10 +[ 2213.286972][ T43] do_syscall_64+0xc1/0x1d0 +[ 2213.287100][ T43] entry_SYSCALL_64_after_hwframe+0x77/0x7f +[ 2213.287251][ T43] RIP: 0033:0x7f7b9086b0e8 +[ 2213.287381][ T43] RSP: 002b:00007fff468c5918 EFLAGS: 00000287 ORIG_RAX: 0000000000000101 +[ 2213.287577][ T43] RAX: ffffffffffffffda RBX: 00007fff468c5b9f RCX: 00007f7b9086b0e8 +[ 2213.287758][ T43] RDX: 0000000000080000 RSI: 00007fff468c5990 RDI: 00000000ffffff9c +[ 2213.287935][ T43] RBP: 00007fff468c5980 R08: 0000000000080000 R09: 00007fff468c5990 +[ 2213.288117][ T43] R10: 0000000000000000 R11: 0000000000000287 R12: 00007fff468c5997 +[ 2213.288318][ T43] R13: 00007fff468c5bb0 R14: 00007fff468c5990 R15: 00007f7b9083c000 +[ 2213.288514][ T43] +[ 2213.288614][ T43] +[ 2213.288614][ T43] Showing all locks held in the system: +[ 2213.288798][ T43] 1 lock held by khungtaskd/43: +[ 2213.288925][ T43] #0: ffffffffb9368c00 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x70/0x3a0 +[ 2213.289157][ T43] 1 lock held by tc/9091: +[ 2213.289251][ T43] #0: ffff888001720148 (&type->i_mutex_dir_key#3){++++}-{3:3}, at: walk_component+0x29e/0x4f0 +[ 2213.289500][ T43] 1 lock held by jq/9092: +[ 2213.289611][ T43] #0: ffff888001720148 (&type->i_mutex_dir_key#3){++++}-{3:3}, at: walk_component+0x29e/0x4f0 +[ 2213.289855][ T43] +[ 2213.289919][ T43] ============================================= +[ 2213.289919][ T43] +""" + + +if __name__ == "__main__": + unittest.main() diff --git a/contest/remote/lib/fetcher.py b/contest/remote/lib/fetcher.py index 713d6d8..2ffcc74 100644 --- a/contest/remote/lib/fetcher.py +++ b/contest/remote/lib/fetcher.py @@ -3,6 +3,7 @@ import datetime import json import os +import re import requests import subprocess import time @@ -10,20 +11,20 @@ class Fetcher: def __init__(self, cb, cbarg, name, branches_url, results_path, url_path, tree_path, - check_sec=60, first_run="continue", single_shot=False): + patches_path, life, first_run="continue"): self._cb = cb self._cbarg = cbarg self.name = name + self.life = life self._branches_url = branches_url - self._check_secs = check_sec self._results_path = results_path self._url_path = url_path self._results_manifest = os.path.join(results_path, 'results.json') self._tree_path = tree_path - self.single_shot = single_shot + self._patches_path = patches_path # Set last date to something old self._last_date = datetime.datetime.now(datetime.UTC) - datetime.timedelta(weeks=1) @@ -68,7 +69,10 @@ def _result_set(self, branch_name, url): found = True break if not found: - old_db.append({'url': None, 'branch': branch_name, 'executor': self.name}) + old_db.append({'url': url, 'branch': branch_name, 'executor': self.name}) + + # Maintain only the last 500 entries + old_db = old_db[-500:] with open(self._results_manifest, "w") as fp: json.dump(old_db, fp) @@ -81,12 +85,15 @@ def _write_result(self, data, run_cookie): return self._url_path + '/' + file_name - def _run_test(self, binfo): + def _run_test(self, binfo, ref): self._result_set(binfo['branch'], None) start = datetime.datetime.now(datetime.UTC) run_id_cookie = str(int(start.timestamp() / 60) % 1000000) - rinfo = {'run-cookie': run_id_cookie} + rinfo = { + 'run-cookie': run_id_cookie, + 'branch-ref': ref, + } results = self._cb(binfo, rinfo, self._cbarg) end = datetime.datetime.now(datetime.UTC) @@ -99,18 +106,29 @@ def _run_test(self, binfo): } if 'link' in rinfo: entry['link'] = rinfo['link'] + if 'device' in rinfo: + entry['device'] = 
rinfo['device'] url = self._write_result(entry, run_id_cookie) self._result_set(binfo['branch'], url) - def _clean_old_branches(self, remote, current): - ret = subprocess.run('git branch', shell=True, capture_output=True) - existing = set([x.strip() for x in ret.stdout.decode('utf-8').split('\n')]) + def _find_branch(self, name): + ret = subprocess.run(['git', 'describe', 'main'], + check=False, capture_output=True) + if ret.returncode == 0: + # git found a direct hit for the name, use as is + return name - for b in remote: - if b["branch"] in existing and b["branch"] != current: - subprocess.run('git branch -d ' + b["branch"], - cwd=self._tree_path, shell=True) + # Try to find the branch in one of the remotes (will return remote/name) + ret = subprocess.run(['git', 'branch', '-r', '-l', '*/' + name], + cwd=self._tree_path, + capture_output=True, check=True) + + branches = ret.stdout.decode('utf-8').strip() + branches = [x.strip() for x in branches.split('\n')] + if len(branches) != 1: + print("Unexpected number of branches found:", branches) + return branches[0] def _run_once(self): r = requests.get(self._branches_url) @@ -131,20 +149,45 @@ def _run_once(self): print("Testing ", to_test) self._last_date = newest + + if self._patches_path is not None: + subprocess.run('git restore .', cwd=self._tree_path, + shell=True) + # For now assume URL is in one of the remotes subprocess.run('git fetch --all --prune', cwd=self._tree_path, - shell=True) - subprocess.run('git checkout ' + to_test["branch"], + shell=True, check=True) + + # After upgrading git 2.40.1 -> 2.47.1 CI hits a race in git, + # where tree is locked, even though previous command has finished. + # We need to sleep a bit and then wait for the lock to go away. + time.sleep(1) + lock_path = os.path.join(self._tree_path, '.git/HEAD.lock') + while os.path.exists(lock_path): + print("HEAD is still locked! Sleeping..") + time.sleep(0.2) + + ref = self._find_branch(to_test["branch"]) + subprocess.run('git checkout --detach ' + ref, cwd=self._tree_path, shell=True, check=True) - self._clean_old_branches(branches, to_test["branch"]) - self._run_test(to_test) - return self.single_shot + + if self._patches_path is not None: + for patch in sorted(os.listdir(self._patches_path)): + realpath = '{}/{}'.format(self._patches_path, patch) + subprocess.run('git apply -v {}'.format(realpath), + cwd=self._tree_path, shell=True) + + self._run_test(to_test, ref) def run(self): - while True: - if self._run_once(): - return - try: - time.sleep(self._check_secs) - except KeyboardInterrupt: - return + while self.life.next_poll(): + self._run_once() + + +def namify(what): + if not what: + return "no-name" + name = re.sub(r'[^0-9a-zA-Z]+', '-', what) + if name[-1] == '-': + name = name[:-1] + return name diff --git a/contest/remote/lib/loadavg.py b/contest/remote/lib/loadavg.py new file mode 100644 index 0000000..733a990 --- /dev/null +++ b/contest/remote/lib/loadavg.py @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import time + + +def get_dirty_mem(): + """ Get amount of dirty mem, returns value in MB """ + with open("/proc/meminfo", "r") as fp: + lines = fp.read().split("\n") + dirty = list(filter(lambda a: "Dirty" in a, lines))[0] + return int(dirty.split(" ")[-2]) / 1000 + + +def wait_loadavg(target, dirty_max=100, check_ival=30, stable_cnt=3): + """ + Wait for loadavg to drop but be careful at the start, the load + may have not ramped up, yet, so if we ungate early whoever is waiting + will experience the overload. 
+ """ + + seen_stable = 0 + while target is not None: + load, _, _ = os.getloadavg() + dirty = get_dirty_mem() + + if load <= target and dirty <= dirty_max: + if seen_stable >= stable_cnt: + break + seen_stable += 1 + else: + seen_stable = 0 + + print(f"Waiting for loadavg to decrease: CPU: {load} > {target} Dirty Mem: {dirty} > {dirty_max} MB ({seen_stable})") + time.sleep(check_ival) diff --git a/contest/remote/lib/vm.py b/contest/remote/lib/vm.py index f2e35a2..8e7efbe 100644 --- a/contest/remote/lib/vm.py +++ b/contest/remote/lib/vm.py @@ -9,7 +9,9 @@ import os import psutil import re +import shutil import signal +from .crash import has_crash, extract_crash """ @@ -29,11 +31,17 @@ paths=/extra/exec/PATH:/another/bin [vm] paths=/extra/exec/PATH:/another/bin +ld_paths=/extra/lib/PATH:/another/lib +exports=VAR1=val1,VAR2=val2 +setup=path_to_script.sh configs=relative/path/config,another/config init_prompt=expected_on-boot# virtme_opt=--opt,--another one +qemu_opt=--opt,this is --same one default_timeout=15 boot_timeout=45 +slowdown=2.5 # mark the machine as slow and multiply the ksft timeout by 2.5 +gcov=off / on """ @@ -48,29 +56,21 @@ def decode_and_filter(buf): return "".join([x for x in buf if (x in ['\n'] or unicodedata.category(x)[0]!="C")]) -def crash_finger_print(lines): - needles = [] - need_re = re.compile(r'.*( |0:)([a-z0-9_]+)\+0x[0-9a-f]+/0x[0-9a-f]+.*') - for line in lines: - m = need_re.match(line) - if not m: - continue - needles.append(m.groups()[1]) - if len(needles) == 4: - break - return ":".join(needles) - - class VM: - def __init__(self, config): + def __init__(self, config, vm_name=""): self.fail_state = "" self.p = None self.procs = [] self.config = config + self.vm_name = vm_name + self.print_pfx = (": " + vm_name) if vm_name else ":" + self.tree_path = config.get('local', 'tree_path') self.cfg_boot_to = int(config.get('vm', 'boot_timeout')) self.filter_data = None + self.has_kmemleak = None + self.has_gcov = self.config.getboolean('vm', 'gcov', fallback=False) self.log_out = "" self.log_err = "" @@ -79,17 +79,20 @@ def tree_popen(self, cmd): if self.config.get('env', 'paths'): env['PATH'] += ':' + self.config.get('env', 'paths') - return subprocess.Popen(cmd, env=env, cwd=self.config.get('local', 'tree_path'), + return subprocess.Popen(cmd, env=env, cwd=self.tree_path, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) def tree_cmd(self, cmd): - self.log_out += "> TREE CMD: " + cmd + "\n" - proc = self.tree_popen(cmd.split()) + if isinstance(cmd, str): + cmd = cmd.split() + self.log_out += "> TREE CMD: " + " ".join(cmd) + "\n" + proc = self.tree_popen(cmd) stdout, stderr = proc.communicate() self.log_out += stdout.decode("utf-8", "ignore") self.log_err += stderr.decode("utf-8", "ignore") proc.stdout.close() proc.stderr.close() + return proc.returncode def build(self, extra_configs, override_configs=None): if self.log_out or self.log_err: @@ -103,22 +106,97 @@ def build(self, extra_configs, override_configs=None): if extra_configs: configs += extra_configs - print("INFO: building kernel") + gcov = " --configitem GCOV_KERNEL=y" if self.has_gcov else "" + + print(f"INFO{self.print_pfx} building kernel") # Make sure we rebuild, config and module deps can be stale otherwise self.tree_cmd("make mrproper") - self.tree_cmd("vng -v -b" + " -f ".join([""] + configs)) + + rc = self.tree_cmd("vng -v -b" + " -f ".join([""] + configs) + gcov) + if rc != 0: + print(f"INFO{self.print_pfx} kernel build failed") + return False + + return True + + def 
_get_ksft_timeout(self): + default_timeout = 45 # from tools/testing/selftests/kselftest/runner.sh + + targets = self.config.get('ksft', 'target', fallback=None) + if not targets: + return default_timeout + target = targets.split()[0] + + settings_path = f'{self.tree_path}/tools/testing/selftests/{target}/settings' + if not os.path.isfile(settings_path): + return default_timeout + + with open(settings_path, 'r') as fp: + lines = fp.readlines() + for l in lines: + if l.startswith('timeout='): + return int(l.split('=')[1]) + + return default_timeout + + def _set_env(self): + # Install extra PATHs + if self.config.get('vm', 'paths', fallback=None): + self.cmd("export PATH=" + self.config.get('vm', 'paths') + ':$PATH') + self.drain_to_prompt() + + if self.config.get('vm', 'ld_paths', fallback=None): + self.cmd("export LD_LIBRARY_PATH=" + self.config.get('vm', 'ld_paths') + ':$LD_LIBRARY_PATH') + self.drain_to_prompt() + + setup_scripts = self.config.get('vm', 'setup', fallback='').split(',') + for setup in setup_scripts: + if not setup: + continue + self.cmd(setup) + self.drain_to_prompt() + + exports = self.config.get('vm', 'exports', fallback=None) + if exports: + for export in exports.split(','): + self.cmd("export " + export) + self.drain_to_prompt() + + slowdown = self.config.getfloat('vm', 'slowdown', fallback=0) + if slowdown: + self.cmd("export KSFT_MACHINE_SLOW=yes") + self.drain_to_prompt() + + # only when needed, to avoid 'overriding timeout' message + if slowdown > 1: + timeout = self._get_ksft_timeout() * slowdown + self.cmd(f"export kselftest_override_timeout={round(timeout)}") + self.drain_to_prompt() + + self.cmd("env") + self.drain_to_prompt() def start(self, cwd=None): cmd = "vng -v -r arch/x86/boot/bzImage --user root" cmd = cmd.split(' ') if cwd: cmd += ["--cwd", cwd] - cmd += self.config.get('vm', 'virtme_opt').split(',') + + opts = self.config.get('vm', 'virtme_opt', fallback="") + cmd += opts.split(',') if opts else [] + + opts = self.config.get('vm', 'qemu_opt', fallback="") + cmd += ["-o", " " + opts] if opts else [] + cpus = self.config.get('vm', 'cpus', fallback="") if cpus: cmd += ["--cpus", cpus] + mem = self.config.get('vm', 'mem', fallback="") + if mem: + cmd += ["--memory", mem] - print("INFO: VM starting:", " ".join(cmd)) + print(f"INFO{self.print_pfx} VM starting:", " ".join(cmd)) + self.log_out += "# " + " ".join(cmd) + "\n" self.p = self.tree_popen(cmd) for pipe in [self.p.stdout, self.p.stderr]: @@ -129,7 +207,7 @@ def start(self, cwd=None): init_prompt = self.config.get('vm', 'init_prompt') if init_prompt[-1] != ' ': init_prompt += ' ' - print(f"INFO: expecting prompt: '{init_prompt}'") + print(f"INFO{self.print_pfx} expecting prompt: '{init_prompt}'") try: self.drain_to_prompt(prompt=init_prompt, dump_after=self.cfg_boot_to) finally: @@ -137,31 +215,24 @@ def start(self, cwd=None): proc = psutil.Process(self.p.pid) self.procs = proc.children(recursive=True) + [proc] - print("INFO: reached initial prompt") + print(f"INFO{self.print_pfx} reached initial prompt") self.cmd("PS1='xx__-> '") self.drain_to_prompt() - # Install extra PATHs - if self.config.get('vm', 'paths', fallback=None): - self.cmd("export PATH=" + self.config.get('vm', 'paths') + ':$PATH') - self.drain_to_prompt() - if self.config.get('vm', 'ld_paths', fallback=None): - self.cmd("export LD_LIBRARY_PATH=" + self.config.get('vm', 'ld_paths') + ':$LD_LIBRARY_PATH') - self.drain_to_prompt() - exports = self.config.get('vm', 'exports', fallback=None) - if exports: - for export in 
exports.split(','): - self.cmd("export " + export) - self.drain_to_prompt() - self.cmd("env") + off = len(self.log_out) + self.cmd("ls /sys/kernel/debug/") self.drain_to_prompt() + self.has_kmemleak = "kmemleak" in self.log_out[off:] + self.has_gcov = self.has_gcov and "gcov" in self.log_out[off:] + + self._set_env() def stop(self): self.cmd("exit") try: stdout, stderr = self.p.communicate(timeout=3) except subprocess.TimeoutExpired: - print("WARNING: process did not exit, sending a KILL to", self.p.pid, self.procs) + print(f"WARN{self.print_pfx} process did not exit, sending a KILL to", self.p.pid, self.procs) for p in self.procs: try: p.kill() @@ -174,7 +245,7 @@ def stop(self): stdout = stdout.decode("utf-8", "ignore") stderr = stderr.decode("utf-8", "ignore") - print("INFO: VM stopped") + print(f"INFO{self.print_pfx} VM stopped") self.log_out += stdout self.log_err += stderr @@ -192,6 +263,14 @@ def ctrl_c(self): self.p.stdin.write(b'\x03') self.p.stdin.flush() + def kill_current_cmd(self): + try: + self.ctrl_c() + self.ctrl_c() + self.drain_to_prompt(dump_after=12) + except TimeoutError: + print(f"WARN{self.print_pfx} failed to interrupt process") + def _read_pipe_nonblock(self, pipe): read_some = False output = "" @@ -201,15 +280,20 @@ def _read_pipe_nonblock(self, pipe): return read_some, output read_some = True output = decode_and_filter(buf) - if output.find("] RIP: ") != -1 or output.find("] Call Trace:") != -1: + if has_crash(output): self.fail_state = "oops" except BlockingIOError: pass return read_some, output - def drain_to_prompt(self, prompt="xx__-> ", dump_after=None): + def drain_to_prompt(self, prompt="xx__-> ", dump_after=None, deadline=None): + _dump_after = dump_after if dump_after is None: - dump_after = int(self.config.get('vm', 'default_timeout')) + dump_after = self.config.getint('vm', 'default_timeout') + hard_stop = self.config.getint('vm', 'hard_timeout', fallback=(1 << 63)) + if deadline is not None: + hard_stop = max(0, min(deadline, hard_stop)) + waited = 0 total_wait = 0 stdout = "" @@ -226,6 +310,10 @@ def drain_to_prompt(self, prompt="xx__-> ", dump_after=None): if read_some: if stdout.endswith(prompt): break + if self.fail_state == "oops" and _dump_after is None and dump_after > 300: + dump_after = 300 + self.log_out += '\nDETECTED CRASH, lowering timeout\n' + # A bit of a hack, sometimes kernel spew will clobber # the prompt. Until we have a good way of sending kernel # logs elsewhere try to get a new prompt by sending a new line. 
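The hunks just above and just below change how drain_to_prompt() bounds its wait: callers can now pass a deadline (seconds of budget left for the whole run) and the method clamps the configured hard_timeout to it before entering the drain loop. A minimal sketch of that clamping, assuming the same semantics as the diff (the helper name effective_hard_stop is illustrative and not part of the patch):

    def effective_hard_stop(cfg_hard_timeout=None, deadline=None):
        # Fall back to an effectively unbounded wait, as the diff does with (1 << 63)
        hard_stop = cfg_hard_timeout if cfg_hard_timeout is not None else (1 << 63)
        if deadline is not None:
            # Never wait past the caller's deadline, and never go negative
            hard_stop = max(0, min(deadline, hard_stop))
        return hard_stop

    # e.g. with hard_timeout=3600 but only 120 seconds left before the run's
    # deadline, effective_hard_stop(3600, 120) == 120, so the drain loop stops
    # waiting after roughly two minutes and raises TimeoutError.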
@@ -238,12 +326,21 @@ def drain_to_prompt(self, prompt="xx__-> ", dump_after=None): waited += 0.03 sleep(0.03) + if total_wait > hard_stop: + self.log_err += f'\nHARD STOP ({hard_stop})\n' + waited = 1 << 63 if waited > dump_after: - print("WAIT TIMEOUT retcode:", self.p.returncode) + print(f"WARN{self.print_pfx} TIMEOUT retcode:", self.p.returncode, + "waited:", waited, "total:", total_wait) self.log_out += '\nWAIT TIMEOUT stdout\n' self.log_err += '\nWAIT TIMEOUT stderr\n' + if not self.fail_state: + self.fail_state = "timeout" raise TimeoutError(stderr, stdout) + if self.fail_state == "timeout": + self.fail_state = "" + return stdout, stderr def dump_log(self, dir_path, result=None, info=None): @@ -269,40 +366,20 @@ def dump_log(self, dir_path, result=None, info=None): self.log_err = "" def _load_filters(self): - if self.filter_data is not None: - return - url = self.config.get("remote", "filters", fallback=None) - if not url: - return - r = requests.get(url) - self.filter_data = json.loads(r.content.decode('utf-8')) + if self.filter_data is None: + url = self.config.get("remote", "filters", fallback=None) + if not url: + return None + r = requests.get(url) + self.filter_data = json.loads(r.content.decode('utf-8')) + return self.filter_data def extract_crash(self, out_path): - in_crash = False - start = 0 - crash_lines = [] - finger_prints = [] - last5 = [""] * 5 - for line in self.log_out.split('\n'): - if in_crash: - in_crash &= '] ---[ end trace ' not in line - in_crash &= '] ' not in line - if not in_crash: - finger_prints.append(crash_finger_print(crash_lines[start:])) - else: - in_crash |= '] Hardware name: ' in line - if in_crash: - start = len(crash_lines) - crash_lines += last5 - - # Keep last 5 to get some of the stuff before stack trace - last5 = last5[1:] + ["| " + line] - - if in_crash: - crash_lines.append(line) + crash_lines, finger_prints = extract_crash(self.log_out + self.log_err, "xx__-> ", + lambda : self._load_filters()) if not crash_lines: - print("WARNING: extract_crash found no crashes") - return + print(f"WARN{self.print_pfx} extract_crash found no crashes") + return ["crash-extract-fail"] proc = self.tree_popen("./scripts/decode_stacktrace.sh vmlinux auto ./".split()) stdout, stderr = proc.communicate("\n".join(crash_lines).encode("utf-8")) @@ -319,10 +396,37 @@ def extract_crash(self, out_path): self._load_filters() if self.filter_data is not None and 'ignore-crashes' in self.filter_data: ignore = set(self.filter_data["ignore-crashes"]) - seen = set(finger_prints) - if not seen - ignore: - print("INFO: all crashes were ignored") + if not finger_prints - ignore: + print(f"INFO{self.print_pfx} all crashes were ignored") self.fail_state = "" + return list(finger_prints) + + def check_health(self): + if self.fail_state: + return + if self.has_kmemleak: + # First scan, to identify possible leaks + self.cmd("echo scan > /sys/kernel/debug/kmemleak") + self.drain_to_prompt() + # kmemleak needs objects to be at least MSECS_MIN_AGE (5000) + # before it considers them to have been leaked + sleep(5) + # Second scan, to identify what has really leaked + self.cmd("echo scan > /sys/kernel/debug/kmemleak && cat /sys/kernel/debug/kmemleak") + self.drain_to_prompt() + + + def capture_gcov(self, dest): + if not self.has_gcov: + return + + lcov = "kernel.lcov" + self.cmd(f"lcov --capture --keep-going --rc geninfo_unexecuted_blocks=1 --function-coverage --branch-coverage -j $(nproc) -o {lcov}") + self.drain_to_prompt() + + lcov = os.path.join(self.tree_path, lcov) + if 
os.path.isfile(lcov): + shutil.copy(lcov, dest) def bash_prev_retcode(self): self.cmd("echo $?") @@ -333,7 +437,7 @@ def bash_prev_retcode(self): def new_vm(results_path, vm_id, thr=None, vm=None, config=None, cwd=None): thr_pfx = f"thr{thr}-" if thr is not None else "" if vm is None: - vm = VM(config) + vm = VM(config, vm_name=f"{thr_pfx}{vm_id + 1}") # For whatever reason starting sometimes hangs / crashes i = 0 while True: @@ -346,7 +450,7 @@ def new_vm(results_path, vm_id, thr=None, vm=None, config=None, cwd=None): i += 1 if i > 4: raise - print(f"WARNING: VM did not start, retrying {i}/4") + print(f"WARN{vm.print_pfx} VM did not start, retrying {i}/4") vm.dump_log(results_path + f'/vm-crashed-{thr_pfx}{vm_id}-{i}') vm.stop() @@ -354,10 +458,11 @@ def new_vm(results_path, vm_id, thr=None, vm=None, config=None, cwd=None): def guess_indicators(output): return { "fail": output.find("[FAIL]") != -1 or output.find("[fail]") != -1 or \ + output.find(" FAIL:") != -1 or \ output.find("\nnot ok 1 selftests: ") != -1 or \ output.find("\n# not ok 1") != -1, "skip": output.find("[SKIP]") != -1 or output.find("[skip]") != -1 or \ - output.find(" # SKIP") != -1, + output.find(" # SKIP") != -1 or output.find("SKIP:") != -1, "pass": output.find("[OKAY]") != -1 or output.find("[PASS]") != -1 or \ output.find("[ OK ]") != -1 or output.find("[OK]") != -1 or \ output.find("[ ok ]") != -1 or output.find("[pass]") != -1 or \ diff --git a/contest/remote/vmksft-p.py b/contest/remote/vmksft-p.py index dfcdf13..7cfa9d1 100755 --- a/contest/remote/vmksft-p.py +++ b/contest/remote/vmksft-p.py @@ -7,13 +7,16 @@ import os import re import queue +import subprocess import sys import tempfile import threading import time +from core import NipaLifetime +from lib import wait_loadavg from lib import CbArg -from lib import Fetcher +from lib import Fetcher, namify from lib import VM, new_vm, guess_indicators @@ -32,6 +35,7 @@ json_path=base-relative/path/to/json results_path=base-relative/path/to/raw/outputs tree_path=/root-path/to/kernel/git +patches_path=/root-path/to/patches/dir [www] url=https://url-to-reach-base-path # Specific stuff @@ -44,7 +48,10 @@ default_timeout=15 boot_timeout=45 [ksft] -targets=net +target=net +nested_tests=off / on +[device] +info_script=cmd_printing_json Expected: @@ -54,27 +61,77 @@ """ -def namify(what): - name = re.sub(r'[^0-9a-zA-Z]+', '-', what) - if name[-1] == '-': - name = name[:-1] - return name - - -def get_prog_list(vm, target): +def get_prog_list(vm, targets): tmpdir = tempfile.mkdtemp() - vm.tree_cmd(f"make -C tools/testing/selftests/ TARGETS={target} INSTALL_PATH={tmpdir} install") + targets = " ".join(targets) + vm.tree_cmd(['make', '-C', 'tools/testing/selftests/', 'TARGETS=' + targets, 'INSTALL_PATH=' + tmpdir, 'install']) with open(os.path.join(tmpdir, 'kselftest-list.txt'), "r") as fp: targets = fp.readlines() vm.tree_cmd("rm -rf " + tmpdir) - return [e.split(":")[1].strip() for e in targets] - - -def _vm_thread(config, results_path, thr_id, in_queue, out_queue): - target = config.get('ksft', 'target') + return [(e.split(":")[0].strip(), e.split(":")[1].strip()) for e in targets] + + +def _parse_nested_tests(full_run, prev_results): + tests = [] + nested_tests = False + + result_re = re.compile(r"(not )?ok (\d+)( -)? 
([^#]*[^ ])( +# +)?([^ ].*)?$") + time_re = re.compile(r"time=(\d+)ms") + + for line in full_run.split('\n'): + # nested subtests support: we parse the comments from 'TAP version' + if nested_tests: + if line.startswith("# "): + line = line[2:] + else: + nested_tests = False + elif line.startswith("# TAP version "): + nested_tests = True + continue + + if not nested_tests: + continue + + if line.startswith("ok "): + result = "pass" + elif line.startswith("not ok "): + result = "fail" + else: + continue + + v = result_re.match(line).groups() + r = {'test': namify(v[3])} + + if len(v) > 5 and v[4] and v[5]: + if v[5].lower().startswith('skip'): + result = "skip" + + t = time_re.findall(v[5].lower()) + if t: + r['time'] = round(int(t[-1]) / 1000.) # take the last one + + r['result'] = result + + if prev_results is not None: + for entry in prev_results: + if entry['test'] == r['test']: + entry['retry'] = result + break + else: + # the first run didn't validate this test: add it to the list + r['result'] = 'skip' + r['retry'] = result + prev_results.append(r) + else: + tests.append(r) + + # return an empty list when there are prev results: no replacement needed + return tests + +def _vm_thread(config, results_path, thr_id, hard_stop, in_queue, out_queue): vm = None - vm_id = 1 + vm_id = -1 while True: try: @@ -83,28 +140,35 @@ def _vm_thread(config, results_path, thr_id, in_queue, out_queue): print(f"INFO: thr-{thr_id} has no more work, exiting") break - if vm is None: - vm_id, vm = new_vm(results_path, vm_id, config=config, thr=thr_id) - - test_id = work_item[0] - prog = work_item[1] + test_id = work_item['tid'] + prog = work_item['prog'] + target = work_item['target'] test_name = namify(prog) file_name = f"{test_id}-{test_name}" + is_retry = 'result' in work_item + + deadline = (hard_stop - datetime.datetime.now(datetime.UTC)).total_seconds() + + if is_retry: + file_name += '-retry' + # Don't run retries if we can't finish with 10min to spare + if is_retry and deadline - work_item['time'] < 10 * 60: + print(f"INFO: thr-{thr_id} retry skipped == " + prog) + out_queue.put(work_item) + continue + + if vm is None: + vm_id, vm = new_vm(results_path, vm_id, config=config, thr=thr_id) print(f"INFO: thr-{thr_id} testing == " + prog) t1 = datetime.datetime.now() - vm.cmd(f'make -C tools/testing/selftests TARGETS={target} TEST_PROGS={prog} TEST_GEN_PROGS="" run_tests') - + vm.cmd(f'make -C tools/testing/selftests TARGETS="{target}" TEST_PROGS={prog} TEST_GEN_PROGS="" run_tests') try: - vm.drain_to_prompt() + vm.drain_to_prompt(deadline=deadline) retcode = vm.bash_prev_retcode() except TimeoutError: - try: - vm.ctrl_c() - vm.ctrl_c() - vm.drain_to_prompt(dump_after=10) - except TimeoutError: - pass + print(f"INFO: thr-{thr_id} test timed out:", prog) + vm.kill_current_cmd() retcode = 1 t2 = datetime.datetime.now() @@ -122,36 +186,80 @@ def _vm_thread(config, results_path, thr_id, in_queue, out_queue): if indicators["fail"]: result = 'fail' + vm.check_health() + + crashes = None if vm.fail_state == 'oops': - vm.extract_crash(results_path + f'/vm-crash-thr{thr_id}-{vm_id}') + print(f"INFO: thr-{thr_id} test crashed kernel:", prog) + crashes = vm.extract_crash(results_path + f'/vm-crash-thr{thr_id}-{vm_id}') # Extraction will clear/discard false-positives (ignored traces) # check VM is still in failed state if vm.fail_state: result = "fail" - vm.dump_log(results_path + '/' + file_name, result=retcode, - info={"thr-id": thr_id, "vm-id": vm_id, "time": (t2 - t1).seconds, - "found": indicators, "vm_state": 
vm.fail_state}) print(f"INFO: thr-{thr_id} {prog} >> retcode:", retcode, "result:", result, "found", indicators) - out_queue.put({'prog': prog, 'test': test_name, 'file_name': file_name, - 'result': result, 'time': (t2 - t1).seconds}) + if is_retry: + outcome = work_item + outcome['retry'] = result + else: + outcome = {'tid': test_id, 'prog': prog, 'target': target, + 'test': test_name, 'file_name': file_name, + 'result': result, 'time': (t2 - t1).total_seconds()} + if crashes: + outcome['crashes'] = crashes + + if config.getboolean('ksft', 'nested_tests', fallback=False): + if is_retry: + prev_results = outcome['results'] if 'results' in outcome else [] + else: + prev_results = None + + # this will only parse nested tests inside the TAP comments + nested_tests = _parse_nested_tests(vm.log_out, prev_results) + if nested_tests: + outcome['results'] = nested_tests + + print(f"INFO: thr-{thr_id} {prog} >> nested tests: {len(nested_tests)}") + + can_retry = not is_retry + + post_check = config.get('ksft', 'post_check', fallback=None) + if post_check and not vm.fail_state: + vm.cmd(post_check) + vm.drain_to_prompt() + pc = vm.bash_prev_retcode() + if pc != 0: + vm.fail_state = "env-check-fail" + if result == 'pass': + result = 'fail' + can_retry = False # Don't waste time, the test is buggy + + if can_retry and result == 'fail': + in_queue.put(outcome) + else: + out_queue.put(outcome) + + vm.dump_log(results_path + '/' + file_name, result=retcode, + info={"thr-id": thr_id, "vm-id": vm_id, "time": (t2 - t1).total_seconds(), + "found": indicators, "vm_state": vm.fail_state}) if vm.fail_state: - print(f"INFO: thr-{thr_id} VM kernel crashed, destroying it") + print(f"INFO: thr-{thr_id} VM {vm.fail_state}, destroying it") vm.stop() vm.dump_log(results_path + f'/vm-stop-thr{thr_id}-{vm_id}') vm = None if vm is not None: + vm.capture_gcov(results_path + f'/kernel-thr{thr_id}-{vm_id}.lcov') vm.stop() vm.dump_log(results_path + f'/vm-stop-thr{thr_id}-{vm_id}') return -def vm_thread(config, results_path, thr_id, in_queue, out_queue): +def vm_thread(config, results_path, thr_id, hard_stop, in_queue, out_queue): try: - _vm_thread(config, results_path, thr_id, in_queue, out_queue) + _vm_thread(config, results_path, thr_id, hard_stop, in_queue, out_queue) except Exception: print(f"ERROR: thr-{thr_id} has crashed") raise @@ -173,19 +281,44 @@ def test(binfo, rinfo, cbarg): config.get('local', 'results_path') + '/' + \ rinfo['run-cookie'] rinfo['link'] = link - target = config.get('ksft', 'target') + targets = config.get('ksft', 'target').split() + grp_name = "selftests-" + namify(targets[0]) + + if config.get('device', 'info_script', fallback=None): + dev_info = subprocess.run(config.get('device', 'info_script'), + shell=True, stdout=subprocess.PIPE, check=True) + rinfo['device'] = dev_info.stdout.decode('utf-8').strip() vm = VM(config) - vm.build([f"tools/testing/selftests/{target}/config"]) + + kconfs = [] + for target in targets: + conf = f"tools/testing/selftests/{target}/config" + if os.path.exists(os.path.join(vm.tree_path, conf)): + kconfs.append(conf) + if vm.build(kconfs) == False: + vm.dump_log(results_path + '/build') + return [{ + 'test': 'build', + 'group': grp_name, + 'result': 'fail', + 'link': link + '/build', + }] + shutil.copy(os.path.join(config.get('local', 'tree_path'), '.config'), results_path + '/config') vm.tree_cmd("make headers") - vm.tree_cmd(f"make -C tools/testing/selftests/{target}/") + for target in targets: + vm.tree_cmd(f"make -C tools/testing/selftests/{target}/") 
vm.dump_log(results_path + '/build') - progs = get_prog_list(vm, target) + progs = get_prog_list(vm, targets) progs.sort(reverse=True, key=lambda prog : cbarg.prev_runtime.get(prog, 0)) + dl_min = config.getint('executor', 'deadline_minutes', fallback=999999) + hard_stop = datetime.datetime.fromisoformat(binfo["date"]) + hard_stop += datetime.timedelta(minutes=dl_min) + in_queue = queue.Queue() out_queue = queue.Queue() threads = [] @@ -193,27 +326,40 @@ def test(binfo, rinfo, cbarg): i = 0 for prog in progs: i += 1 - in_queue.put((i, prog, )) + in_queue.put({'tid': i, 'target': prog[0], 'prog': prog[1]}) + # In case we have multiple tests kicking off on the same machine, + # add optional wait to make sure others have finished building + load_tgt = config.getfloat("cfg", "wait_loadavg", fallback=None) thr_cnt = int(config.get("cfg", "thread_cnt")) delay = float(config.get("cfg", "thread_spawn_delay", fallback=0)) for i in range(thr_cnt): + wait_loadavg(load_tgt) print("INFO: starting VM", i) threads.append(threading.Thread(target=vm_thread, - args=[config, results_path, i, in_queue, out_queue])) + args=[config, results_path, i, hard_stop, + in_queue, out_queue])) threads[i].start() time.sleep(delay) for i in range(thr_cnt): threads[i].join() - grp_name = "selftests-" + namify(target) cases = [] while not out_queue.empty(): r = out_queue.get() - cbarg.prev_runtime[r["prog"]] = r["time"] - cases.append({'test': r['test'], 'group': grp_name, 'result': r["result"], - 'link': link + '/' + r['file_name']}) + if 'time' in r: + cbarg.prev_runtime[(r["target"], r["prog"])] = r["time"] + outcome = { + 'test': r['test'], + 'group': "selftests-" + namify(r['target']), + 'result': r["result"], + 'link': link + '/' + r['file_name'] + } + for key in ['time', 'retry', 'crashes', 'results']: + if key in r: + outcome[key] = r[key] + cases.append(outcome) if not in_queue.empty(): print("ERROR: in queue is not empty") @@ -232,14 +378,19 @@ def main() -> None: base_dir = config.get('local', 'base_path') + life = NipaLifetime(config) + f = Fetcher(test, cbarg, name=config.get('executor', 'name'), branches_url=config.get('remote', 'branches'), results_path=os.path.join(base_dir, config.get('local', 'json_path')), url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), tree_path=config.get('local', 'tree_path'), + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, first_run=config.get('executor', 'init', fallback="continue")) f.run() + life.exit() if __name__ == "__main__": diff --git a/contest/remote/vmksft.py b/contest/remote/vmksft.py index ec73db9..a738f0f 100755 --- a/contest/remote/vmksft.py +++ b/contest/remote/vmksft.py @@ -8,6 +8,7 @@ import re import sys +from core import NipaLifetime from lib import CbArg from lib import Fetcher from lib import VM, new_vm, guess_indicators @@ -28,6 +29,7 @@ json_path=base-relative/path/to/json results_path=base-relative/path/to/raw/outputs tree_path=/root-path/to/kernel/git +patches_path=/root-path/to/patches/dir [www] url=https://url-to-reach-base-path # Specific stuff @@ -41,6 +43,7 @@ boot_timeout=45 [ksft] targets=net +nested_tests=off / on Expected: @@ -50,14 +53,28 @@ """ -def ktap_split(full_run): +def ktap_split(full_run, parse_nested_tests): tests = [] test = None test_id = 0 + test_main = None - result_re = re.compile(r"(not )?ok (\d+) ([^#]*[^ ])( # )?([^ ].*)?$") + result_re = re.compile(r"(not )?ok (\d+)( -)? 
([^#]*[^ ])( # )?([^ ].*)?$") for line in full_run.split('\n'): + if parse_nested_tests: + # nested tests support: we parse the comments from 'TAP version' + if test_main: + if line.startswith("# "): + line = line[2:] + else: + # back to the main test + test = test_main + test_main = None + elif line.startswith("# TAP version "): + test_main = test + test = None + if test is None: test = { "tid": test_id, @@ -81,10 +98,10 @@ def ktap_split(full_run): v = result_re.match(line).groups() test["output"] = "\n".join(test["output"]) test["sid"] = int(v[1]) - test["name"] = v[2] - if len(v) > 4: - test["comment"] = v[4] - if v[4] == "SKIP" and test["result"] == "pass": + test["name"] = v[3] + if len(v) > 5: + test["comment"] = v[5] + if v[5] == "SKIP" and test["result"] == "pass": test["result"] = "skip" tests.append(test) test = None @@ -167,7 +184,9 @@ def test(binfo, rinfo, cbarg): full_run = vm.log_out vm.dump_log(results_path + '/full', result=retcode, info={"vm_state": vm.fail_state}) - tests = ktap_split(full_run) + parse_nested_tests = config.getboolean('ksft', 'nested_tests', + fallback=False) + tests = ktap_split(full_run, parse_nested_tests) if tests: pfx = ktap_extract_pfx(tests) grp_name = namify(pfx) @@ -217,14 +236,19 @@ def main() -> None: base_dir = config.get('local', 'base_path') + life = NipaLifetime(config) + f = Fetcher(test, cbarg, name=config.get('executor', 'name'), branches_url=config.get('remote', 'branches'), results_path=os.path.join(base_dir, config.get('local', 'json_path')), url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), tree_path=config.get('local', 'tree_path'), + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, first_run=config.get('executor', 'init', fallback="continue")) f.run() + life.exit() if __name__ == "__main__": diff --git a/contest/remote/vmtest.py b/contest/remote/vmtest.py index 3510775..a2ca5b1 100755 --- a/contest/remote/vmtest.py +++ b/contest/remote/vmtest.py @@ -7,6 +7,7 @@ import sys import os +from core import NipaLifetime from lib import CbArg from lib import Fetcher from lib import VM, new_vm, guess_indicators @@ -27,6 +28,7 @@ json_path=base-relative/path/to/json results_path=base-relative/path/to/raw/outputs tree_path=/root-path/to/kernel/git +patches_path=/root-path/to/patches/dir [www] url=https://url-to-reach-base-path # Specific stuff @@ -142,14 +144,19 @@ def main() -> None: base_dir = config.get('local', 'base_path') + life = NipaLifetime(config) + f = Fetcher(test, cbarg, name=config.get('executor', 'name'), branches_url=config.get('remote', 'branches'), results_path=os.path.join(base_dir, config.get('local', 'json_path')), url_path=config.get('www', 'url') + '/' + config.get('local', 'json_path'), tree_path=config.get('local', 'tree_path'), + patches_path=config.get('local', 'patches_path', fallback=None), + life=life, first_run=config.get('executor', 'init', fallback="continue")) f.run() + life.exit() if __name__ == "__main__": diff --git a/contest/results-collector.py b/contest/results-collector.py new file mode 100755 index 0000000..82af61a --- /dev/null +++ b/contest/results-collector.py @@ -0,0 +1,480 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import configparser +import copy +import datetime +import functools +import json +import os +import psycopg2 +import requests +import time + + +""" +Config: + +[cfg] +refresh=#secs +[input] +remote_db=/path/to/db +[output] +dir=/path/to/output +url_pfx=relative/within/server +combined=name-of-manifest.json 
+[db] +db=db-name +stability-name=table-name +results-name=table-name +wip-name=table-name +branches-name=table-name +""" + +def result_flatten(full): + """ + Take in a full result dict (for one run, with subtests). + Return a list of dicts: + [ + { "group": str, "test": str, "subtest": str/None, "result": bool }, + ] + """ + flat = [] + + for test in full["results"]: + l1 = { "group": test["group"], + "test": test["test"], + "subtest": None, + "result": test["result"].lower() == "pass" + } + flat.append(l1) + for case in test.get("results", []): + data = l1.copy() + data["subtest"] = case["test"] + data["result"] = case["result"].lower() == "pass" + flat.append(data) + + return flat + + +class FetcherState: + def __init__(self): + self.config = configparser.ConfigParser() + self.config.read(['fetcher.config']) + + # "fetched" is more of a "need state rebuild" + self.fetched = True + + self.tbl_stb = self.config.get("db", "stability-name", fallback="stability") + self.tbl_res = self.config.get("db", "results-name", fallback="results") + self.tbl_wip = self.config.get("db", "wip-name", fallback="results_pending") + self.tbl_brn = self.config.get("db", "branches-name", fallback="branches") + + db_name = self.config.get("db", "db") + self.psql_conn = psycopg2.connect(database=db_name) + self.psql_conn.autocommit = True + + def get_branch(self, name): + with self.psql_conn.cursor() as cur: + cur.execute(f"SELECT info FROM {self.tbl_brn} WHERE branch = '{name}'") + rows = cur.fetchall() + return json.loads(rows[0][0]) + + def psql_run_selector(self, cur, remote, run): + return cur.mogrify("WHERE branch = %s AND remote = %s AND executor = %s", + (run['branch'], remote["name"], run["executor"],)).decode('utf-8') + + def psql_has_wip(self, remote, run): + """ Check if there is an entry in the WIP/pending table for the run """ + with self.psql_conn.cursor() as cur: + cur.execute(f"SELECT branch FROM {self.tbl_wip} " + self.psql_run_selector(cur, remote, run)) + rows = cur.fetchall() + return rows and len(rows) > 0 + + def psql_clear_wip(self, remote, run): + """ Delete entry in the WIP/pending table for the run """ + with self.psql_conn.cursor() as cur: + cur.execute(f"DELETE FROM {self.tbl_wip} " + self.psql_run_selector(cur, remote, run)) + + def psql_insert_wip(self, remote, run): + """ + Add entry in the WIP/pending table for the run, if one doesn't exist + """ + if self.psql_has_wip(remote, run): + return + + branch_info = self.get_branch(run["branch"]) + when = datetime.datetime.fromisoformat(branch_info['date']) + + with self.psql_conn.cursor() as cur: + cur.execute(f"INSERT INTO {self.tbl_wip} (branch, remote, executor, branch_date, t_start) VALUES (%s, %s, %s, %s, %s)", + (run["branch"], remote["name"], run["executor"], run["branch"][-17:], str(when))) + + def insert_result_psql(self, data): + with self.psql_conn.cursor() as cur: + fields = "(branch, branch_date, remote, executor, t_start, t_end, json_normal, json_full)" + normal, full = self.psql_json_split(data) + arg = cur.mogrify("(%s,%s,%s,%s,%s,%s,%s,%s)", + (data["branch"], data["branch"][-17:], data["remote"], data["executor"], + data["start"], data["end"], normal, full)) + cur.execute(f"INSERT INTO {self.tbl_res} {fields} VALUES " + arg.decode('utf-8')) + + def psql_json_split(self, data): + # return "normal" and "full" as json string or None + # "full" will be None if they are the same to save storage + full_s = json.dumps(data) + if data.get("results") is None: # WIP result + return full_s, None + data = copy.deepcopy(data) + + 
# Filter down the results + apply_stability(self, data, {}) + + for row in data.get("results", []): + if "results" in row: + del row["results"] + + norm_s = json.dumps(data) + + if norm_s != full_s: + return norm_s, full_s + return full_s, None + + def psql_stability_selector(self, cur, data, row): + base = cur.mogrify("WHERE remote = %s AND executor = %s AND grp = %s AND test = %s", + (data["remote"], data["executor"], row["group"], row["test"],)).decode('utf-8') + + if row["subtest"] is None: + return base + " AND subtest is NULL" + return base + cur.mogrify(" AND subtest = %s", (row["subtest"],)).decode('utf-8') + + def psql_get_unstable(self, data): + with self.psql_conn.cursor() as cur: + rem_exe = cur.mogrify("remote = %s AND executor = %s", + (data["remote"], data["executor"],)).decode('utf-8') + cur.execute(f"SELECT grp, test, subtest FROM {self.tbl_stb} " + + "WHERE autoignore = True AND passing IS NULL AND " + rem_exe) + rows = cur.fetchall() + res = {} + for row in rows: + res[(row[0], row[1], row[2])] = { + "group": row[0], + "test": row[1], + "subtest": row[2] + } + if res: + print(f"Unstable for {data['remote']}/{data['executor']} got", len(res)) + return res + + def psql_get_test_stability(self, data, row): + with self.psql_conn.cursor() as cur: + cur.execute(f"SELECT pass_cnt, fail_cnt, pass_srk, fail_srk, pass_cur, fail_cur, passing FROM {self.tbl_stb} " + + self.psql_stability_selector(cur, data, row)) + rows = cur.fetchall() + if rows and len(rows) > 0: + res = rows[0] + else: + res = [0] * 10 + res[6] = None # passing + return { + "pass_cnt": res[0], + "fail_cnt": res[1], + "pass_srk": res[2], + "fail_srk": res[3], + "pass_cur": res[4], + "fail_cur": res[5], + "passing": res[6], + "exists": bool(rows), + } + + def psql_insert_stability(self, data): + flat = result_flatten(data) + + for row in flat: + # Fetch current state + stability = self.psql_get_test_stability(data, row) + if not stability["exists"]: + with self.psql_conn.cursor() as cur: + cur.execute(f"INSERT INTO {self.tbl_stb} (remote, executor, grp, test, subtest, autoignore) " + + cur.mogrify("VALUES (%s, %s, %s, %s, %s, %s)", + (data["remote"], data["executor"], row["group"], + row["test"], row["subtest"], "device" in data) + ).decode('utf-8')) + # Update state + if row["result"]: + key_pfx = "pass" + stability["fail_cur"] = 0 + else: + key_pfx = "fail" + stability["pass_cur"] = 0 + + stability[key_pfx + "_cnt"] += 1 + stability[key_pfx + "_cur"] += 1 + stability[key_pfx + "_srk"] = max(stability[key_pfx + "_cur"], stability[key_pfx + "_srk"]) + + now = datetime.datetime.now().isoformat() + "+00:00" + if stability["pass_cur"] > 15 and not stability["passing"]: # 5 clean days for HW + print("Test reached stability", data["remote"], row["test"], row["subtest"]) + stability["passing"] = now + + with self.psql_conn.cursor() as cur: + cur.execute(f"UPDATE {self.tbl_stb} SET " + + cur.mogrify("pass_cnt = %s, fail_cnt = %s, pass_srk = %s, fail_srk = %s, pass_cur = %s, fail_cur = %s, passing = %s, last_update = %s", + (stability["pass_cnt"], stability["fail_cnt"], stability["pass_srk"], stability["fail_srk"], + stability["pass_cur"], stability["fail_cur"], stability["passing"], now)).decode('utf-8') + + self.psql_stability_selector(cur, data, row)) + + def psql_insert_device(self, data): + if 'device' not in data: + return + + with self.psql_conn.cursor() as cur: + cur.execute("SELECT info FROM devices_info WHERE " + + cur.mogrify("remote = %s AND executor = %s", + (data["remote"], data["executor"], 
)).decode('utf-8') + + "ORDER BY changed DESC LIMIT 1") + rows = cur.fetchall() + if rows: + info = rows[0][0] + else: + info = 'x' + + new_info = data["device"] + if isinstance(new_info, dict): + new_info = json.dumps(new_info) + if info == new_info: + return + + with self.psql_conn.cursor() as cur: + cur.execute("INSERT INTO devices_info (remote, executor, changed, info) " + + cur.mogrify("VALUES(%s, %s, %s, %s)", + (data["remote"], data["executor"], + data["start"], new_info)).decode('utf-8')) + + def insert_real(self, remote, run): + data = run.copy() + data["remote"] = remote["name"] + + self.psql_insert_stability(data) + self.psql_insert_device(data) + + self.psql_clear_wip(remote, run) + self.insert_result_psql(data) + + +def write_json_atomic(path, data): + tmp = path + '.new' + with open(tmp, 'w') as fp: + json.dump(data, fp) + os.rename(tmp, path) + + +def fetch_remote_run(fetcher, remote, run_info, remote_state): + r = requests.get(run_info['url']) + try: + data = json.loads(r.content.decode('utf-8')) + except json.decoder.JSONDecodeError: + print('WARN: Failed to decode results from remote:', remote['name'], + 'invalid JSON at', run_info['url']) + return False + + fetcher.insert_real(remote, data) + + file = os.path.join(remote_state['dir'], os.path.basename(run_info['url'])) + with open(file, "w") as fp: + json.dump(data, fp) + return True + + +def fetch_remote(fetcher, remote, seen): + print("Fetching remote", remote['url']) + r = requests.get(remote['url']) + try: + manifest = json.loads(r.content.decode('utf-8')) + except json.decoder.JSONDecodeError: + print('WARN: Failed to decode manifest from remote:', remote['name']) + return + remote_state = seen[remote['name']] + + for run in manifest: + if run['branch'] in remote_state['seen']: + continue + if not run['url']: # Executor has not finished, yet + if run['branch'] not in remote_state['wip']: + fetcher.psql_insert_wip(remote, run) + fetcher.fetched = True + continue + + print('Fetching run', run['branch']) + if fetch_remote_run(fetcher, remote, run, remote_state): + fetcher.fetched = True + + with open(os.path.join(remote_state['dir'], 'results.json'), "w") as fp: + json.dump(manifest, fp) + + +def apply_stability(fetcher, data, unstable): + if data.get("results") is None: # WIP result + return + + u_key = (data['remote'], data['executor']) + if u_key not in unstable: + unstable[u_key] = fetcher.psql_get_unstable(data) + + # Non-HW runners have full stability, usually + if not unstable[u_key]: + return + + def filter_l1(test): + # Defer filtering to L2 + if test.get("results"): + return True + # Crashes must always be reported + if test.get("crashes"): + return True + return (test['group'], test['test'], None) not in unstable[u_key] + + def trim_l2(test): + # Skip over pure L1s + if "results" not in test: + return test + # Crashes must always be reported + if test.get("crashes"): + return test + + def filter_l1_l2(case): + return (test['group'], test['test'], case['test']) not in unstable[u_key] + + test["results"] = list(filter(filter_l1_l2, test["results"])) + if not test["results"]: + return None + + # See if we removed all failing subtests + all_pass = True + all_pass &= not test.get("crashes") + if test["result"].lower() != "pass": + all_pass = functools.reduce(lambda x, y: x and y["result"].lower() == "pass", test["results"], all_pass) + if all_pass: + test["result"] = "pass" + # Same logic for retries + all_pass = True + all_pass &= not test.get("crashes") + if test.get("retry", "pass").lower() != "pass": + 
all_pass = functools.reduce(lambda x, y: x and y.get("retry", "fail").lower() == "pass", test["results"], all_pass) + if all_pass: + test["retry"] = "pass" + return test + + data["results"] = list(filter(filter_l1, data["results"])) + data["results"] = list(map(trim_l2, data["results"])) + data["results"] = list(filter(lambda x: x is not None, data["results"])) + + +def build_combined(fetcher, remote_db): + r = requests.get(fetcher.config.get('input', 'branch_url')) + branches = json.loads(r.content.decode('utf-8')) + branch_info = {} + for br in branches: + branch_info[br['branch']] = br + + combined = [] + for remote in remote_db: + name = remote['name'] + dir = os.path.join(fetcher.config.get('output', 'dir'), name) + print('Combining from remote', name) + + manifest = os.path.join(dir, 'results.json') + if not os.path.exists(manifest): + continue + + with open(manifest, "r") as fp: + results = json.load(fp) + + for entry in results: + if not entry['url']: # Executor is running + if entry['branch'] not in branch_info: + continue + data = entry.copy() + when = datetime.datetime.fromisoformat(branch_info[entry['branch']]['date']) + data["start"] = str(when) + when += datetime.timedelta(hours=2, minutes=58) + data["end"] = str(when) + data["results"] = None + else: + file = os.path.join(dir, os.path.basename(entry['url'])) + if not os.path.exists(file): + print('No file', file) + continue + with open(file, "r") as fp: + data = json.load(fp) + + data['remote'] = name + combined.append(data) + + unstable = {} + for run in combined: + apply_stability(fetcher, run, unstable) + + return combined + + +def build_seen(fetcher, remote_db): + seen = {} + for remote in remote_db: + seen[remote['name']] = {'seen': set(), 'wip': set()} + + # Prepare local state + name = remote['name'] + dir = os.path.join(fetcher.config.get('output', 'dir'), name) + seen[name]['dir'] = dir + os.makedirs(dir, exist_ok=True) + + url = fetcher.config.get('output', 'url_pfx') + '/' + name + seen[name]['url'] = url + + # Read the files + manifest = os.path.join(dir, 'results.json') + if not os.path.exists(manifest): + continue + + with open(manifest, "r") as fp: + results = json.load(fp) + for entry in results: + if not entry.get('url'): + seen[name]['wip'].add(entry.get('branch')) + print('No URL on', entry, 'from', remote['name']) + continue + file = os.path.join(dir, os.path.basename(entry['url'])) + if not os.path.exists(file): + continue + seen[name]['seen'].add(entry.get('branch')) + return seen + + +def main() -> None: + fetcher = FetcherState() + + with open(fetcher.config.get('input', 'remote_db'), "r") as fp: + remote_db = json.load(fp) + + while True: + if fetcher.fetched: + seen = build_seen(fetcher, remote_db) + fetcher.fetched = False + + for remote in remote_db: + fetch_remote(fetcher, remote, seen) + + if fetcher.fetched: + print('Generating combined') + results = build_combined(fetcher, remote_db) + + combined = os.path.join(fetcher.config.get('output', 'dir'), + fetcher.config.get('output', 'combined')) + write_json_atomic(combined, results) + + time.sleep(int(fetcher.config.get('cfg', 'refresh'))) + + +if __name__ == "__main__": + main() diff --git a/contest/results-faker.py b/contest/results-faker.py index 043840c..c98167e 100755 --- a/contest/results-faker.py +++ b/contest/results-faker.py @@ -15,28 +15,55 @@ Config: [input] -branches=/path/to/branches.json +branches=/path/to/branches.json,/path/to/branches2.json +infos=/path/to/infos.json,/path/to/infos2.json [output] dir=/path/to/output 
url_pfx=relative/within/server +info=/path/to/info.json """ +def combine_infos(config): + paths = config.get("input", "infos", fallback="").split(',') + if not paths: + return + + infos = {} + for path in paths: + with open(path, "r") as fp: + infos.update(json.load(fp)) + + with open(config.get("output", "info"), 'w') as fp: + json.dump(infos, fp) + + def main() -> None: config = configparser.ConfigParser() config.read(['faker.config']) - with open(config.get("input", "branches"), "r") as fp: - branches = json.load(fp) + combine_infos(config) + + branches = [] + paths = config.get("input", "branches") + for path in paths.split(','): + with open(path, "r") as fp: + branches += json.load(fp) + + branches = sorted(branches, key=lambda x: x["date"]) url = config.get("output", "url_pfx") if url[-1] != '/': url += '/' directory = config.get("output", "dir") + used_cookies = set() results = [] for br in branches: br_dt = datetime.datetime.fromisoformat(br["date"]) - run_id_cookie = str(int(br_dt.timestamp() / 60) % 1000000) + run_id_cookie = int(br_dt.timestamp() / 60) % 1000000 + while run_id_cookie in used_cookies: + run_id_cookie += 1 + used_cookies.add(run_id_cookie) fname = f"results-{run_id_cookie}.json" data = {'url': url + fname, diff --git a/contest/results-fetcher.py b/contest/results-fetcher.py deleted file mode 100755 index 3f428bb..0000000 --- a/contest/results-fetcher.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python3 -# SPDX-License-Identifier: GPL-2.0 - -import configparser -import datetime -import json -import os -import requests -import time - - -""" -Config: - -[cfg] -refresh=#secs -[input] -remote_db=/path/to/db -[output] -dir=/path/to/output -url_pfx=relative/within/server -combined=name-of-manifest.json -""" - - -def write_json_atomic(path, data): - tmp = path + '.new' - with open(tmp, 'w') as fp: - json.dump(data, fp) - os.rename(tmp, path) - - -def fetch_remote_run(run_info, remote_state): - r = requests.get(run_info['url']) - data = json.loads(r.content.decode('utf-8')) - - file = os.path.join(remote_state['dir'], os.path.basename(run_info['url'])) - with open(file, "w") as fp: - json.dump(data, fp) - - -def fetch_remote(remote, seen): - print("Fetching remote", remote['url']) - r = requests.get(remote['url']) - manifest = json.loads(r.content.decode('utf-8')) - remote_state = seen[remote['name']] - - fetched = False - for run in manifest: - if run['branch'] in remote_state['seen']: - continue - if not run['url']: # Executor has not finished, yet - fetched |= run['branch'] not in remote_state['wip'] - continue - - print('Fetching run', run['branch']) - fetch_remote_run(run, remote_state) - fetched = True - - with open(os.path.join(remote_state['dir'], 'results.json'), "w") as fp: - json.dump(manifest, fp) - - return fetched - - -def build_combined(config, remote_db): - r = requests.get(config.get('input', 'branch_url')) - branches = json.loads(r.content.decode('utf-8')) - branch_info = {} - for br in branches: - branch_info[br['branch']] = br - - combined = [] - for remote in remote_db: - name = remote['name'] - dir = os.path.join(config.get('output', 'dir'), name) - print('Combining from remote', name) - - manifest = os.path.join(dir, 'results.json') - if not os.path.exists(manifest): - continue - - with open(manifest, "r") as fp: - results = json.load(fp) - - for entry in results: - if not entry['url']: # Executor is running - if entry['branch'] not in branch_info: - continue - data = entry.copy() - when = 
datetime.datetime.fromisoformat(branch_info[entry['branch']]['date']) - data["start"] = str(when) - when += datetime.timedelta(hours=2, minutes=58) - data["end"] = str(when) - data["results"] = None - else: - file = os.path.join(dir, os.path.basename(entry['url'])) - if not os.path.exists(file): - print('No file', file) - continue - with open(file, "r") as fp: - data = json.load(fp) - - data['remote'] = name - combined.append(data) - return combined - - -def build_seen(config, remote_db): - seen = {} - for remote in remote_db: - seen[remote['name']] = {'seen': set(), 'wip': set()} - - # Prepare local state - name = remote['name'] - dir = os.path.join(config.get('output', 'dir'), name) - seen[name]['dir'] = dir - os.makedirs(dir, exist_ok=True) - - url = config.get('output', 'url_pfx') + '/' + name - seen[name]['url'] = url - - # Read the files - manifest = os.path.join(dir, 'results.json') - if not os.path.exists(manifest): - continue - - with open(manifest, "r") as fp: - results = json.load(fp) - for entry in results: - if not entry.get('url'): - seen[name]['wip'].add(entry.get('branch')) - print('No URL on', entry, 'from', remote['name']) - continue - file = os.path.join(dir, os.path.basename(entry['url'])) - if not os.path.exists(file): - continue - seen[name]['seen'].add(entry.get('branch')) - return seen - - -def one_check(config, remote_db, seen): - fetched = False - for remote in remote_db: - fetched |= fetch_remote(remote, seen) - return fetched - - -def main() -> None: - config = configparser.ConfigParser() - config.read(['fetcher.config']) - - with open(config.get('input', 'remote_db'), "r") as fp: - remote_db = json.load(fp) - - fetched = True - while True: - if fetched: - seen = build_seen(config, remote_db) - - fetched = one_check(config, remote_db, seen) - - if fetched: - print('Generating combined') - results = build_combined(config, remote_db) - - combined = os.path.join(config.get('output', 'dir'), - config.get('output', 'combined')) - write_json_atomic(combined, results) - - time.sleep(int(config.get('cfg', 'refresh'))) - - -if __name__ == "__main__": - main() diff --git a/contest/scripts/env_check.py b/contest/scripts/env_check.py new file mode 100755 index 0000000..c6f376a --- /dev/null +++ b/contest/scripts/env_check.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +Collect system state info. Save it to a JSON file, +if file already exists, compare it first and report deltas. 
+""" + +import json +import os +import subprocess +import sys + + +def run_cmd_text(cmd): + """Execute a shell command and return its output as text.""" + result = subprocess.run(cmd, shell=True, check=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) + return result.stdout + + +def run_cmd_json(cmd): + """Execute a shell command and return its output parsed as JSON.""" + result = subprocess.run(cmd, shell=True, check=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) + if result.returncode != 0: + return {"error": result.stderr.strip()} + + ret = json.loads(result.stdout) + # "decapsulate" the one element arrays that ip and ethtool like return + if isinstance(ret, list) and len(ret) == 1: + ret = ret[0] + return ret + + +def collect_system_state(): + """Collect network interface information.""" + state = { + "links": {}, + "chans": {}, + "feat": {}, + "rings": {}, + "rss": {}, + "ntuple": {}, + } + + interfaces = run_cmd_json("ip -j -d link show") + + for iface in interfaces: + ifname = iface['ifname'] + + state["links"][ifname] = iface + + state["chans"][ifname] = run_cmd_json(f"ethtool -j -l {ifname}") + state["feat" ][ifname] = run_cmd_json(f"ethtool -j -k {ifname}") + state["rings"][ifname] = run_cmd_json(f"ethtool -j -g {ifname}") + state["rss" ][ifname] = run_cmd_json(f"ethtool -j -x {ifname}") + if "rss-hash-key" in state["rss"][ifname]: + del state["rss"][ifname]["rss-hash-key"] + state["ntuple"][ifname] = run_cmd_text(f"ethtool -n {ifname}") + + return state + + +def is_linkstate(a, b, path): + """System state key is related to carrier (whether link has come up, yet)""" + + if path.startswith(".links."): + if path.endswith(".operstate"): + return True + if path.endswith(".flags"): + a = set(a) + b = set(b) + diff = a ^ b + return not (diff - {'NO-CARRIER', 'LOWER_UP'}) + return False + +def compare_states(current, saved, path=""): + """Compare current system state with saved state.""" + + ret = 0 + + if isinstance(current, dict) and isinstance(saved, dict): + for k in current.keys() | saved.keys(): + if k in current and k in saved: + ret |= compare_states(current[k], saved[k], path=f"{path}.{k}") + else: + print(f"Saved {path}.{k}:", saved.get(k)) + print(f"Current {path}.{k}:", current.get(k)) + ret = 1 + else: + if current != saved: + print(f"Saved {path}:", saved) + print(f"Current {path}:", current) + + ret |= not is_linkstate(current, saved, path) + + return ret + + +def main(): + """Main function to collect and compare network interface states.""" + output_file = "/tmp/nipa-env-state.json" + if len(sys.argv) > 1: + output_file = sys.argv[1] + + # Collect current system state + current_state = collect_system_state() + exit_code = 0 + + # Check if the file already exists + if os.path.exists(output_file): + print("Comparing to existing state file: ", end="") + try: + with open(output_file, 'r', encoding='utf-8') as f: + saved_state = json.load(f) + + # Compare states + exit_code = compare_states(current_state, saved_state) + if exit_code == 0: + print("no differences detected.") + except (json.JSONDecodeError, IOError, OSError) as e: + print("Error loading or comparing:") + print(e) + # Save current state to file + with open(output_file, 'w', encoding='utf-8') as f: + json.dump(current_state, f, indent=2) + print(f"Current system state saved to {output_file}") + + sys.exit(exit_code) + + +if __name__ == "__main__": + main() diff --git a/contest/scripts/vm-virtio-dev-info.sh 
b/contest/scripts/vm-virtio-dev-info.sh new file mode 100755 index 0000000..dc38523 --- /dev/null +++ b/contest/scripts/vm-virtio-dev-info.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +qver=$(qemu-system-x86_64 --version | head -1) + +echo '{"driver":"virtio_net","versions":{"fixed":{},"stored":{},"running":{"fw":"'"$qver"'"}}}' diff --git a/contest/scripts/vm-virtio-loop.sh b/contest/scripts/vm-virtio-loop.sh new file mode 100755 index 0000000..1ebb425 --- /dev/null +++ b/contest/scripts/vm-virtio-loop.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +# Expect we were booted into a virtme-ng VM with "--net loop" + +IFC0=enp0s1 +IFC1=enp0s2 + +for ifc in $IFC0 $IFC1; do + if ! ethtool -i "$ifc" | grep -q virtio; then + echo "Error: $ifc is not virtio" + exit 1 + fi +done + +ip netns add ns-remote +ip link set dev $IFC1 netns ns-remote +export REMOTE_TYPE=netns +export REMOTE_ARGS=ns-remote + +ip link set dev $IFC0 up +ip -netns ns-remote link set dev $IFC1 up +export NETIF=$IFC0 + +ip addr add dev $IFC0 192.0.3.1/24 +ip -netns ns-remote addr add dev $IFC1 192.0.3.2/24 +export LOCAL_V4=192.0.3.1 +export REMOTE_V4=192.0.3.2 + +ip addr add dev $IFC0 2001:db8:1::1/64 nodad +ip -netns ns-remote addr add dev $IFC1 2001:db8:1::2/64 nodad +export LOCAL_V6=2001:db8:1::1 +export REMOTE_V6=2001:db8:1::2 + +sysctl -w net.ipv6.conf.$IFC0.keep_addr_on_down=1 +# We don't bring remote down, it'd break remote via SSH + +sleep 1 diff --git a/contest/tests/build-doc.sh b/contest/tests/build-doc.sh index 9a2fb44..323b473 100755 --- a/contest/tests/build-doc.sh +++ b/contest/tests/build-doc.sh @@ -16,12 +16,12 @@ echo echo " === Building the base tree ===" git checkout -q $BASE make cleandocs -make -Oline htmldocs 2> >(tee $tmpfile_o >&2) +make -Oline htmldocs 2> >(tee $tmpfile_o >&2) || exit 1 echo " === Building the new tree ===" git checkout -q $BRANCH make cleandocs -make -Oline htmldocs 2> >(tee $tmpfile_n >&2) +make -Oline htmldocs 2> >(tee $tmpfile_n >&2) || exit 1 incumbent=$(grep -i -c "\(warn\|error\)" $tmpfile_o) current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) @@ -32,6 +32,15 @@ if [ $current -gt $incumbent ]; then rc=1 fi +# Copy the latest outputs +if [ "x${RESULTS_DIR}" != x ]; then + OUTPUT_TMP=$(dirname ${RESULTS_DIR})/output + + rm -rf "${OUTPUT_TMP}" + mkdir -p "${OUTPUT_TMP}" + cp -r Documentation/output/* "${OUTPUT_TMP}"/ +fi + echo echo " === Summary === " echo "Incumbent: $incumbent" diff --git a/contest/tests/cocci-check.sh b/contest/tests/cocci-check.sh index a382e48..86953b6 100755 --- a/contest/tests/cocci-check.sh +++ b/contest/tests/cocci-check.sh @@ -23,24 +23,64 @@ clean_up_output() { # remove the command lines sed -i '/^\/usr\/local\/bin\/spatch -D report /d' $file + # ignore the str helpers like str_on_off(), we don't care + sed -i '/: opportunity for str_/d' $file + # if files are removed or added cocci will fail in pre- or post- run sed -i '/^EXN: .*No such file or directory/d' $file sed -i '/^EXN: Coccinelle_modules.Common.Timeout /d' $file sed -i '/An error occurred when attempting /d' $file + sed -i '/mlx5_ifc.h:.* WARNING use flexible-array member instead/d' $file } +# Figure out the number of physical cores, save 8 or half for other stuff +THREADS_PER_CORE=$(LANG=C lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]") +NPROC=$(getconf _NPROCESSORS_ONLN) +JOBS=$((NPROC / THREADS_PER_CORE)) + +if [ $JOBS -gt 16 ]; then + JOBS=$((JOBS - 8)) +else + JOBS=$((JOBS / 2)) +fi + echo " === Start ===" echo "Base: $BASE" echo 
"Branch: $BRANCH ($branch_rev)" +echo "Jobs: $JOBS" +echo + +echo " === Waiting for loadavg to die down ===" +while true; do + # Sleep first to make sure others get a chance to start + sleep 120 + + load=$(cat /proc/loadavg | sed -e 's/\([0-9.]\) .*/\1/;s/\.//;s/^0*//') + [ $load -lt 800 ] && break +done + +echo "Starting at $(date)" echo +IGNORED=( + scripts/coccinelle/misc/minmax.cocci + # secs_to_jiffies is broken in report mode + scripts/coccinelle/misc/secs_to_jiffies.cocci +) + +git reset --hard +for ign_file in ${IGNORED[@]}; do + echo "Ignoring " $ign_file + mv $ign_file $ign_file.ignore +done + echo " === Checking the base tree ===" git checkout -q $BASE -make coccicheck MODE=report SPFLAGS="$SPFLAGS" > $out_o +make coccicheck MODE=report J=$JOBS SPFLAGS="$SPFLAGS" > $out_o || exit 1 echo " === Building the new tree ===" git checkout -q $BRANCH -make coccicheck MODE=report SPFLAGS="$SPFLAGS" > $out_n +make coccicheck MODE=report J=$JOBS SPFLAGS="$SPFLAGS" > $out_n || exit 1 dirty=( $(grep -c . $out_o) $(grep -i -c "warn" $out_o) $(grep -i -c "error" $out_o) $(grep -c . $out_n) $(grep -i -c "warn" $out_n) $(grep -i -c "error" $out_n) @@ -73,6 +113,25 @@ elif [ ${current[0]} -gt ${incumbent[0]} ]; then rc=5 fi +if [ $rc -ne 0 ]; then + echo "Per-file breakdown" 1>&2 + tmpfile_fo=$(mktemp) + tmpfile_fn=$(mktemp) + + grep -i "^$PWD" $out_of | sed -n 's@^'$PWD'\([/a-zA-Z0-9_.-]*.[ch]\):.*@\1@p' | sort | uniq -c \ + > $tmpfile_fo + grep -i "^$PWD" $out_nf | sed -n 's@^'$PWD'\([/a-zA-Z0-9_.-]*.[ch]\):.*@\1@p' | sort | uniq -c \ + > $tmpfile_fn + + diff -U 0 $tmpfile_fo $tmpfile_fn 1>&2 + rm $tmpfile_fo $tmpfile_fn +fi + +for ign_file in ${IGNORED[@]}; do + echo "Un-ignoring " $ign_file + mv $ign_file.ignore $ign_file +done + echo echo " === Summary === " echo "Incumbent: ${incumbent[@]}" diff --git a/core/__init__.py b/core/__init__.py index 4390b4c..010d243 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -19,6 +19,7 @@ import os +from .lifetime import NipaLifetime from .logger import log, log_open_sec, log_end_sec, log_init from .patch import Patch from .test import Test diff --git a/core/lifetime.py b/core/lifetime.py new file mode 100644 index 0000000..546ed8c --- /dev/null +++ b/core/lifetime.py @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-2.0 + +import subprocess +import signal +import sys +import time +import os + + +sig_initialized = False +got_sigusr1 = False + + +def sig_handler(signum, frame) -> None: + global got_sigusr1 + + got_sigusr1 |= signum == signal.SIGUSR1 + print('signal received, SIGUSR1:', got_sigusr1) + + +def sig_init(): + global sig_initialized + + if not sig_initialized: + signal.signal(signal.SIGUSR1, sig_handler) + sig_initialized = True + + +def nipa_git_version(): + cwd = os.path.dirname(os.path.abspath(__file__)) + res = subprocess.run(["git", "show", "HEAD", "--format=reference", "--no-patch"], + capture_output=True, cwd=cwd, check=True) + return res.stdout.decode("utf-8").strip() + + +class NipaLifetime: + def __init__(self, config): + self.config = config + + # Load exit criteria + self.use_usrsig = config.getboolean('life', 'sigusr1', fallback=True) + if self.use_usrsig: + sig_init() + self._nipa_version = nipa_git_version() + self.use_nipa_version = config.getboolean('life', 'nipa_version', fallback=True) + if self.use_nipa_version: + self._nipa_version = nipa_git_version() + + print("NIPA version:", self._nipa_version) + + # Load params + self._sleep = config.getint('life', 'poll_ival', fallback=60) + self._single_shot = 
config.getboolean('life', 'single_shot', fallback=False) + # Set initial state + self._first_run = True + self._restart = False + + def next_poll(self, wait_time=None): + if self._first_run: + self._first_run = False + return True + elif self._single_shot: + return False + + if self.use_nipa_version and nipa_git_version() != self._nipa_version: + self._restart = True + + to_sleep = self._sleep + if wait_time is not None: + to_sleep = wait_time + while not self._restart and to_sleep > 0: + if self.use_usrsig and got_sigusr1: + self._restart = True + break + try: + time.sleep(min(to_sleep, 1)) + except KeyboardInterrupt: + return False + to_sleep -= 1 + + return not self._restart + + def exit(self): + if self._restart: + print("NIPA restarting!") + os.execv(sys.executable, [sys.executable] + sys.argv) + print("NIPA quitting!") diff --git a/core/logger.py b/core/logger.py index 0ab491d..166b964 100644 --- a/core/logger.py +++ b/core/logger.py @@ -220,24 +220,16 @@ def log_init(name, path, force_single_thread=False): def log_fini(): - global tls - tls.logger.fini() def log_open_sec(header): - global tls - tls.logger.open_sec(header) def log_end_sec(): - global tls - tls.logger.end_sec() def log(header, data=''): - global tls - tls.logger.log(header, data) diff --git a/core/maintainers.py b/core/maintainers.py index 79c014f..d56127e 100755 --- a/core/maintainers.py +++ b/core/maintainers.py @@ -25,6 +25,11 @@ def name_email_split(name_email): name_email = name_email[idx + 1:-1] name = '' email = name_email + if '+' in email and email.find('+') < email.find('@'): + pidx = email.find('+') + didx = email.find('@') + email = email[:pidx] + email[didx:] + return name, email def __repr__(self): @@ -38,9 +43,15 @@ def __eq__(self, other): class Maintainers: - def __init__(self, *, file=None, url=None): + def __init__(self, *, file=None, url=None, config=None): self.entries = MaintainersList() + self.http_headers = None + if config: + ua = config.get('patchwork', 'user-agent', fallback='') + if ua: + self.http_headers = {"user-agent":ua} + if file: self._load_from_file(file) elif url: @@ -55,6 +66,11 @@ def _load_from_lines(self, lines): if not started: continue + # Fix up tabs vs spaces + if len(line) > 5 and line[0].isupper() and line[1:4] == ': ': + print("Bad attr line:", group, line.strip()) + line = line[:2] + '\t' + line[2:].strip() + if line == '': if len(group) > 1: self.entries.add(MaintainersEntry(group)) @@ -72,7 +88,7 @@ def _load_from_file(self, file): self._load_from_lines(f.read().split('\n')) def _load_from_url(https://github.com/self,%20url): - r = requests.get(url) + r = requests.get(url, headers=self.http_headers) data = r.content.decode('utf-8') self._load_from_lines(data.split('\n')) diff --git a/core/patch.py b/core/patch.py index d552e02..4bd1150 100644 --- a/core/patch.py +++ b/core/patch.py @@ -9,10 +9,8 @@ import core -patch_id_gen = 0 - -class Patch(object): +class Patch: """Patch class Class representing a patch with references to postings etc. @@ -29,12 +27,18 @@ class Patch(object): write_out(fp) Write the raw patch into the given file pointer.
""" + + PATCH_ID_GEN = 0 + def __init__(self, raw_patch, ident=None, title="", series=None): self.raw_patch = raw_patch self.title = title self.subject = "" self.series = series + # Whether the patch is first in the series, set by series.add_patch() + self.first_in_series = None + subj = re.search(r'Subject: \[.*\](.*)', raw_patch) if not subj: subj = re.search(r'Subject: (.*)', raw_patch) @@ -45,13 +49,13 @@ def __init__(self, raw_patch, ident=None, title="", series=None): core.log_open_sec("Patch init: " + self.title) core.log_end_sec() - global patch_id_gen if ident is not None: self.id = ident else: - patch_id_gen += 1 - self.id = patch_id_gen + Patch.PATCH_ID_GEN += 1 + self.id = Patch.PATCH_ID_GEN def write_out(self, fp): + """ Write patch contents to a file """ fp.write(self.raw_patch.encode('utf-8')) fp.flush() diff --git a/core/series.py b/core/series.py index dcec6c7..dd38143 100644 --- a/core/series.py +++ b/core/series.py @@ -39,6 +39,7 @@ def set_cover_letter(self, data): self.subject = subj.group(0)[9:] def add_patch(self, patch): + patch.first_in_series = len(self.patches) == 0 self.patches.append(patch) def is_pure_pull(self): diff --git a/core/test.py b/core/test.py index f32a59c..89f1f52 100644 --- a/core/test.py +++ b/core/test.py @@ -121,10 +121,15 @@ def _exec_run(self, tree, thing, result_dir): try: rfd, wfd = os.pipe() + env = { "DESC_FD": str(wfd), + "RESULTS_DIR": os.path.join(result_dir, self.name), + "BRANCH_BASE": tree.branch } + + if hasattr(thing, 'first_in_series'): + env["FIRST_IN_SERIES"] = str(int(thing.first_in_series)) + out, err = CMD.cmd_run(self.info["run"], include_stderr=True, cwd=tree.path, - pass_fds=[wfd], add_env={"DESC_FD": str(wfd), - "RESULTS_DIR": os.path.join(result_dir, self.name), - "BRANCH_BASE": tree.branch}) + pass_fds=[wfd], add_env=env) except core.cmd.CmdError as e: retcode = e.retcode out = e.stdout diff --git a/core/tester.py b/core/tester.py index ca8b026..7152a7e 100644 --- a/core/tester.py +++ b/core/tester.py @@ -10,7 +10,7 @@ import re import core -from core import Test, PullError +from core import Test, PullError, PatchApplyError def write_tree_selection_result(result_dir, s, comment): @@ -31,6 +31,17 @@ def write_tree_selection_result(result_dir, s, comment): os.makedirs(patch_dir) +def write_apply_result(series_dir, tree, what, retcode): + series_apply = os.path.join(series_dir, "apply") + os.makedirs(series_apply) + + core.log("Series " + what, "") + with open(os.path.join(series_apply, "retcode"), "w+") as fp: + fp.write(str(retcode)) + with open(os.path.join(series_apply, "desc"), "w+") as fp: + fp.write(f"Patch {what} to {tree.name}") + + def mark_done(result_dir, series): series_dir = os.path.join(result_dir, str(series.id)) if not os.path.exists(os.path.join(series_dir, ".tester_done")): @@ -38,16 +49,15 @@ def mark_done(result_dir, series): class Tester(threading.Thread): - def __init__(self, result_dir, tree, queue, done_queue, barrier): + def __init__(self, result_dir, tree, queue, done_queue, config=None): threading.Thread.__init__(self) self.tree = tree self.queue = queue self.done_queue = done_queue - self.barrier = barrier self.should_die = False self.result_dir = result_dir - self.config = None + self.config = config self.include = None self.exclude = None @@ -55,12 +65,14 @@ def __init__(self, result_dir, tree, queue, done_queue, barrier): self.patch_tests = [] def run(self) -> None: - self.config = configparser.ConfigParser() - self.config.read(['nipa.config', 'pw.config', 'tester.config']) + if self.config 
is None: + self.config = configparser.ConfigParser() + self.config.read(['nipa.config', 'pw.config', 'tester.config']) + log_dir = self.config.get('log', 'dir', fallback=core.NIPA_DIR) core.log_init( self.config.get('log', 'type', fallback='org'), - self.config.get('log', 'file', fallback=os.path.join(core.NIPA_DIR, f"{self.tree.name}.org"))) + self.config.get('log', 'file', fallback=os.path.join(log_dir, f"{self.tree.name}.org"))) core.log_open_sec("Tester init") if not os.path.exists(self.result_dir): @@ -77,26 +89,37 @@ def run(self) -> None: core.log_end_sec() while not self.should_die: - self.barrier.wait() + s = self.queue.get() + if s is None: + break - while not self.should_die and not self.queue.empty(): - s = self.queue.get() - if s is None: - continue + core.log(f"Tester commencing with backlog of {self.queue.qsize()}") + self.test_series(self.tree, s) + self.done_queue.put(s) + core.log("Tester done processing") - core.log(f"Tester commencing with backlog of {self.queue.qsize()}") - self.test_series(self.tree, s) - self.done_queue.put(s) + core.log("Tester exiting") - # If we're the last worker with work to do - let the poller run - core.log(f"Checking barrier {self.barrier.n_waiting}/{self.barrier.parties} {self.queue.qsize()}") - if self.barrier.parties == self.barrier.n_waiting + 1: - break + def get_test_names(self, annotate=True) -> list[str]: + tests_dir = os.path.abspath(core.CORE_DIR + "../../tests") + location = self.config.get('dirs', 'tests', fallback=tests_dir) - try: - self.barrier.wait() - except threading.BrokenBarrierError: - break + self.include = [x.strip() for x in re.split(r'[,\n]', self.config.get('tests', 'include', fallback="")) if len(x)] + self.exclude = [x.strip() for x in re.split(r'[,\n]', self.config.get('tests', 'exclude', fallback="")) if len(x)] + + tests = [] + for name in ["series", "patch"]: + tests_subdir = os.path.join(location, name) + for td in os.listdir(tests_subdir): + test = f'{name}/{td}' + if not annotate: + pass # don't annotate + elif test in self.exclude or \ + (len(self.include) != 0 and test not in self.include): + test += ' [excluded]' + tests.append(test) + + return tests def load_tests(self, name): core.log_open_sec(name.capitalize() + " tests") @@ -127,71 +150,55 @@ def _test_series(self, tree, series): elif os.path.exists(os.path.join(series_dir, ".tester_done")): core.log(f"Already tested in {series_dir}", "") core.log_end_sec() - return [], [] + return try: + tree.reset() if series.is_pure_pull(): - ret = self._test_series_pull(tree, series, series_dir) + self._test_series_pull(tree, series, series_dir) else: - ret = self._test_series_patches(tree, series, series_dir) + self._test_series_patches(tree, series, series_dir) finally: core.log_end_sec() - return ret - def _test_series_patches(self, tree, series, series_dir): - if not tree.check_applies(series): - series_apply = os.path.join(series_dir, "apply") - os.makedirs(series_apply) - + tree.reset(fetch=False) + try: + tree.apply(series) + except PatchApplyError: already_applied = tree.check_already_applied(series) if already_applied: - core.log("Series already applied", "") - with open(os.path.join(series_apply, "retcode"), "w+") as fp: - fp.write("0") - with open(os.path.join(series_apply, "desc"), "w+") as fp: - fp.write(f"Patch already applied to {tree.name}") + write_apply_result(series_dir, tree, "already applied", 0) else: - core.log("Series does not apply", "") - with open(os.path.join(series_apply, "retcode"), "w+") as fp: - fp.write("1") - with 
open(os.path.join(series_apply, "desc"), "w+") as fp: - fp.write(f"Patch does not apply to {tree.name}") - return [already_applied], [already_applied] - - series_ret = [] - patch_ret = [] - tree.reset(fetch=False) + write_apply_result(series_dir, tree, "does not apply", 1) + return - tree.apply(series) for test in self.series_tests: - ret = test.exec(tree, series, series_dir) - series_ret.append(ret) - tree.reset(fetch=False) - - cnt = 1 - for patch in series.patches: - core.log_open_sec(f"Testing patch {cnt}/{len(series.patches)}| {patch.title}") - cnt += 1 - - current_patch_ret = [] - - patch_dir = os.path.join(series_dir, str(patch.id)) - if not os.path.exists(patch_dir): - os.makedirs(patch_dir) - - try: - tree.apply(patch) - - for test in self.patch_tests: - ret = test.exec(tree, patch, patch_dir) - current_patch_ret.append(ret) - finally: - core.log_end_sec() - - patch_ret.append(current_patch_ret) - - return series_ret, patch_ret + test.exec(tree, series, series_dir) + + tcnt = 0 + for test in self.patch_tests: + tcnt += 1 + tree.reset(fetch=False) + + pcnt = 0 + for patch in series.patches: + pcnt += 1 + cnts = f"{tcnt}/{len(self.patch_tests)}|{pcnt}/{len(series.patches)}" + core.log_open_sec(f"Testing patch {cnts}| {patch.title}") + + patch_dir = os.path.join(series_dir, str(patch.id)) + if not os.path.exists(patch_dir): + os.makedirs(patch_dir) + + try: + tree.apply(patch) + test.exec(tree, patch, patch_dir) + except PatchApplyError: + write_apply_result(series_dir, tree, f"patch {pcnt} does not apply", 1) + return + finally: + core.log_end_sec() def _test_series_pull(self, tree, series, series_dir): try: @@ -205,10 +212,9 @@ def _test_series_pull(self, tree, series, series_dir): fp.write("1") with open(os.path.join(series_apply, "desc"), "w+") as fp: fp.write(f"Pull to {tree.name} failed") - return [], [] + return patch = series.patches[0] - current_patch_ret = [] core.log_open_sec(f"Testing pull request {patch.title}") @@ -219,9 +225,7 @@ def _test_series_pull(self, tree, series, series_dir): try: for test in self.patch_tests: if test.is_pull_compatible(): - ret = test.exec(tree, patch, patch_dir) - current_patch_ret.append(ret) + test.exec(tree, patch, patch_dir) finally: core.log_end_sec() - return [], [current_patch_ret] diff --git a/core/tree.py b/core/tree.py index 93bcc1f..5844b27 100644 --- a/core/tree.py +++ b/core/tree.py @@ -4,8 +4,10 @@ """ The git tree module """ +import multiprocessing import os import tempfile +import time from typing import List import core @@ -26,33 +28,73 @@ class TreeNotClean(Exception): pass +class WorktreeNesting(Exception): + pass + + class Tree: """The git tree class Git tree class which controls a git tree + + current_branch: use whatever is currently checked out as branch """ - def __init__(self, name, pfx, fspath, remote=None, branch=None): + def __init__(self, name, pfx, fspath, remote=None, branch=None, + wt_id=None, parent=None, current_branch=False): self.name = name self.pfx = pfx self.path = os.path.abspath(fspath) self.remote = remote self.branch = branch + if parent: + self.lock = parent.lock + else: + self.lock = multiprocessing.RLock() + + if current_branch: + self.branch = self.current_branch() if remote and not branch: - self.branch = remote + "/master" + self.branch = remote + "/main" + self._wt_id = wt_id self._saved_path = None self._check_tree() + def work_tree(self, worker_id): + # Create a worktree for the repo, returns new Tree object + if self._wt_id: + raise WorktreeNesting() + + name = f'wt-{worker_id}' + new_path 
= os.path.join(self.path, name) + if not os.path.exists(new_path): + self.git(["worktree", "add", name]) + + new_name = self.name + f'-{worker_id}' + return Tree(new_name, self.pfx, new_path, self.remote, self.branch, + wt_id=worker_id, parent=self) + def git(self, args: List[str]): - return CMD.cmd_run(["git"] + args, cwd=self.path) + self.lock.acquire(timeout=300) + try: + return CMD.cmd_run(["git"] + args, cwd=self.path) + finally: + self.lock.release() def git_am(self, patch): return self.git(["am", "-s", "--", patch]) - def git_pull(self, pull_url): + def git_checkout(self, ref): + return self.git(["checkout", ref]) + + def git_pull(self, pull_url, ff=None): cmd = ["pull", "--no-edit", "--signoff"] + if ff == True: + cmd.append('--ff-only') + elif ff == False: + cmd.append('--no-ff') cmd += pull_url.split() return self.git(cmd) @@ -75,7 +117,14 @@ def git_merge_base(self, c1, c2, is_ancestor=False): return self.git(cmd) def git_fetch(self, remote): - return self.git(['fetch', remote]) + for i in range(10): + try: + return self.git(['fetch', remote]) + except CMD.CmdError as e: + core.log(f"Fetching failed (attempt {i + 1})", repr(e)) + time.sleep(30) + if i >= 9: + raise def git_reset(self, target, hard=False): cmd = ['reset', target] @@ -99,6 +148,14 @@ def _check_tree(self): finally: core.log_end_sec() + def current_branch(self): + out = self.git(["symbolic-ref", "-q", "HEAD"]) + if out: + out = out.strip() + if out.startswith('refs/heads/'): + out = out[11:] + return out + def head_hash(self): return self.git(['rev-parse', 'HEAD']).strip() @@ -131,6 +188,7 @@ def remotes(self): def contains(self, commit): core.log_open_sec("Checking for commit " + commit) try: + self.reset() self.git_merge_base(commit, 'HEAD', is_ancestor=True) ret = True except CMD.CmdError: @@ -203,21 +261,30 @@ def check_applies(self, thing): return ret - def _pull_safe(self, pull_url): + def _pull_safe(self, pull_url, trust_rerere, ff): try: - self.git_pull(pull_url) + self.git_pull(pull_url, ff=ff) except CMD.CmdError as e: + try: + # If rerere fixed it, just commit + if trust_rerere: + self.git(['diff', '-s', '--exit-code']) # will raise if rerere didn't fix it + self.git(['commit', '--no-edit']) + return + except CMD.CmdError: + pass + try: self.git(["merge", "--abort"]) except CMD.CmdError: pass raise PullError(e) from e - def pull(self, pull_url, reset=True): + def pull(self, pull_url, reset=True, trust_rerere=None, ff=None): core.log_open_sec("Pulling " + pull_url) try: if reset: self.reset() - self._pull_safe(pull_url) + self._pull_safe(pull_url, trust_rerere, ff) finally: core.log_end_sec() diff --git a/deploy/contest/db b/deploy/contest/db new file mode 100644 index 0000000..6f86c34 --- /dev/null +++ b/deploy/contest/db @@ -0,0 +1,108 @@ +# Results DB deployment notes + +sudo dnf -y install postgresql postgresql-server python-psycopg2 + +/usr/bin/postgresql-setup --initdb + +sudo systemctl start postgresql +sudo systemctl enable postgresql + +sudo su - postgres + +# Do the same for fedora, nipa-upload and nipa-brancher +# Actually 'nipa' itself may actually not need these, TBH +createuser nipa +createdb nipa + +psql + GRANT ALL PRIVILEGES ON DATABASE "nipa" to nipa; + ALTER USER nipa WITH PASSWORD 'new_password'; + \c nipa postgres + GRANT ALL ON SCHEMA public TO "nipa"; + GRANT ALL ON ALL TABLES IN SCHEMA public TO "nipa"; + GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO "nipa"; + \q + +GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO "nipa-brancher"; +GRANT ALL ON ALL SEQUENCES 
IN SCHEMA public TO "nipa-brancher";
+
+# Read-only users
+createuser flask
+psql
+    \c nipa postgres
+    GRANT SELECT ON ALL TABLES IN SCHEMA public TO "flask";
+    \q
+
+exit
+
+# back as fedora
+psql --dbname=nipa
+
+CREATE TABLE results (
+    branch varchar(80),
+    remote varchar(80),
+    executor varchar(80),
+    branch_date varchar(17),
+    t_start timestamp,
+    t_end timestamp,
+    json_normal text,
+    json_full text
+);
+
+CREATE INDEX by_branch ON results (branch DESC);
+CREATE INDEX by_branch_date ON results (branch_date DESC);
+
+CREATE TABLE branches (
+    branch varchar(80),
+    stream varchar(60),
+    t_date timestamp,
+    base varchar(80),
+    url varchar(200),
+    info text
+);
+
+CREATE INDEX ON branches (branch DESC);
+CREATE INDEX ON branches (t_date DESC);
+
+CREATE TABLE results_pending (
+    id serial primary key,
+    branch varchar(80),
+    remote varchar(80),
+    executor varchar(80),
+    branch_date varchar(17),
+    t_start timestamp
+);
+
+CREATE TABLE metrics (
+    id serial primary key,
+    ts timestamp not null,
+    source varchar(40),
+    category varchar(40),
+    name varchar(40),
+    value double precision
+);
+
+CREATE TABLE stability (
+    remote varchar(80),
+    executor varchar(80),
+    grp varchar(80),
+    test varchar(128),
+    subtest varchar(256),
+    autoignore boolean DEFAULT false,
+    pass_cnt integer NOT NULL DEFAULT 0,
+    fail_cnt integer NOT NULL DEFAULT 0,
+    pass_srk integer NOT NULL DEFAULT 0,
+    fail_srk integer NOT NULL DEFAULT 0,
+    pass_cur integer NOT NULL DEFAULT 0,
+    fail_cur integer NOT NULL DEFAULT 0,
+    last_update timestamp,
+    passing timestamp
+);
+
+
+CREATE TABLE devices_info (
+    remote varchar(80),
+    executor varchar(80),
+    changed timestamp,
+    info text
+);
diff --git a/deploy/contest/remote/worker-setup.sh b/deploy/contest/remote/worker-setup.sh
new file mode 100644
index 0000000..9288661
--- /dev/null
+++ b/deploy/contest/remote/worker-setup.sh
@@ -0,0 +1,262 @@
+#!/bin/bash -xe
+
+# Cocci
+# also install ocaml itself
+sudo dnf install ocaml-findlib ocaml-findlib-devel
+./configure --enable-ocaml --enable-pcre-syntax
+make
+make install
+# explore local installation, ./configure output suggests how
+
+# Let runners use git on NIPA
+git config --global --add safe.directory /opt/nipa
+
+sudo dnf install pip meson
+
+sudo dnf install perf bpftrace
+sudo dnf install nftables.x86_64
+sudo dnf install pixman-devel.x86_64 pixman.x86_64 libgudev.x86_64
+sudo dnf install libpcap-devel libpcap cmake
+sudo dnf install clang numactl-devel.x86_64
+sudo dnf install socat wireshark nmap-ncat.x86_64
+sudo dnf install libdaemon-devel libdaemon
+sudo dnf install libtool patch
+sudo dnf install ninja-build.x86_64 texinfo
+sudo dnf install bison flex openssl-devel
+sudo dnf install capstone bzip2-devel libssh-devel
+sudo dnf install git libmnl-devel
+sudo dnf install elfutils-devel elfutils-libs elfutils-libelf elfutils-libelf-devel
+sudo dnf install iptables
+
+# NIPA setup
+git clone https://github.com/kuba-moo/nipa.git
+sudo mv nipa/ /opt/
+sudo useradd virtme
+
+# nginx setup
+sudo dnf -y install nginx
+sudo systemctl enable nginx
+sudo systemctl start nginx
+# do basic config, then
+sudo dnf -y install certbot certbot-nginx
+
+# virtme
+git clone https://github.com/arighi/virtme-ng.git
+
+# as admin:
+sudo dnf install python3.11.x86_64 python3.11-devel.x86_64 python3.11-pip.noarch python3.11-libs.x86_64
+# as virtme:
+pip-3.11 install requests
+pip-3.11 install psutil
+
+# prep for outside (system wide)
+# QEMU
+download QEMU
+cd qemu-*
+pip install sphinx
+sudo dnf install glib2 glib2-devel
+./configure --target-list=x86_64-softmmu,x86_64-linux-user
+sudo make install prefix=/usr
+
+# libcli
+git clone https://github.com/dparrish/libcli.git
+cd libcli
+make -j
+sudo make install PREFIX=/usr
+
+### Local
+
+mkdir tools
+cd tools
+
+# netperf
+git clone https://github.com/HewlettPackard/netperf.git
+cd netperf
+./autogen.sh
+./configure --disable-omni # fails build otherwise
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr
+
+exit 0
+
+# Install libbpf
+cd $kernel
+cd tools/lib/bpf
+make -j 40
+sudo make install prefix=/usr
+
+# bpftool
+cd $kernel
+make -C tools/bpf/bpftool
+cp tools/bpf/bpftool/bpftool ../tools/fs/
+
+# Tests need
+sudo dnf install socat libcap-devel
+
+# Build locally
+sudo dnf install libnl3.x86_64 libnl3-cli.x86_64 libnl3-devel.x86_64 libnl3-doc.x86_64
+git clone https://github.com/jpirko/libteam.git
+cd libteam
+./autogen.sh
+./configure
+make -j 40
+# needs manual install
+cp ./utils/teamdctl ../fs/usr/bin/
+cp ./utils/teamnl ../fs/usr/bin/
+cp -v ./libteam/.libs/libteam.so* ../fs/usr/lib/
+cp -v ./libteamdctl/.libs/libteamdctl.so* ../fs/usr/lib/
+
+# refresh iproute2
+git clone https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git
+cd iproute2-next
+git remote add current https://git.kernel.org/pub/scm/network/iproute2/iproute2.git
+git fetch --all
+git reset --hard origin/main
+git merge current/main -m "merge in current"
+
+./configure
+make -j 40
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+
+# msend / mreceive
+git clone https://github.com/troglobit/mtools.git
+cd mtools
+make
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+
+# smcrouted
+git clone https://github.com/troglobit/smcroute.git
+cd smcroute
+./autogen.sh
+./configure
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+# it looks for a socket in /usr/local/var/run
+sudo su
+mkdir -p /usr/local/var/
+ln -sv /run /usr/local/var/
+
+# ndisc6 (ndisc6 package on Fedora)
+dnf -y install gettext-devel
+git clone https://git.remlab.net/git/ndisc6.git
+cd ndisc6/
+./autogen.sh
+./configure
+make -j
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+# make sure the SUID bits don't stick
+find tools/fs/ -perm -4000
+fs=$(find tools/fs/ -perm -4000)
+chmod -s $fs
+ls -l $fs
+
+# dropwatch (DNF on fedora)
+dnf -y install readline-devel binutils-devel
+git clone https://github.com/nhorman/dropwatch
+cd dropwatch/
+./autogen.sh
+./configure
+make -j
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+
+# ethtool
+git clone https://git.kernel.org/pub/scm/network/ethtool/ethtool.git
+cd ethtool
+./autogen.sh
+./configure
+make -j
+make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr
+
+# psample
+git clone https://github.com/Mellanox/libpsample
+cd libpsample
+cmake -DCMAKE_INSTALL_PREFIX:PATH=/home/virtme/tools/fs/usr .
+make -j +make install + +# netsniff-ng +sudo dnf install libnetfilter_conntrack.x86_64 libnetfilter_conntrack-devel.x86_64 +sudo dnf install libsodium-devel.x86_64 libsodium.x86_64 +sudo dnf install libnet libnet-devel +git clone https://github.com/netsniff-ng/netsniff-ng.git +cd netsniff-ng +./configure +make -j + + +# AWS iputils are buggy +dnf -y install libxslt-devel libidn2-devel +git clone https://github.com/iputils/iputils.git +cd iputils +./configure +make -j +make install DESTDIR=/tmp +cp -v /tmp/usr/local/bin/* ../fs/usr/bin/ +cd ../fs/usr/bin/ +ln -s ping ping6 + +# ipv6toolkit (ra6 for fib_tests.sh) +git clone https://github.com/fgont/ipv6toolkit +cd ipv6toolkit/ +make +make install DESTDIR=/home/virtme/tools/fs PREFIX=/usr + +# for nf tests +sudo dnf install conntrack iperf3 ipvsadm + +git clone git://git.netfilter.org/libnftnl +./autogen.sh +./configure +make -j 30 +make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr + +libtool --finish /home/virtme/tools/fs/usr/lib +sudo dnf install gmp gmp-devel + +git clone git://git.netfilter.org/nftables +export PKG_CONFIG_PATH=/home/virtme/tools/fs:/home/virtme/tools/fs/usr:/home/virtme/tools/fs/usr/lib/pkgconfig/ +./configure --with-json --with-xtables + +# Edit paths into the makefile +# LIBNFTNL_CFLAGS = -I/usr/local/include -I/home/virtme/tools/fs/usr/include +# LIBNFTNL_LIBS = -L/usr/local/lib -L/home/virtme/tools/fs/usr/lib -lnftnl + +make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr +# note that library LD_LIBRARY_PATH must have local libs before /lib64 ! + +git clone git://git.netfilter.org/ebtables +./autogen.sh +./configure --prefix=/ --exec-prefix=/home/virtme/tools/fs +make -j 8 +make install DESTDIR=/home/virtme/tools/fs prefix=/usr PREFIX=/usr +cd /home/virtme/tools/fs/usr/sbin/ +ln -v ebtables-legacy ebtables + +sudo cp /etc/ethertypes /usr/local/etc/ + +# packetdrill +sudo dnf install glibc-static.x86_64 + +git clone https://github.com/google/packetdrill.git +cd packetdrill/gtests/net/packetdrill +./configure +make + +cp packetdrill ~/tools/fs/usr/bin/ + +# Net tests need pyroute2 (for OvS tests) +sudo dnf install python3-pyroute2.noarch + +# uring (needs ZC) + git clone https://github.com/axboe/liburing/ + cd liburing + ./configure --prefix=/usr + make -j + sudo make install + + # traceroute + get tar ball from: + https://sourceforge.net/projects/traceroute/files/ + untar + cd ... + make + cp -v ./traceroute/traceroute ../fs/usr/bin/ + cp -v ./traceroute/traceroute ../fs/usr/bin/traceroute6 diff --git a/deploy/contest/setup_net.sh b/deploy/contest/setup_net.sh new file mode 100755 index 0000000..f85ae29 --- /dev/null +++ b/deploy/contest/setup_net.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +for i in `seq 0 3`; do + sudo ip tuntap add name tap$((i * 2 )) mode tap multi_queue group virtme + sudo ip tuntap add name tap$((i * 2 + 1)) mode tap multi_queue group virtme + + sudo ip li add name br$i type bridge + sudo ip link set dev tap$((i * 2 )) master br$i + sudo ip link set dev tap$((i * 2 + 1)) master br$i + + sudo ip link set dev br$i mtu 12000 up + sudo ip link set dev tap$((i * 2 )) mtu 12000 up + sudo ip link set dev tap$((i * 2 + 1)) mtu 12000 up +done diff --git a/deploy/contest/virtio-hw.config b/deploy/contest/virtio-hw.config new file mode 100644 index 0000000..b964dd8 --- /dev/null +++ b/deploy/contest/virtio-hw.config @@ -0,0 +1,22 @@ +[executor] +name=## +deadline_minutes=479 +[remote] +branches=## +[local] +tree_path=## +base_path=## +[www] +url=## +[vm] +cpus=4 +setup=. 
#nipa/contest/scripts/vm-virtio-loop.sh +qemu_opt=-device virtio-net-pci,netdev=n0,iommu_platform=on,disable-legacy=on,mq=on,vectors=18 -netdev tap,id=n0,ifname=tap4,vhost=on,script=no,downscript=no,queues=8 -device virtio-net-pci,netdev=n1,iommu_platform=on,disable-legacy=on,mq=on,vectors=18 -netdev tap,id=n1,ifname=tap5,vhost=on,script=no,downscript=no,queues=8 +[ksft] +target=drivers/net drivers/net/hw +nested_tests=on +[device] +info_script=#nipa/contest/scripts/vm-virtio-dev-info.sh +[cfg] +thread_cnt=1 +thread_spawn_delay=2 diff --git a/systemd/nipa-checks.service b/deploy/systemd/nipa-checks.service similarity index 100% rename from systemd/nipa-checks.service rename to deploy/systemd/nipa-checks.service diff --git a/systemd/nipa-checks.timer b/deploy/systemd/nipa-checks.timer similarity index 100% rename from systemd/nipa-checks.timer rename to deploy/systemd/nipa-checks.timer diff --git a/systemd/nipa-clean-logs.service b/deploy/systemd/nipa-clean-logs.service similarity index 100% rename from systemd/nipa-clean-logs.service rename to deploy/systemd/nipa-clean-logs.service diff --git a/systemd/nipa-clean-output.service b/deploy/systemd/nipa-clean-output.service similarity index 100% rename from systemd/nipa-clean-output.service rename to deploy/systemd/nipa-clean-output.service diff --git a/systemd/nipa-poller.service b/deploy/systemd/nipa-poller.service similarity index 100% rename from systemd/nipa-poller.service rename to deploy/systemd/nipa-poller.service diff --git a/systemd/nipa-status.service b/deploy/systemd/nipa-status.service similarity index 100% rename from systemd/nipa-status.service rename to deploy/systemd/nipa-status.service diff --git a/systemd/nipa-status.timer b/deploy/systemd/nipa-status.timer similarity index 100% rename from systemd/nipa-status.timer rename to deploy/systemd/nipa-status.timer diff --git a/systemd/nipa-upload.service b/deploy/systemd/nipa-upload.service similarity index 100% rename from systemd/nipa-upload.service rename to deploy/systemd/nipa-upload.service diff --git a/docker/Dockerfile b/docker/Dockerfile index f204c13..9a59a5e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -4,13 +4,30 @@ FROM debian:bookworm ARG nipauid RUN useradd -o -m -u $nipauid nipa -RUN apt-get update -RUN apt-get install -y procps python3 git patatt build-essential \ - ccache flex bison libssl-dev libelf-dev clang sparse bc cpio \ - gdb strace vim - -# needed for spdxcheck.py from kernel sources -RUN apt-get install -y python3-ply python3-git +RUN apt-get update && apt-get install -y \ + procps \ + python3 \ + git \ + patatt \ + build-essential \ + ccache \ + flex \ + bison \ + gawk \ + libssl-dev \ + libelf-dev \ + clang lld llvm \ + sparse \ + bc \ + cpio \ + gdb \ + strace \ + vim \ + python3-requests \ + # for spdxcheck.py: + python3-ply \ + python3-git \ + && rm -rf /var/lib/apt/lists/* RUN mkdir -p /home/nipa/.local/share/patatt && chown -R nipa: /home/nipa diff --git a/docs.py b/docs.py index f5a3691..b24123c 100755 --- a/docs.py +++ b/docs.py @@ -247,9 +247,14 @@ def main(): dr = DocRefs() for file in os.listdir(os.path.join(sys.argv[1], 'Documentation', 'process')): if not os.path.isfile(os.path.join(sys.argv[1], 'Documentation', 'process', file)): - return + continue name = file[:-4] dr.load_section('process/' + name, name) + for file in os.listdir(os.path.join(sys.argv[1], 'Documentation', 'maintainer')): + if not os.path.isfile(os.path.join(sys.argv[1], 'Documentation', 'maintainer', file)): + continue + name = file[:-4] + 
dr.load_section('maintainer/' + name, name) if len(sys.argv) > 2: form_letters = sys.argv[2] else: diff --git a/docs/ci.svg b/docs/ci.svg new file mode 100644 index 0000000..32b8317 --- /dev/null +++ b/docs/ci.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/exe-exec.svg b/docs/exe-exec.svg new file mode 100644 index 0000000..b38ad4a --- /dev/null +++ b/docs/exe-exec.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/exe-gh.svg b/docs/exe-gh.svg new file mode 100644 index 0000000..7436c26 --- /dev/null +++ b/docs/exe-gh.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/exe-vmksft.svg b/docs/exe-vmksft.svg new file mode 100644 index 0000000..c5c2b6f --- /dev/null +++ b/docs/exe-vmksft.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/execs.svg b/docs/execs.svg new file mode 100644 index 0000000..07e80bf --- /dev/null +++ b/docs/execs.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/form-letters/net-next-closed b/form-letters/net-next-closed index ca18fb1..2e23180 100644 --- a/form-letters/net-next-closed +++ b/form-letters/net-next-closed @@ -1,11 +1,12 @@ -The merge window for v6.8 has begun and we have already posted our pull -request. Therefore net-next is closed for new drivers, features, code -refactoring and optimizations. We are currently accepting bug fixes only. +We have already submitted our pull request with net-next material for v6.17, +and therefore net-next is closed for new drivers, features, code refactoring +and optimizations. We are currently accepting bug fixes only. -Please repost when net-next reopens after January 22nd. +Please repost when net-next reopens after Aug 11th. RFC patches sent for review only are obviously welcome at any time. See: https://www.kernel.org/doc/html/next/process/maintainer-netdev.html#development-cycle -- pw-bot: defer +pv-bot: closed diff --git a/ingest_mdir.py b/ingest_mdir.py index f3c0a8a..a966622 100755 --- a/ingest_mdir.py +++ b/ingest_mdir.py @@ -13,88 +13,331 @@ import configparser import os import re -import threading import queue +import shutil +import sys +import tempfile +import time -from core import NIPA_DIR -from core import log, log_open_sec, log_end_sec, log_init +from core import cmd +from core import log_open_sec, log_end_sec, log_init from core import Patch from core import Series from core import Tree from core import Tester +CONSOLE_WIDTH = None +BOLD = '\033[1m' +RED = '\033[31m' +GREEN = '\033[32m' +YELLOW = '\033[33m' +RESET = '\033[0m' + config = configparser.ConfigParser() -config.read(['nipa.config', "tester.config"]) -results_dir = config.get('results', 'dir', fallback=os.path.join(NIPA_DIR, "results")) +config.add_section('dirs') +config.add_section('log') +config.add_section('tests') -# TODO: use config parser = argparse.ArgumentParser() -parser.add_argument('--mdir', required=True, help='path to the directory with the patches') + +patch_arg = parser.add_mutually_exclusive_group(required=True) +patch_arg.add_argument('--patch', help='path to the patch file') +patch_arg.add_argument('--mdir', help='path to the directory with the patches') + parser.add_argument('--tree', required=True, help='path to the tree to test on') -parser.add_argument('--tree-name', default='unknown', help='the tree name to expect') -parser.add_argument('--tree-branch', default='master', - help='the branch or commit to use as a base for applying patches') -parser.add_argument('--result-dir', default=results_dir, +parser.add_argument('--tree-name', help='the tree name 
to expect') +parser.add_argument('--result-dir', help='the directory where results will be generated') -args = parser.parse_args() - -args.mdir = os.path.abspath(args.mdir) -args.tree = os.path.abspath(args.tree) - -log_init(config.get('log', 'type'), config.get('log', 'path'), force_single_thread=True) - -log_open_sec("Loading patches") -try: - files = [os.path.join(args.mdir, f) for f in sorted(os.listdir(args.mdir))] - series = Series() - series.tree_selection_comment = "ingest_mdir" - series.tree_mark_expected = False - - for f in files: - with open(f, 'r') as fp: - data = fp.read() - if re.search(r"\[.* 0+/\d.*\]", data) and \ - not re.search(r"\n@@ -\d", data): - series.set_cover_letter(data) - else: - series.add_patch(Patch(data)) -finally: - log_end_sec() - -tree = Tree(args.tree_name, args.tree_name, args.tree, branch=args.tree_branch) -if not tree.check_applies(series): - print("Patch series does not apply cleanly to the tree") - os.sys.exit(1) - -try: - done = queue.Queue() - pending = queue.Queue() - barrier = threading.Barrier(2) - tester = Tester(args.result_dir, tree, pending, done, barrier) - tester.start() - - pending.put(series) - - # Unleash all workers - log("Activate workers", "") - barrier.wait() - - # Wait for workers to come back - log("Wait for workers", "") - barrier.wait() - - # Shut workers down - tester.should_die = True - pending.put(None) - barrier.wait() - -finally: - barrier.abort() - tester.should_die = True - pending.put(None) - tester.join() - -# Summary hack -os.system(f'for i in $(find {args.result_dir} -type f -name summary); do dir=$(dirname "$i"); head -n2 "$dir"/summary; cat "$dir"/desc 2>/dev/null; done' - ) +parser.add_argument('--list-tests', action='https://github.com/store_true', + help='print all available tests and exit') +parser.add_argument('-d', '--disable-test', nargs='+', + help='disable test, can be specified multiple times') +parser.add_argument('-t', '--test', nargs='+', + help='run only specified tests. Note: full test name is needed, e.g. "patch/pylint" or "series/ynl" not just "pylint" or "ynl"') +parser.add_argument('--dbg-print-run', help='print results of previous run') + + +def get_console_width(): + """ Get console width to avoid line wraps where we can. """ + + global CONSOLE_WIDTH + + if CONSOLE_WIDTH is None: + try: + terminal_size = shutil.get_terminal_size() + CONSOLE_WIDTH = terminal_size.columns + except OSError: + CONSOLE_WIDTH = 80 + return CONSOLE_WIDTH + + +def get_series_id(result_dir): + """ Find an unused series ID. """ + + i = 1 + while os.path.exists(os.path.join(result_dir, str(i))): + i += 1 + return i + + +def __print_summary_result(offset, files, full_path): + with open(os.path.join(full_path, "retcode"), "r", encoding="utf-8") as fp: + retcode = int(fp.read()) + desc = None + if "desc" in files: + with open(os.path.join(full_path, "desc"), "r", encoding="utf-8") as fp: + desc = fp.read().strip().replace('\n', ' ') + + failed = False + + if retcode == 0: + print(GREEN + "OKAY " + RESET, end='') + elif retcode == 250: + print(YELLOW + "WARNING" + RESET, end='') + else: + print(RED + "FAIL " + RESET + f"({retcode})", end='') + failed = True + + if failed or (desc and len(desc) + offset > get_console_width()): + print("\n", end=" ") + if desc: + print("", desc, end='') + if failed: + print("\n", end=" ") + if failed: + print(" Outputs:", full_path, end='') + print('', flush=True) + + +def print_summary_singleton(print_state, files, full_path, patch_id): + """ + Print summaries, single patch mode. 
+ Output differs if we have one patch vs many because tester will + run the same test on all the patches in sequence. + """ + + if len(print_state['seen']) == 1: + print() + print(BOLD + "Series level tests:") + + if patch_id != print_state['last_patch']: + print_state['last_patch'] = patch_id + print(BOLD + "Patch level tests:") + + test_name = os.path.basename(full_path) + + print(BOLD + f" {test_name:32}", end='') + __print_summary_result(41, files, full_path) + + +def print_summary_series(print_state, files, full_path, patch_id): + """ Print summaries, series mode (more than one patch). """ + + test_name = os.path.basename(full_path) + if test_name != print_state.get('last_test'): + print_state['last_test'] = test_name + print() + print(BOLD + test_name) + + if patch_id >= 0: + patch_str = f"Patch {patch_id + 1:<6}" + else: + patch_str = "Full series " + + print(BOLD + " " + patch_str, end='') + __print_summary_result(21, files, full_path) + + +def print_test_summary(args, series, print_state, tests=None): + """ + Report results based on files created by the tester in the filesystem. + Track which files we have already as this function should be called + periodically to check for new results, as the tester runs. + """ + + seen = print_state.get('seen', set()) + print_state['seen'] = seen + print_state['last_patch'] = print_state.get('last_patch', -1) + + for full_path, _, files in os.walk(os.path.join(args.result_dir, + str(series.id))): + if full_path in seen: + continue + if "summary" not in files: + continue + seen.add(full_path) + + rel_path = full_path[len(args.result_dir) + 1:].split('/') + test_name = os.path.basename(full_path) + + if tests and test_name not in tests: + continue + + patch_id = -1 + if len(rel_path) == 3: + patch_id = int(rel_path[-2]) - 1 + + if len(series.patches) == 1: + print_summary_singleton(print_state, files, full_path, patch_id) + else: + print_summary_series(print_state, files, full_path, patch_id) + + +def print_series_info(series): + """ Print list of patches """ + + if len(series.patches) > 2 and series.cover_letter is None: + print(BOLD + "No cover letter" + RESET) + elif series.cover_letter: + print(BOLD + series.title + RESET) + + for p in series.patches: + print(" " + f"[{p.id}] " + p.title) + + +def run_tester(args, tree, series): + """ Run the tester, report results as they appear """ + + summary_seen = {} + + try: + done = queue.Queue() + pending = queue.Queue() + tester = Tester(args.result_dir, tree, pending, done, + config=config) + tester.start() + + pending.put(series) + pending.put(None) + + while done.empty(): + print_test_summary(args, series, summary_seen) + time.sleep(0.2) + # Finish, print the last test's result + print_test_summary(args, series, summary_seen) + except: + print("Error / Interrupt detected, asking runner to stop") + tester.should_die = True + tester.join() + raise + finally: + tester.join() + + +def load_patches(args): + """ Load patches from specified location on disk """ + + if args.dbg_print_run is None: + series_id = get_series_id(args.result_dir) + else: + series_id = int(args.dbg_print_run) + + log_open_sec("Loading patches") + try: + if args.mdir: + mdir = os.path.abspath(args.mdir) + files = [os.path.join(mdir, f) for f in sorted(os.listdir(mdir))] + else: + files = [os.path.abspath(args.patch)] + + series = Series(ident=series_id) + series.tree_selection_comment = "ingest_mdir" + series.tree_mark_expected = False + + for f in files: + with open(f, 'r', encoding="utf-8") as fp: + data = fp.read() + if 
re.search(r"\[.* 0+/\d.*\]", data) and \ + not re.search(r"\n@@ -\d", data): + series.set_cover_letter(data) + else: + series.add_patch(Patch(data)) + finally: + log_end_sec() + + return series + + +def list_tests(args, config): + """ List all available tests and exit """ + + tester = Tester(args.result_dir, None, None, None, config=config) + print(' ', '\n '.join(tester.get_test_names())) + + +def main(): + """ Main function """ + + args = parser.parse_args() + + args.tree = os.path.abspath(args.tree) + + if args.test: + config.set('tests', 'include', ','.join(args.test)) + + if args.list_tests: + list_tests(args, config) + return + + if args.result_dir is None: + args.result_dir = tempfile.mkdtemp() + print("Saving output and logs to:", args.result_dir) + + config.set('log', 'type', 'org') + config.set('log', 'dir', args.result_dir) + config.set('log', 'path', "nipa.log") + + log_init(config.get('log', 'type'), + os.path.join(args.result_dir, 'nipa.log'), + force_single_thread=True) + + series = load_patches(args) + + tree_name = args.tree_name + if tree_name is None: + # Try to guess tree name from the patch subject, expecting subject + # to be something like [PATCH tree-name 2/N]. + tags = re.search( + r'Subject: \[(?:PATCH|RFC) (?:v\d+ )?([a-zA-Z-]+)(?: v\d+)?(?: \d*\/\d*)?\]', + series.patches[0].raw_patch + ) + if tags: + tree_name = tags.group(1).strip() + print("Tree name extracted from patches:", tree_name) + else: + tree_name = "unknown" + print("Tree name unknown") + + # Default settings for networking trees: + if tree_name.startswith('net'): + if not args.disable_test: + config.set('tests', 'exclude', 'patch/signed') + + print_series_info(series) + + if args.dbg_print_run: + print_test_summary(args, series, {}, tests={'ynl', 'build_clang'}) + return + + try: + tree = Tree(tree_name, tree_name, args.tree, current_branch=True) + except cmd.CmdError: + print("Can't assertain tree state, is a valid branch checked out?") + raise + head = tree.head_hash() + tree.git_checkout(head) + + if not tree.check_applies(series): + print("Patch series does not apply cleanly to the tree") + os.sys.exit(1) + + tree.git_reset(head, hard=True) + + run_tester(args, tree, series) + tree.git_checkout(tree.branch) + tree.git_reset(head, hard=True) + + +if __name__ == "__main__": + main() diff --git a/mailbot.py b/mailbot.py index 205a0f2..c42d0d0 100755 --- a/mailbot.py +++ b/mailbot.py @@ -6,6 +6,7 @@ import csv import datetime import os +import re import requests import signal import time @@ -29,7 +30,9 @@ maintainers = None authorized_users = set() auto_changes_requested = set() +auto_awaiting_upstream = set() delay_actions = [] # contains tuples of (datetime, email) +http_headers = None pw_act_active = { @@ -187,7 +190,8 @@ def load_section(self, location, name): self.loc_map[name] = location - r = requests.get(f'https://www.kernel.org/doc/html/next/%7Blocation%7D.html') + r = requests.get(f'https://www.kernel.org/doc/html/next/%7Blocation%7D.html', + headers=http_headers) data = r.content.decode('utf-8') offs = 0 @@ -206,7 +210,7 @@ def load_section(self, location, name): # Now populate the plain text contents url = f'https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/plain/Documentation/%7Blocation%7D.rst' - r = requests.get(url) + r = requests.get(url, headers=http_headers) data = r.content.decode('utf-8') lines = data.split('\n') @@ -260,6 +264,15 @@ def __init__(self, msg_path): self._series_author = None self._authorized = None + def __eq__(self, 
other): + return True + + def __lt__(self, other): + return False + + def __gt__(self, other): + return False + def get(self, item, failobj=None): return self.msg.get(item, failobj) @@ -304,7 +317,6 @@ def _resolve_authorized(self, pw): continue file_names.add(line[6:]) - global maintainers maintainer_matches = maintainers.find_by_paths(file_names).find_by_owner(self.msg.get('From')) if len(maintainer_matches): self._authorized = repr(maintainer_matches) @@ -312,7 +324,31 @@ def _resolve_authorized(self, pw): self._authorized = False def user_bot(self): - return self.msg.get('From') in auto_changes_requested + sender = self.msg.get('From') + # strip down bla+01234@email.com to bla@email.com, for syzbot + sender = re.sub(r"\+[a-zA-Z0-9_-]*@", "@", sender) + return sender in auto_changes_requested + + def auto_awaiting_upstream(self): + # Try to operate only on the first message in the thread + if self.get('References', ""): + return False + subject = self.get('Subject') + if not subject or subject[0] != '[': + return False + + tags_end = subject.rfind(']') + if tags_end == -1: + return False + tags = subject[1:tags_end] + + for designation in auto_awaiting_upstream: + if designation in tags: + return True + return False + + def auto_actions(self): + return self.user_bot() or self.auto_awaiting_upstream() def self_reply(self, pw): return self.get_thread_author(pw) == self.msg.get("From") @@ -327,7 +363,7 @@ def dkim_ok(self): def _resolve_thread(self, pw): subject = self.get('Subject') - if subject.find(' 0/') != -1 or subject.find(' 00/') != -1: + if re.search(r"\W0+/", subject): obj_type = 'covers' else: obj_type = 'patches' @@ -346,7 +382,8 @@ def _resolve_thread(self, pw): self._series_id = pw_obj[0]['series'][0]['id'] - r = requests.get(f'https://lore.kernel.org/all/%7Bmid%7D/raw') + r = requests.get(f'https://lore.kernel.org/all/%7Bmid%7D/raw', + headers=http_headers) data = r.content.decode('utf-8') msg = email.message_from_string(data, policy=default) self._series_author = msg.get('From') @@ -370,7 +407,7 @@ def get_thread_author(self, pw): return self._series_author def has_actions(self): - if self.user_bot(): + if self.auto_actions(): return True body_str = self._body() @@ -404,6 +441,10 @@ def extract_actions(self, pw): self.actions.append('pw-bot: changes-requested') self.pw_act.append('changes-requested') + if len(self.pw_act) == 0 and self.auto_awaiting_upstream(): + self.actions.append('pw-bot: awaiting-upstream') + self.pw_act.append('awaiting-upstream') + if not self.user_authorized(pw): bad = False if len(self.dr_act) or len(self.pw_act) > 1: @@ -422,6 +463,11 @@ def extract_actions(self, pw): self.dr_act = [] self.pw_act = [] + def flush_actions(self): + self.actions = [] + self.dr_act = [] + self.pw_act = [] + # # PW stuff @@ -493,7 +539,6 @@ def handler(signum, _): def pw_state_log(fields): - global config log_name = config.get('mailbot', 'change-log') if not log_name: return @@ -506,8 +551,6 @@ def pw_state_log(fields): def weak_act_should_ignore(msg, series, want): - global pw_act_active - if msg.user_authorized(): return None current = series.state() @@ -531,8 +574,8 @@ def do_mail(msg, pw, dr): series_id = msg.get_thread_series(pw) if not series_id: - print('', 'ERROR: could not find patchwork series') - return + print('', 'INFO: could not find patchwork series, retry in an hour') + raise MlDelayActions("not in PW", datetime.datetime.now() + datetime.timedelta(hours=1)) series = PwSeries(pw, series_id) patches = [p['id'] for p in series.patches] @@ 
-560,7 +603,10 @@ def do_mail(msg, pw, dr): name = series["name"] if not name: name = '? ' + msg.get('Subject') - log = [name, msg.get('From'), series.state(), pw_act_map[act], series["id"], mid] + actor = msg.get('From') + if msg.auto_awaiting_upstream(): + actor = "auto" + log = [name, actor, series.state(), pw_act_map[act], series["id"], mid] pw_state_log(log) else: print('', '', "ERROR: action not in the map:", f"'{act}'") @@ -583,15 +629,15 @@ def do_mail(msg, pw, dr): def do_mail_file(msg_path, pw, dr): msg = MlEmail(msg_path) + if not msg.has_actions(): + print('INFO: no actions, skip:', msg.get('Message-ID')) + return + print('Message-ID:', msg.get('Message-ID')) print('', 'Subject:', msg.get('Subject')) print('', 'From:', msg.get('From')) - if not msg.has_actions(): - print('', '', 'INFO: no actions, skip') - return - - if not msg.user_authorized(pw) and not msg.user_bot() and not msg.self_reply(pw): + if not msg.user_authorized(pw) and not msg.auto_actions() and not msg.self_reply(pw): print('', '', 'INFO: not an authorized user, skip') return print('', 'Authorized:', msg.user_authorized()) @@ -604,7 +650,7 @@ def do_mail_file(msg_path, pw, dr): try: do_mail(msg, pw, dr) except MlDelayActions as e: - global delay_actions + msg.flush_actions() # avoid duplicates, actions will get re-parsed delay_actions.append((e.when, msg, )) @@ -613,7 +659,7 @@ def do_mail_delayed(msg, pw, dr): print('', 'Subject:', msg.get('Subject')) print('', 'From:', msg.get('From')) - if not msg.user_authorized(pw) and not msg.user_bot(): + if not msg.user_authorized(pw) and not msg.auto_actions(): print('', '', 'INFO: not an authorized user, skip') return print('', 'Authorized:', msg.user_authorized()) @@ -626,12 +672,21 @@ def do_mail_delayed(msg, pw, dr): try: do_mail(msg, pw, dr) except MlDelayActions as e: - global delay_actions - delay_actions.append((e.when, msg, )) + print("ERROR: message delayed for the second time", str(e)) + + +def fetch_tree(tree): + for _ in range(3): + try: + tree.git_fetch(tree.remote) + return + except: + print('WARNING: git fetch failed, retrying') + time.sleep(300) def check_new(tree, pw, dr): - tree.git_fetch(tree.remote) + fetch_tree(tree) hashes = tree.git(['log', "--format=%h", f'..{tree.remote}/{tree.branch}', '--reverse']) hashes = hashes.split() for h in hashes: @@ -645,8 +700,9 @@ def main(): config = configparser.ConfigParser() config.read(['nipa.config', 'pw.config', 'mailbot.config']) + log_dir = config.get('log', 'dir', fallback=NIPA_DIR) log_init(config.get('log', 'type', fallback='org'), - config.get('log', 'file', fallback=os.path.join(NIPA_DIR, "mailbot.org")), + config.get('log', 'file', fallback=os.path.join(log_dir, "mailbot.org")), force_single_thread=True) pw = Patchwork(config) @@ -654,14 +710,20 @@ def main(): signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) - global authorized_users + global http_headers + ua = config.get('patchwork', 'user-agent', fallback='') + if ua: + http_headers = {"user-agent":ua} + users = config.get('mailbot', 'authorized') authorized_users.update(set(users.split(','))) - global auto_changes_requested users = config.get('mailbot', 'error-bots') auto_changes_requested.update(set(users.split(','))) + users = config.get('mailbot', 'awaiting-upstream') + auto_awaiting_upstream.update(set(users.split(','))) + tree_dir = config.get('dirs', 'trees', fallback=os.path.join(NIPA_DIR, "../")) mail_repos = {} for tree in config['mail-repos']: @@ -679,13 +741,12 @@ def main(): doc_load_time = 
datetime.datetime.fromtimestamp(0) dr = None - global should_stop while not should_stop: req_time = datetime.datetime.now() if (req_time - doc_load_time).total_seconds() > 24 * 60 * 60: global maintainers - maintainers = Maintainers(url='https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/MAINTAINERS') + maintainers = Maintainers(url='https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/MAINTAINERS', config=config) dr = DocRefs() dr.load_section('process/maintainer-netdev', 'net') @@ -696,10 +757,13 @@ def main(): dr.alias_section('submitting-patches', 'submit') dr.alias_section('submitting-patches', 'sub') + doc_load_time = req_time + for t in mail_repos.values(): check_new(t, pw, dr) global delay_actions + delay_actions.sort() while len(delay_actions) and (delay_actions[0][0] - req_time).total_seconds() < 0: msg = delay_actions[0][1] delay_actions = delay_actions[1:] diff --git a/netdev/__init__.py b/netdev/__init__.py index d20d9f1..e081618 100644 --- a/netdev/__init__.py +++ b/netdev/__init__.py @@ -12,3 +12,6 @@ series_tree_name_should_be_local, \ series_is_a_fix_for, \ series_needs_async + +current_tree = 'net' +next_tree = 'net-next' diff --git a/netdev/tree_match.py b/netdev/tree_match.py index 0844530..9046c37 100644 --- a/netdev/tree_match.py +++ b/netdev/tree_match.py @@ -9,8 +9,8 @@ from core import log, log_open_sec, log_end_sec -def series_tree_name_direct(series): - for t in ['net-next', 'net', 'bpf-next', 'bpf']: +def series_tree_name_direct(conf_trees, series): + for t in conf_trees: if re.match(r'\[.*{pfx}.*\]'.format(pfx=t), series.subject): return t @@ -44,28 +44,35 @@ def _tree_name_should_be_local_files(raw_email): 'Documentation/', 'include/', 'rust/', + 'tools/', + 'drivers/phy/', + 'drivers/vhost/', } required_files = { + 'Documentation/devicetree/bindings/net/', + 'Documentation/netlink/', 'Documentation/networking/', 'include/linux/netdevice.h', 'include/linux/skbuff.h', 'include/net/', 'include/phy/', + # lib/ is pretty broad but patch volume is low + 'lib/', 'net/', 'drivers/atm/', + 'drivers/bluetooth/', 'drivers/dpll/', + 'drivers/isdn/', 'drivers/net/', 'drivers/dsa/', 'drivers/nfc/', - 'drivers/phy/', 'drivers/ptp/', 'drivers/net/ethernet/', 'kernel/bpf/', + 'tools/net/', 'tools/testing/selftests/net/', } - excluded_files = { - 'drivers/net/wireless/', - } + excluded_files = set() all_files = acceptable_files.union(required_files) required_found = False foreign_found = False diff --git a/pw/patchwork.py b/pw/patchwork.py index bd01347..c08e07c 100644 --- a/pw/patchwork.py +++ b/pw/patchwork.py @@ -9,6 +9,7 @@ import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry +import time import urllib import core @@ -41,6 +42,10 @@ def __init__(self, config): self._token = config.get('patchwork', 'token', fallback='') self._user = config.get('patchwork', 'user', fallback='') + ua = config.get('patchwork', 'user-agent', fallback='') + if ua: + self._session.headers.update({"user-agent":ua}) + config_project = config.get('patchwork', 'project') pw_project = self.get_project(config_project) if pw_project: @@ -68,6 +73,24 @@ def _request(self, url): def request(self, url): return self._request(url).json() + def request_all(self, url): + items = [] + + while url: + response = self._request(url) + items += response.json() + + if 'Link' not in response.headers: + break + url = '' + links = response.headers['Link'].split(',') + for link in links: + info = 
link.split(';') + if info[1].strip() == 'rel="next"': + url = info[0][1:-1] + + return items + def get(self, object_type, identifier): return self._get(f'{object_type}/{identifier}/').json() @@ -125,7 +148,10 @@ def _post(self, req, headers, data, api='1.1'): core.log("Headers", headers) core.log("Data", data) core.log("Response", ret) - core.log("Response data", ret.json()) + try: + core.log("Response data", ret.json()) + except json.decoder.JSONDecodeError: + core.log("Response data", ret.content.decode()) finally: core.log_end_sec() @@ -170,10 +196,21 @@ def get_patches_all(self, delegate=None, project=None, since=None, action_requir query['archived'] = 'false' return self.get_all('patches', query) - def get_series_all(self, project=None, since=None): + def get_new_series(self, project=None, since=None): if project is None: project = self._project - return self.get_all('series', {'project': project, 'since': since}) + event_params = { + 'project': project, + 'since': since, + 'order': 'date', + 'category': 'series-completed', + } + events = self.get_all('events', event_params) + if not events: + return [], since + since = events[-1]['date'] + series = [self.get('series', e['payload']['series']['id']) for e in events] + return series, since def post_check(self, patch, name, state, url, desc): headers = {} @@ -189,6 +226,10 @@ def post_check(self, patch, name, state, url, desc): } r = self._post(f'patches/{patch}/checks/', headers=headers, data=data) + if r.status_code == 502 or r.status_code == 504: + # Timeout, let's wait 30 sec and retry, POST isn't retried by the lib. + time.sleep(30) + r = self._post(f'patches/{patch}/checks/', headers=headers, data=data) if r.status_code != 201: raise PatchworkPostException(r) diff --git a/pw/pw_series.py b/pw/pw_series.py index f974245..fdbee78 100644 --- a/pw/pw_series.py +++ b/pw/pw_series.py @@ -72,7 +72,7 @@ def __init__(self, pw, pw_series): for pid in pids: raw_patch = pw.get_mbox('patch', pid) - self.patches.append(Patch(raw_patch, pid)) + self.add_patch(Patch(raw_patch, pid)) if not pw_series['cover_letter']: if len(self.patches) == 1: diff --git a/pw_brancher.py b/pw_brancher.py index f0860a3..0ac022f 100755 --- a/pw_brancher.py +++ b/pw_brancher.py @@ -5,8 +5,11 @@ import datetime import json import os +import psycopg2 +import subprocess import time from typing import List, Tuple +import uuid from core import NIPA_DIR from core import log, log_open_sec, log_end_sec, log_init @@ -30,9 +33,13 @@ [output] branches=branches.json info=branches-info.json +deltas=/path/to/dir/ +[db] +db=db-name """ +psql_conn = None ignore_delegate = {} gate_checks = {} @@ -55,11 +62,11 @@ def pwe_has_all_checks(pw, entry) -> bool: if "checks" not in entry: return False checks = pw.request(entry["checks"]) - found = 0 + found = dict.fromkeys(gate_checks, 0) for c in checks: - if c["context"] in gate_checks and c["state"] == "success": - found += 1 - return found == len(gate_checks) + if c["context"] in gate_checks: + found[c["context"]] = int(c["state"] == "success") + return sum(found.values()) == len(gate_checks) def pwe_series_id_or_none(entry) -> int: @@ -151,24 +158,97 @@ def apply_pending_patches(pw, config, tree) -> Tuple[List, List]: return list(applied_series), list(applied_prs) -def apply_local_patches(config, tree) -> List: - extras = [] - for entry in config.get("local", "patches", fallback="").split(','): - with open(entry, "r") as fp: +def _apply_local_patch(path, tree, dir_path=None) -> bool: + """Apply a single patch file to the tree.""" + 
log_msg = "Applying: " + path + if dir_path: + log_msg += f" (dir: {dir_path})" + log_open_sec(log_msg) + try: + with open(path, "r") as fp: data = fp.read() - - log_open_sec("Applying: " + entry) p = Patch(data) try: tree.apply(p) - extras.append(entry) + success = True except PatchApplyError: - pass - log_end_sec() + success = False + except Exception as e: + log(f"Error reading or applying patch {path}: {str(e)}") + success = False + log_end_sec() + return success + +def apply_local_patches(config, tree) -> List: + extras = [] + for entry in config.get("local", "patches", fallback="").split(','): + if not entry: + continue + + if os.path.isdir(entry): + # If entry is a directory, apply all .patch files in it + for filename in os.listdir(entry): + if filename.endswith(".patch"): + patch_path = os.path.join(entry, filename) + if _apply_local_patch(patch_path, tree, entry): + extras.append(patch_path) + else: + # Regular file handling + if _apply_local_patch(entry, tree): + extras.append(entry) return extras +def db_insert(config, state, name): + # Branches usually have a trailing separator + pfx = config.get("target", "branch_pfx")[:-1] + pub_url = config.get('target', 'public_url') + row = {"branch": name, + "date": state["branches"][name], + "base": state["hashes"].get(name, None), + "url": pub_url + " " + name} + row |= state["info"][name] + + with psql_conn.cursor() as cur: + cols = "(branch, stream, t_date, base, url, info)" + arg = cur.mogrify("(%s,%s,%s,%s,%s,%s)", + (row["branch"], pfx, row["date"], row["base"], + row["url"], json.dumps(row))) + cur.execute(f"INSERT INTO branches {cols} VALUES " + arg.decode('utf-8')) + + +def generate_deltas(config, tree, name): + outdir = config.get("output", "deltas", fallback=None) + if not outdir: + return + + outfile = os.path.join(outdir, name) + cidiff = os.path.join(os.path.dirname(__file__), "contest", "cidiff") + + with open(outfile, 'w') as fp: + subprocess.run([cidiff, name], cwd=tree.path, stdout=fp, check=True) + + outfile += ".html" + cidiff = os.path.join(os.path.dirname(__file__), "contest", "cidiff.py") + # pub_url is for git, so it most likely ends with ".git" + pub_url = config.get('target', 'public_url')[:-4] + subprocess.run([cidiff, name, '-H', '-o', outfile, '-g', pub_url], + cwd=tree.path, check=False) + + +def get_change_from_last(tree, branch_list) -> bool: + branch_list = list(sorted(branch_list)) + if len(branch_list) < 2: + return True + + try: + tree.git(['diff', '--quiet', branch_list[-1], branch_list[-2]]) + return False + except: + return True + + def create_new(pw, config, state, tree, tgt_remote) -> None: now = datetime.datetime.now(datetime.UTC) pfx = config.get("target", "branch_pfx") @@ -179,32 +259,47 @@ def create_new(pw, config, state, tree, tgt_remote) -> None: tree.git_reset(tree.branch, hard=True) log_end_sec() + state["info"][branch_name] = {"base-pulls":{}} + pull_list = config.get("target", "pull", fallback=None) if pull_list: log_open_sec("Pulling in other trees") for url in pull_list.split(','): try: - tree.pull(url, reset=False) + tree.pull(url, reset=False, ff=False) + state["info"][branch_name]["base-pulls"][url] = "okay" except PullError: - log("PULL FAILED") - pass + try: + tree.pull(url, reset=False, trust_rerere=True) + state["info"][branch_name]["base-pulls"][url] = "resolved" + except PullError: + log("PULL FAILED") + state["info"][branch_name]["base-pulls"][url] = "fail" log_end_sec() state["hashes"][branch_name] = tree.head_hash() series, prs = apply_pending_patches(pw, config, tree) - 
state["info"][branch_name] = {"series": series, "prs": prs} + state["info"][branch_name] |= {"series": series, "prs": prs} extras = apply_local_patches(config, tree) state["info"][branch_name]["extras"] = extras + state["info"][branch_name]["new-changes"] = get_change_from_last(tree, state["info"].keys()) + state["branches"][branch_name] = now.isoformat() + db_insert(config, state, branch_name) + log_open_sec("Pushing out") tree.git_push(tgt_remote, "HEAD:" + branch_name) log_end_sec() + log_open_sec("Generate deltas") + generate_deltas(config, tree, branch_name) + log_end_sec() + def state_delete_branch(state, br): del state["branches"][br] @@ -228,6 +323,9 @@ def reap_old(config, state, tree, tgt_remote) -> None: br = br.strip() if not br.startswith(r_tgt_pfx + pfx): continue + # In case our prefix is a prefix of another brancher + if len(br) != len(r_tgt_pfx + pfx + "2000-01-01--00-00"): + continue br = br[len(r_tgt_pfx):] found.add(br) if br not in state["branches"]: @@ -304,6 +402,13 @@ def prep_remote(config, tree) -> str: return "brancher" +def open_db(config): + db_name = config.get("db", "db") + conn = psycopg2.connect(database=db_name) + conn.autocommit = True + return conn + + def main() -> None: config = configparser.ConfigParser() config.read(['nipa.config', 'pw.config', 'brancher.config']) @@ -332,6 +437,8 @@ def main() -> None: ignore_delegate = set(config.get('filters', 'ignore_delegate', fallback="").split(',')) global gate_checks gate_checks = set(config.get('filters', 'gate_checks', fallback="").split(',')) + global psql_conn + psql_conn = open_db(config) tree_obj = None tree_dir = config.get('dirs', 'trees', fallback=os.path.join(NIPA_DIR, "../")) diff --git a/pw_contest.py b/pw_contest.py index ec5ffc2..bdd8434 100755 --- a/pw_contest.py +++ b/pw_contest.py @@ -63,22 +63,28 @@ class Codes: } -def result_can_skip(entry, filters): - for ignore in filters["ignore-tests"]: - if entry["group"] == ignore["group"] and entry["test"] == ignore["test"]: - return True +def result_can_skip(results, entry, filters): + for ignore in filters["ignore-results"]: + if ("remote" not in ignore or results["remote"] == ignore["remote"]) and \ + ("executor" not in ignore or results["executor"] == ignore["executor"]) and \ + ("branch" not in ignore or results["branch"] == ignore["branch"]) and \ + ("group" not in ignore or entry["group"] == ignore["group"]) and \ + ("test" not in ignore or entry["test"] == ignore["test"]): + return True + return False -def results_summarize(filters: dict, result_list: list) -> dict: - if not result_list: - return {'result': 'pending', 'code': Codes.PENDING, 'cnt': 0} + +def results_summarize(filters: dict, results: dict) -> dict: + if not results or not results["results"]: + return {'result': 'pending', 'code': Codes.UNKNOWN, 'cnt': 0} cnt = 0 code = 0 - for entry in result_list: + for entry in results["results"]: test_code = str_to_code[entry["result"]] if test_code: - if result_can_skip(entry, filters): + if result_can_skip(results, entry, filters): continue code = max(code, test_code) @@ -86,17 +92,30 @@ def results_summarize(filters: dict, result_list: list) -> dict: return {'result': code_to_str[code], 'code': code, 'cnt': cnt} +def results_summary_combine(a, b): + code = max(a["code"], b["code"]) + return {'result': code_to_str[code], + 'code': code, + 'cnt': a['cnt'] + b['cnt']} + + def results_pivot(filters: dict, results: dict) -> dict: """ results come in as a list, we want to flip them into: - { "branch-name": {"code": ...}, } + { "branch-name": 
{"remote-name": {"code": ...}, }, } """ flipped = {} for entry in results: if entry['branch'] not in flipped: flipped[entry['branch']] = {} - flipped[entry['branch']][entry['executor']] = \ - results_summarize(filters, entry["results"]) + if entry['remote'] not in flipped[entry['branch']]: + flipped[entry['branch']][entry['remote']] = \ + results_summarize({}, {}) + + old = flipped[entry['branch']][entry['remote']] + new = results_summarize(filters, entry) + flipped[entry['branch']][entry['remote']] = \ + results_summary_combine(old, new) return flipped @@ -105,13 +124,20 @@ def branch_summarize(filters: dict, results_by_branch: dict) -> dict: for name, branch in results_by_branch.items(): code = 0 test_cnt = 0 - for executor in filters["executors"]: - if executor in branch: - code = max(code, branch[executor]['code']) - test_cnt += branch[executor]["cnt"] - else: - code = Codes.PENDING + pending = 0 + for remote in filters["remotes"]: + new_code = Codes.PENDING + if remote in branch: + # Stick to Pending for all unreal results + if branch[remote]['code'] not in unreal_results: + new_code = branch[remote]['code'] + test_cnt += branch[remote]["cnt"] + code = max(code, new_code) + if new_code == Codes.PENDING: + pending += 1 summary[name] = {'result': code_to_str[code], 'code': code, 'cnt': test_cnt} + if pending: + summary[name]['pending'] = pending return summary @@ -132,7 +158,11 @@ def result_upgrades(states: dict, item_id: str, outcome: dict, branch: str): if prev['code'] > outcome['code']: return True if prev['code'] == outcome['code']: - return prev['cnt'] < outcome['cnt'] + # use the run with most reported results, but wait for it to finish + # otherwise first contest for a patch updates the check every time + # a remote finishes and bumps the test case count (~12 updates) + if prev['cnt'] < outcome['cnt']: + return not outcome.get('pending') return False @@ -265,8 +295,9 @@ def parse_configs(): def main() -> None: config = parse_configs() + log_dir = config.get('log', 'dir', fallback=NIPA_DIR) log_init(config.get('log', 'type', fallback='org'), - config.get('log', 'file', fallback=os.path.join(NIPA_DIR, "contest.org")), + config.get('log', 'file', fallback=os.path.join(log_dir, "contest.org")), force_single_thread=True) pw = Patchwork(config) diff --git a/pw_poller.py b/pw_poller.py index 293fe1a..6226b1a 100755 --- a/pw_poller.py +++ b/pw_poller.py @@ -8,20 +8,21 @@ import datetime import json import os -import threading import shutil +import socket import time import queue from typing import Dict +from importlib import import_module from core import NIPA_DIR +from core import NipaLifetime from core import log, log_open_sec, log_end_sec, log_init from core import Tester from core import Tree from pw import Patchwork from pw import PwSeries import core -import netdev class IncompleteSeries(Exception): @@ -29,13 +30,7 @@ class IncompleteSeries(Exception): class PwPoller: - def __init__(self) -> None: - config = configparser.ConfigParser() - config.read(['nipa.config', 'pw.config', 'poller.config']) - - log_init(config.get('log', 'type', fallback='org'), - config.get('log', 'file', fallback=os.path.join(NIPA_DIR, "poller.org"))) - + def __init__(self, config) -> None: self._worker_id = 0 self._async_workers = [] @@ -59,28 +54,37 @@ def __init__(self) -> None: shutil.rmtree(self.worker_dir) os.makedirs(self.worker_dir) - self._barrier = threading.Barrier(len(self._trees) + 1) self._done_queue = queue.Queue() - self._workers = {} + self._workers = [] + self._work_queues = {} for k, tree 
in self._trees.items(): - self._workers[k] = Tester(self.result_dir, tree, queue.Queue(), self._done_queue, - self._barrier) - self._workers[k].start() - log(f"Started worker {self._workers[k].name} for {k}") + self._work_queues[k] = queue.Queue() + + worker_cnt = config.getint('workers', tree.name, fallback=1) + for worker_id in range(worker_cnt): + worker = Tester(self.result_dir, tree.work_tree(worker_id), + self._work_queues[k], self._done_queue) + worker.start() + log(f"Started worker {worker.name} for {k}") + self._workers.append(worker) self._pw = Patchwork(config) self._state = { - 'last_poll': (datetime.datetime.now() - datetime.timedelta(hours=2)).timestamp(), - 'done_series': [], + 'last_event_ts': (datetime.datetime.now() - + datetime.timedelta(hours=2)).strftime('%Y-%m-%dT%H:%M:%S'), } self.init_state_from_disk() - self.seen_series = set(self._state['done_series']) - self.done_series = self.seen_series.copy() self._recheck_period = config.getint('poller', 'recheck_period', fallback=3) self._recheck_lookback = config.getint('poller', 'recheck_lookback', fallback=9) + listmodname = config.get('list', 'module', fallback='netdev') + self.list_module = import_module(listmodname) + + self._local_sock = None + self._start_lock_sock(config) + def init_state_from_disk(self) -> None: try: with open('poller.state', 'r') as f: @@ -92,15 +96,15 @@ def init_state_from_disk(self) -> None: pass def _series_determine_tree(self, s: PwSeries) -> str: - s.tree_name = netdev.series_tree_name_direct(s) + s.tree_name = self.list_module.series_tree_name_direct(self._trees.keys(), s) s.tree_mark_expected = True s.tree_marked = bool(s.tree_name) if s.is_pure_pull(): if s.title.find('-next') >= 0: - s.tree_name = 'net-next' + s.tree_name = self.list_module.next_tree else: - s.tree_name = 'net' + s.tree_name = self.list_module.current_tree s.tree_mark_expected = None return f"Pull request for {s.tree_name}" @@ -108,12 +112,12 @@ def _series_determine_tree(self, s: PwSeries) -> str: log(f'Series is clearly designated for: {s.tree_name}', "") return f"Clearly marked for {s.tree_name}" - s.tree_mark_expected, should_test = netdev.series_tree_name_should_be_local(s) + s.tree_mark_expected, should_test = self.list_module.series_tree_name_should_be_local(s) if not should_test: log("No tree designation found or guessed", "") return "Not a local patch" - if netdev.series_ignore_missing_tree_name(s): + if self.list_module.series_ignore_missing_tree_name(s): s.tree_mark_expected = None log('Okay to ignore lack of tree in subject, ignoring series', "") return "Series ignored based on subject" @@ -123,11 +127,12 @@ def _series_determine_tree(self, s: PwSeries) -> str: else: log_open_sec('Series okay without a tree designation') - # TODO: make this configurable - if "net" in self._trees and netdev.series_is_a_fix_for(s, self._trees["net"]): - s.tree_name = "net" - elif "net-next" in self._trees and self._trees["net-next"].check_applies(s): - s.tree_name = "net-next" + if self.list_module.current_tree in self._trees and \ + self.list_module.series_is_a_fix_for(s, self._trees[self.list_module.current_tree]): + s.tree_name = self.list_module.current_tree + elif self.list_module.next_tree in self._trees and \ + self._trees[self.list_module.next_tree].check_applies(s): + s.tree_name = self.list_module.next_tree if s.tree_name: log(f"Target tree - {s.tree_name}", "") @@ -148,11 +153,7 @@ def series_determine_tree(self, s: PwSeries) -> str: return ret - def _process_series(self, pw_series) -> None: - if pw_series['id'] in 
self.seen_series: - log(f"Already seen {pw_series['id']}", "") - return - + def _process_series(self, pw_series, force_tree=None) -> None: s = PwSeries(self._pw, pw_series) log("Series info", @@ -163,119 +164,173 @@ def _process_series(self, pw_series) -> None: log(p['name'], "") log_end_sec() - if not s['received_all']: - raise IncompleteSeries + if force_tree: + comment = f"Force tree {force_tree}" + s.tree_name = force_tree + s.tree_mark_expected = None + s.tree_marked = True + else: + comment = self.series_determine_tree(s) + if not s['received_all']: + raise IncompleteSeries - comment = self.series_determine_tree(s) - s.need_async = netdev.series_needs_async(s) + s.need_async = self.list_module.series_needs_async(s) if s.need_async: comment += ', async' if hasattr(s, 'tree_name') and s.tree_name: s.tree_selection_comment = comment - self._workers[s.tree_name].queue.put(s) + if not s.tree_name in self._work_queues: + log(f"skip {pw_series['id']} for unknown tree {s.tree_name}", "") + return + self._work_queues[s.tree_name].put(s) else: core.write_tree_selection_result(self.result_dir, s, comment) core.mark_done(self.result_dir, s) - self.seen_series.add(s['id']) - - def process_series(self, pw_series) -> None: + def process_series(self, pw_series, force_tree=None) -> None: log_open_sec(f"Checking series {pw_series['id']} with {pw_series['total']} patches") try: - self._process_series(pw_series) + self._process_series(pw_series, force_tree) finally: log_end_sec() - def run(self) -> None: - partial_series = {} + def _start_lock_sock(self, config) -> None: + socket_path = config.get('poller', 'local_sock_path', fallback=None) + if not socket_path: + return - prev_big_scan = datetime.datetime.fromtimestamp(self._state['last_poll']) - prev_req_time = datetime.datetime.now() + if os.path.exists(socket_path): + os.unlink(socket_path) - # We poll every 2 minutes, for series from last 10 minutes - # Every 3 hours we do a larger check of series of last 12 hours to make sure we didn't miss anything - # apparently patchwork uses the time from the email headers and people back date their emails, a lot - # We keep a history of the series we've seen in and since the last big poll to not process twice + self._local_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._local_sock.setblocking(False) + self._local_sock.bind(socket_path) + self._local_sock.listen(5) + + log(f"Socket listener started on {socket_path}", "") + + def _check_local_sock(self) -> None: + if not self._local_sock: + return + + try: + conn, _ = self._local_sock.accept() + except BlockingIOError: + return + + log_open_sec("Processing local socket connection") try: + data = b"" while True: - this_poll_seen = set() - req_time = datetime.datetime.now() + chunk = conn.recv(4096) + data += chunk + if len(chunk) < 4096: + break - # Decide if this is a normal 4 minute history poll or big scan of last 12 hours - if prev_big_scan + datetime.timedelta(hours=self._recheck_period) < req_time: - big_scan = True - since = prev_big_scan - datetime.timedelta(hours=self._recheck_lookback) - log_open_sec(f"Big scan of last 12 hours at {req_time} since {since}") - else: - big_scan = False - since = prev_req_time - datetime.timedelta(minutes=10) - log_open_sec(f"Checking at {req_time} since {since}") - - json_resp = self._pw.get_series_all(since=since) + if data: + data = data.decode("utf-8") + series_ids = [] + items = data.split(";") + for item in items: + item = item.strip() + if not item: + continue + + # We accept "series [tree]; series 
[tree]; ..." + parts = item.rsplit(" ", 1) + if len(parts) == 2: + tree = parts[1].strip() + else: + tree = None + try: + s_id = int(parts[0].strip()) + series_ids.append((tree, s_id)) + log("Processing", series_ids[-1]) + except ValueError: + log("Invalid number in tuple", item) + continue + + for tree, series_id in series_ids: + try: + pw_series = self._pw.get("series", series_id) + self.process_series(pw_series, force_tree=tree) + conn.sendall(f"OK: {series_id}\n".encode("utf-8")) + except Exception as e: + log("Error processing series", str(e)) + conn.sendall(f"ERROR: {series_id}: {e}\n".encode("utf-8")) + else: + conn.sendall(b"DONE\n") + except Exception as e: + log("Error processing socket request", str(e)) + finally: + conn.close() + log_end_sec() + + def run(self, life) -> None: + since = self._state['last_event_ts'] + + try: + # We poll every 2 minutes after this + secs = 0 + while life.next_poll(secs): + req_time = datetime.datetime.now() + log_open_sec(f"Querying patchwork at {req_time} since {since}") + json_resp, since = self._pw.get_new_series(since=since) log(f"Loaded {len(json_resp)} series", "") - had_partial_series = False + # Advance the time by 1 usec, pw does >= for time comparison + since = datetime.datetime.fromisoformat(since) + since += datetime.timedelta(microseconds=1) + since = since.isoformat() + for pw_series in json_resp: try: self.process_series(pw_series) - this_poll_seen.add(pw_series['id']) except IncompleteSeries: - partial_series.setdefault(pw_series['id'], 0) - if partial_series[pw_series['id']] < 5: - had_partial_series = True - partial_series[pw_series['id']] += 1 - - if big_scan: - prev_req_time = req_time - prev_big_scan = req_time - # Shorten the history of series we've seen to just the last 12 hours - self.seen_series = this_poll_seen - self.done_series &= self.seen_series - elif had_partial_series: - log("Partial series, not moving time forward", "") - else: - prev_req_time = req_time - - # Unleash all workers - log("Activate workers", "") - self._barrier.wait() - # Wait for workers to come back - log("Wait for workers", "") - self._barrier.wait() + # didn't make it to the list fully, patchwork + # shouldn't have had this event at all though + pass + + self._check_local_sock() while not self._done_queue.empty(): s = self._done_queue.get() - self.done_series.add(s['id']) log(f"Testing complete for series {s['id']}", "") secs = 120 - (datetime.datetime.now() - req_time).total_seconds() if secs > 0: log("Sleep", secs) - time.sleep(secs) log_end_sec() - if os.path.exists('poller.quit'): - os.remove('poller.quit') - break + except KeyboardInterrupt: + pass # finally will still run, but don't splat finally: + # Dump state before trying to stop workers, in case they hang + self._state['last_event_ts'] = since + with open('poller.state', 'w') as f: + json.dump(self._state, f) + log_open_sec(f"Stopping threads") - self._barrier.abort() - for _, worker in self._workers.items(): + for worker in self._workers: worker.should_die = True worker.queue.put(None) - for _, worker in self._workers.items(): + for worker in self._workers: log(f"Waiting for worker {worker.tree.name} / {worker.name}") worker.join() log_end_sec() - self._state['last_poll'] = prev_big_scan.timestamp() - self._state['done_series'] = list(self.seen_series) - # Dump state - with open('poller.state', 'w') as f: - json.dump(self._state, f) - if __name__ == "__main__": os.umask(0o002) - poller = PwPoller() - poller.run() + + config = configparser.ConfigParser() + config.read(['nipa.config', 
'pw.config', 'poller.config']) + + log_dir = config.get('log', 'dir', fallback=NIPA_DIR) + log_init(config.get('log', 'type', fallback='org'), + config.get('log', 'file', fallback=os.path.join(log_dir, "poller.org"))) + + life = NipaLifetime(config) + poller = PwPoller(config) + poller.run(life) + life.exit() diff --git a/pw_upload.py b/pw_upload.py index ff356ba..6b7c508 100755 --- a/pw_upload.py +++ b/pw_upload.py @@ -166,8 +166,6 @@ def initial_scan(self): break def watch(self): - global should_stop - if self.main_wd is None: raise Exception('Not initialized') @@ -194,8 +192,9 @@ def main(): config = configparser.ConfigParser() config.read(['nipa.config', 'pw.config', 'upload.config']) + log_dir = config.get('log', 'dir', fallback=NIPA_DIR) log_init(config.get('log', 'type', fallback='org'), - config.get('log', 'file', fallback=os.path.join(NIPA_DIR, "upload.org")), + config.get('log', 'file', fallback=os.path.join(log_dir, "upload.org")), force_single_thread=True) results_dir = config.get('results', 'dir', fallback=os.path.join(NIPA_DIR, "results")) diff --git a/scripts/ui.sh b/scripts/ui.sh new file mode 100755 index 0000000..aabef77 --- /dev/null +++ b/scripts/ui.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# This script opens your local checkout of the UI in your browser. +# +# Note you need to have run `scripts/ui_assets.sh download` at least +# once prior to running this script. +# +# Usage example: +# +# ./scripts/ui.sh + +set -eu + +# Change dir to project root +cd "$(git rev-parse --show-toplevel)" + +# Quick sanity check +if [[ ! -d ./ui/static ]]; then + echo >&2 "Error: you haven't run scripts/ui_assets.sh yet" + exit 1 +fi + +# Need to run a local webserver to avoid CORS violations +python -m http.server -d ./ui -b localhost 8080 &> /dev/null & +pid=$! +trap 'kill ${pid}' EXIT + +# Best effort in case someone is on OSX or SSH forwarding +if ! xdg-open http://localhost:8080/status.html &> /dev/null; then + echo "UI is available at http://localhost:8080/status.html" +fi + +echo "Press Enter to stop serving and exit..." +read -r -p "" diff --git a/scripts/ui_assets.sh b/scripts/ui_assets.sh new file mode 100755 index 0000000..28f0980 --- /dev/null +++ b/scripts/ui_assets.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# +# This script manages all the assets a UI running locally +# on your computer would need. +# +# Note we are downloading real assets from a production instance. 
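
For reference, the asset download and the local web server that `scripts/ui_assets.sh` and `scripts/ui.sh` provide can be approximated in a few lines of Python. This is only an illustrative sketch, not part of NIPA itself; the production URL, asset paths, port and entry page are the ones used by the two scripts:

.. code-block:: python

    #!/usr/bin/env python3
    # Sketch: fetch the JSON assets the UI needs, then serve ./ui over HTTP
    # (a real web server is needed so the browser's CORS rules allow the fetches).
    import functools
    import http.server
    import pathlib
    import urllib.request

    PROD = "https://netdev.bots.linux.dev"
    ASSETS = [
        "static/nipa/checks.json",
        "static/nipa/systemd.json",
        "static/nipa/branch-results.json",
        "static/nipa/branches-info.json",
        "contest/filters.json",
        "contest/all-results.json",
    ]

    for asset in ASSETS:
        dst = pathlib.Path("ui") / asset
        dst.parent.mkdir(parents=True, exist_ok=True)
        urllib.request.urlretrieve(f"{PROD}/{asset}", dst)

    handler = functools.partial(http.server.SimpleHTTPRequestHandler, directory="ui")
    print("UI available at http://localhost:8080/status.html")
    http.server.HTTPServer(("localhost", 8080), handler).serve_forever()

Run it from the repository root, the same place the shell scripts expect to be run from.
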
+# +# Usage examples: +# +# ./scripts/ui_assets.sh download +# ./scripts/ui_assets.sh clean + +set -eu + +PROD=https://netdev.bots.linux.dev +LOCAL=./ui +ASSETS=( + "static/nipa/checks.json" + "static/nipa/systemd.json" + "static/nipa/branch-results.json" + "static/nipa/branches-info.json" + "contest/filters.json" + "contest/all-results.json" +) + +function usage() { + echo "Usage: ${0} download|clean" +} + +function download() { + mkdir -p "${LOCAL}/static/nipa" + mkdir -p "${LOCAL}/contest" + for asset in "${ASSETS[@]}"; do + curl "${PROD}/${asset}" -o "${LOCAL}/${asset}" + done +} + +function clean() { + for asset in "${ASSETS[@]}"; do + rm -f "${LOCAL}/${asset}" + done + rm -r "${LOCAL}/static" + rm -r "${LOCAL}/contest" +} + +# Change dir to project root +cd "$(git rev-parse --show-toplevel)" + +if [[ $# -ne 1 ]]; then + usage + exit 1 +fi + +case $1 in + download) + download + ;; + clean) + clean + ;; + *) + echo >&2 "Error: Unrecognized subcommand $1" + usage + exit 1 + ;; +esac diff --git a/status.js b/status.js deleted file mode 100644 index eacc377..0000000 --- a/status.js +++ /dev/null @@ -1,585 +0,0 @@ -function load_times(data, canva_id, patch_time) -{ - const minute = 1000 * 60; - const hour = minute * 60; - const day = hour * 24; - const year = day * 365; - - var entries = []; - var prev_min = 0; - var prev_val = 0; - - var offset = new Date().getTimezoneOffset() * minute; - var now = Date.now() + offset; - - $.each(data, function(i, v) { - if (v["check-date"] == null) - return true; - - var p_date = new Date(v["date"]); - var c_date = new Date(v["check-date"]); - - if (patch_time) { - minutes_back = v.minutes_back; - } else { - minutes_back = Math.round((now - c_date) / minute); - } - if (minutes_back / (64 * 24) > 7) - return true; - - value = ((c_date - p_date) / hour).toFixed(2); - - if (Math.abs(prev_min - minutes_back) > 2 || - Math.abs(prev_val - value) > 0.02) { - entries.push({"l": (minutes_back / 60).toFixed(2), "v": Math.max(value, 0)}); - - prev_min = minutes_back; - prev_val = value; - } - }); - - // Sort by labels - entries.sort(function(a, b){return a.l - b.l;}); - - const ctx = document.getElementById(canva_id); - - new Chart(ctx, { - type: 'line', - data: { - labels: entries.map(function(e){return e.l;}), - datasets: [{ - tension: 0.1, - label: 'Patch age at check delivery', - data: entries.map(function(e){return e.v;}) - }] - }, - options: { - scales: { - y: { - type: 'linear', - ticks: { - stepSize: 3 - }, - beginAtZero: true - }, - x: { - type: 'linear', - ticks: { - stepSize: 24 - }, - reverse: true - } - } - } - }); -} - -function run_it(data_raw) -{ - const minute = 1000 * 60; - const hour = minute * 60; - const day = hour * 24; - const year = day * 365; - - var offset = new Date().getTimezoneOffset() * minute; - var now = Date.now() + offset; - - var latest = new Date(data_raw[0].date); - var data = []; - $.each(data_raw, function(i, v) { - var date = new Date(v.date); - if (latest < date) - latest = date; - - if (v.check != "build_clang") - return true; - - v.days_back = Math.round((now - date) / day) + 1; - v.minutes_back = Math.round((now - date) / minute) + 1; - - data.push(v); - }); - - load_times(data, 'process-time', false); - load_times(data, 'process-time-p', true); -} - -function colorify_str_any(value, color_map) -{ - if (!(value in color_map)) - return value; - return '' + value + ''; -} - -function colorify_str(value, good) -{ - if (value == good) { - ret = ''; - } else { - ret = ''; - } - return ret + value + ''; -} - -function 
systemd_add_one(table, system, sname, v) -{ - var row = table.insertRow(); - var name = row.insertCell(0); - var ss = row.insertCell(1); - var tasks = row.insertCell(2); - var cpu = row.insertCell(3); - var mem = row.insertCell(4); - - let sstate = ""; - let now = system["time-mono"]; - - if (v.TriggeredBy == 0) { - cpuSec = v.CPUUsageNSec / 1000; - cpuHours = cpuSec / (now - v.ExecMainStartTimestampMonotonic); - cpuHours = cpuHours.toFixed(2); - - memGb = (v.MemoryCurrent / (1024 * 1024 * 1024)).toFixed(2); - memGb = memGb + 'GB'; - - state = v.ActiveState + " / " + v.SubState; - sstate = colorify_str(state, "active / running"); - - taskcnt = v.TasksCurrent; - } else { - cpuSec = v.CPUUsageNSec / 1000; - cpuHours = cpuSec / (v.ExecMainExitTimestampMonotonic - - v.ExecMainStartTimestampMonotonic); - cpuHours = cpuHours.toFixed(2); - - sstate = colorify_str(v.Result, "success"); - - taskcnt = ''; - memGb = ''; - } - - name.innerHTML = sname; - ss.innerHTML = sstate; - ss.setAttribute("style", "text-align: center"); - tasks.innerHTML = taskcnt; - tasks.setAttribute("style", "text-align: right"); - cpu.innerHTML = cpuHours; - cpu.setAttribute("style", "text-align: right"); - mem.innerHTML = memGb; - mem.setAttribute("style", "text-align: right"); -} - -function systemd(data_raw, data_local, data_remote) -{ - var table = document.getElementById("systemd"); - - $.each(data_local, function(i, v) { - systemd_add_one(table, data_raw, i, v); - }); - - $.each(data_remote, function(name, remote) { - $.each(remote["services"], function(service, v) { - systemd_add_one(table, remote, name + "/" + service, v); - }); - }); -} - -function load_runners(data_raw) -{ - var table = document.getElementById("runners"); - - $.each(data_raw, function(i, v) { - var row = table.insertRow(); - var name = row.insertCell(0); - var qlen = row.insertCell(1); - var pid = row.insertCell(2); - var patch = row.insertCell(3); - var test = row.insertCell(4); - - name.innerHTML = i; - pid.innerHTML = v.progress; - patch.innerHTML = v.patch; - test.innerHTML = v.test; - qlen.innerHTML = v.backlog; - }); -} - -function load_runtime(data_raw) -{ - var entries = []; - - $.each(data_raw["data"], function(i, v) { - entries.push({"l": i, "v": v}); - }); - - entries.sort(function(a, b){return b.v.pct - a.v.pct;}); - - const ctx = document.getElementById("run-time"); - - new Chart(ctx, { - type: 'bar', - data: { - labels: entries.map(function(e){return e.l;}), - datasets: [{ - yAxisID: 'A', - label: 'Percent of total runtime', - borderRadius: 5, - data: entries.map(function(e){return e.v.pct;}), - }, { - yAxisID: 'B', - label: 'Avgerage runtime in sec', - borderWidth: 1, - borderRadius: 5, - data: entries.map(function(e){return e.v.avg;}) - }] - }, - options: { - responsive: true, - plugins: { - legend: { - position: 'bottom', - }, - title: { - display: true, - text: 'Check runtime' - } - }, - scales: { - A: { - display: true, - beginAtZero: true - }, - B: { - position: 'right', - display: true, - beginAtZero: true - } - }, - }, - }); -} - -function status_system(data_raw) -{ - systemd(data_raw, data_raw["services"], data_raw["remote"]); - load_runners(data_raw["runners"]); - load_runtime(data_raw["log-files"]); -} - -function msec_to_str(msec) { - const convs = [ - [1, "ms"], - [1000, "s"], - [60, "m"], - [60, "h"], - [24, "d"], - [7, "w"] - ]; - - if (msec <= 0) - return msec.toString(); - - for (i = 0; i < convs.length; i++) { - if (msec < convs[i][0]) { - var full = Math.floor(msec) + convs[i - 1][1]; - if (i > 1) { - var frac = 
Math.round(msec * convs[i - 1][0] % convs[i - 1][0]); - if (frac) - full += " " + frac + convs[i - 2][1]; - } - return full; - } - msec /= convs[i][0]; - } - - return "TLE"; -} - -function colorify_str_psf(str_psf, name, value, color) -{ - var bspan = ''; - var cspan = ''; - - if (value && str_psf.overall == "") - str_psf.overall = cspan + name + ''; - - if (str_psf.str != "") { - str_psf.str = " / " + str_psf.str; - } - - var p; - if (value == 0) { - p = value; - } else { - p = bspan + value + ''; - } - str_psf.str = p + str_psf.str; -} - -function avg_time_e(avgs, v) -{ - const ent_name = v.remote + '/' + v.executor; - - if (!(ent_name in avgs)) - return 0; - return avgs[ent_name]["min-dly"] + - avgs[ent_name]["sum"] / avgs[ent_name]["cnt"]; -} - -function pw_filted_r(v, r) -{ - if (!reported_execs.has(v.executor)) - return false; - - for (const test of filtered_tests) { - if (r.group == test.group && r.test == test.test) - return false; - } - return true; -} - - -function load_result_table_one(data_raw, table, reported, avgs) -{ - $.each(data_raw, function(i, v) { - if (!reported_execs.has(v.executor) && reported) - return 1; - - var pass = 0, skip = 0, warn = 0, fail = 0, total = 0, ignored = 0; - var link = v.link; - $.each(v.results, function(i, r) { - if (pw_filted_r(v, r) != reported) { - ignored++; - return 1; - } - - if (r.result == "pass") { - pass++; - } else if (r.result == "skip") { - skip++; - } else if (r.result == "warn") { - warn++; - } else if (r.result == "fail") { - fail++; - } - - total++; - if (!link) - link = r.link; - }); - - if (reported_execs.has(v.executor) && !reported && !total) - return 1; - - var str_psf = {"str": "", "overall": ""}; - - colorify_str_psf(str_psf, "fail", fail, "red"); - colorify_str_psf(str_psf, "warn", warn, "orange"); - colorify_str_psf(str_psf, "skip", skip, "blue"); - colorify_str_psf(str_psf, "pass", pass, "green"); - - const span_small = " ("; - if (ignored) { - if (reported) - str_psf.overall += span_small + "ignored: " + ignored + ")"; - else - str_psf.overall += span_small + "reported: " + ignored + ")"; - } - - var row = table.insertRow(); - - var branch = row.insertCell(0); - var remote = row.insertCell(1); - - var t_start = new Date(v.start); - var t_end = new Date(v.end); - var a = ""; - - if (v.remote != "brancher") { - var time = row.insertCell(2); - - if (link) - remote.innerHTML = a + v.remote + ""; - else - remote.innerHTML = v.remote; - if (total) { - var cnt = row.insertCell(3); - var res = row.insertCell(4); - - var link_to_contest = ""; - - cnt.innerHTML = link_to_contest + str_psf.str + ""; - res.innerHTML = str_psf.overall; - time.innerHTML = msec_to_str(t_end - t_start); - } else { - var pend; - - const passed = Date.now() - v.start; - const expect = Math.round(avg_time_e(avgs, v)); - var remain = expect - passed; - var color = "pink"; - - if (remain > 0) { - pend = "pending (expected in " + (msec_to_str(remain)).toString() + ")"; - color = "blue"; - } else if (remain < -1000 * 60 * 60 * 2) { /* 2 h */ - pend = "timeout"; - } else { - pend = "pending (expected " + (msec_to_str(-remain)).toString() + " ago)"; - } - time.innerHTML = "" + pend + ""; - time.setAttribute("colspan", "3"); - } - } else { - let res = row.insertCell(2); - let br_res; - - remote.innerHTML = v.start.toLocaleString(); - remote.setAttribute("colspan", "2"); - branch.innerHTML = a + v.branch + ""; - branch.setAttribute("colspan", "2"); - br_res = ''; - br_res += colorify_str_any(branch_results[v.branch], - {"fail": "red", - "pass": "green", - 
"pending": "blue"}); - br_res += ''; - res.innerHTML = br_res; - } - }); -} - -function load_result_table(data_raw) -{ - var table = document.getElementById("contest"); - var table_nr = document.getElementById("contest-purgatory"); - - var branch_start = {}; - - $.each(data_raw, function(i, v) { - v.start = new Date(v.start); - v.end = new Date(v.end); - - if (v.remote == "brancher") - branch_start[v.branch] = v.start; - }); - - data_raw.sort(function(a, b){return b.end - a.end;}); - data_raw = data_raw.slice(0, 200); - - var avgs = {}; - $.each(data_raw, function(i, v) { - if (!v.results) - return 1; - - const ent_name = v.remote + '/' + v.executor; - - if (!(ent_name in avgs)) - avgs[ent_name] = {"cnt": 0, "sum": 0, "min-dly": 0}; - avgs[ent_name]["cnt"] += 1; - avgs[ent_name]["sum"] += (v.end - v.start); - - if (v.branch in branch_start) { - const dly = v.start - branch_start[v.branch]; - const old = avgs[ent_name]["min-dly"]; - - if (!old || old > dly) - avgs[ent_name]["min-dly"] = dly; - } - }); - - data_raw.sort(function(a, b){return avg_time_e(avgs, b) - avg_time_e(avgs, a);}); - data_raw.sort(function(a, b){return b.end - a.end;}); - data_raw.sort(function(a, b){return b.branch > a.branch;}); - - data_raw = data_raw.slice(0, 75); - - reported_execs.add("brancher"); - load_result_table_one(data_raw, table, true, avgs); - reported_execs.delete("brancher"); - load_result_table_one(data_raw, table_nr, false, avgs); -} - -let xfr_todo = 3; -let all_results = null; -let reported_execs = new Set(); -let filtered_tests = new Array(); -let branch_results = {}; - -function loaded_one() -{ - if (!--xfr_todo) - load_result_table(all_results); -} - -function results_loaded(data_raw) -{ - all_results = data_raw; - loaded_one(); -} - -function branch_res_doit(data_raw) -{ - $.each(data_raw, function(i, v) { - branch_results[i] = v.result; - }); - - loaded_one(); -} - -function filters_doit(data_raw) -{ - let cf_crashes = document.getElementById("cf-crashes"); - let cf_execs = document.getElementById("cf-execs"); - let cf_tests = document.getElementById("cf-tests"); - var output, sep = ""; - var execs = "Executors reported "; - - output = "Executors reported: "; - $.each(data_raw.executors, function(i, v) { - reported_execs.add(v); - output += sep + v; - sep = ", "; - }); - cf_execs.innerHTML = output; - - output = "Test ignored:
"; - $.each(data_raw["ignore-tests"], function(i, v) { - output += v.group + '/' + v.test + "
"; - filtered_tests.push(v); - }); - cf_tests.innerHTML = output; - - output = "Crashes ignored:
"; - $.each(data_raw["ignore-crashes"], function(i, v) { - output += v + "
"; - }); - cf_crashes.innerHTML = output; - - loaded_one(); -} - -function do_it() -{ - $(document).ready(function() { - $.get("static/nipa/checks.json", run_it) - }); - $(document).ready(function() { - $.get("static/nipa/systemd.json", status_system) - }); - $(document).ready(function() { - $.get("contest/filters.json", filters_doit) - }); - $(document).ready(function() { - $.get("static/nipa/branch-results.json", branch_res_doit) - }); - $(document).ready(function() { - $.get("contest/all-results.json", results_loaded) - }); -} diff --git a/system-status.py b/system-status.py index aebe12d..94a49b8 100755 --- a/system-status.py +++ b/system-status.py @@ -4,6 +4,7 @@ import datetime import lzma import os +import psycopg2 import re import requests import sys @@ -38,24 +39,38 @@ def add_one_service(result, name): result['time-mono'] = time.monotonic_ns() // 1000 +def add_disk_size(result, path): + output = subprocess.check_output(f"df {path} --output=avail,size".split()).decode('utf-8') + sizes = output.split('\n')[1].split() + sizes = [int(s) for s in sizes] + result["disk-use"] = round(sizes[0] / sizes[1] * 100, 2) + + def pre_strip(line, needle): return line[line.find(needle) + len(needle):].strip() def add_one_tree(result, pfx, name): - global char_filter + log_file = os.path.join(pfx, name) + stat = os.stat(log_file) - with open(os.path.join(pfx, name), 'r') as fp: + with open(log_file, 'r') as fp: lines = fp.readlines() last = None test = '' + test_prog = '' blog = '' progress = '' for line in lines: if 'Testing patch' in line: patch = pre_strip(line, 'Testing patch') - progress = patch[:patch.find('|')] - patch = patch[patch.find('|') + 2:] + + test_sep = patch.find('|') + patch_sep = patch.find('|', test_sep + 1) + + test_prog = patch[:test_sep] + progress = patch[test_sep + 1:patch_sep] + patch = patch[patch_sep + 2:] last = re.sub(char_filter, "", patch) test = '' elif '* Testing pull request' in line: @@ -69,12 +84,18 @@ def add_one_tree(result, pfx, name): test = pre_strip(line, 'Running test') elif 'Tester commencing ' in line: blog = line[35:].strip() - if 'Checking barrier' in line: + if 'Tester done processing' in line: last = None progress = '' test = '' + test_prog = '' blog = '' - result['runners'][name] = {"patch": last, "progress": progress, "test": test, "backlog": blog} + result['runners'][name] = {"patch": last, + "progress": progress, + "test": test, + "test-progress": test_prog, + "backlog": blog, + "mtime": stat.st_mtime} def add_one_runtime(fname, total, res): @@ -151,28 +172,105 @@ def add_remote_services(result, remote): result["remote"][remote["name"]] = data +def get_metric_values(db_connection, source, category, name, limit=120): + """ Query metrics from the DB """ + with db_connection.cursor() as cur: + cur.execute(""" + SELECT ts, value + FROM metrics + WHERE source = %s AND category = %s AND name = %s + ORDER BY ts DESC + LIMIT %s + """, (source, category, name, limit)) + return cur.fetchall() + + +def add_db(result, cfg): + db_name = cfg["db"]["name"] + + psql = psycopg2.connect(database=db_name) + psql.autocommit = True + + with psql.cursor() as cur: + cur.execute(f"SELECT pg_database_size('{db_name}')") + size = cur.fetchall()[0][0] + print("DB size", size) + + remote_disk = 0 + for _, remote in result["remote"].items(): + remote_disk = remote["disk-use"] + + # Insert metrics data + metrics_data = [ + ("system", "db", "size", size), + ("system", "disk", "util", result["disk-use"]), + ("system-metal", "disk", "util", remote_disk) + ] + + for source, 
category, name, value in metrics_data: + cur.execute(f"INSERT INTO metrics(ts, source, category, name, value) VALUES(NOW(), '{source}', '{category}', '{name}', %s)", (value,)) + + # Retrieve display data - query each metric individually + size_data = get_metric_values(psql, "system", "db", "size", limit=40) + disk_data = get_metric_values(psql, "system", "disk", "util", limit=40) + disk_remote_data = get_metric_values(psql, "system-metal", "disk", "util", limit=40) + + # Since they're inserted with the same timestamp, we can just zip them together + result["db"]["data"] = [ + { + 'ts': ts.isoformat(), + 'size': size, + 'disk': disk, + 'disk_remote': disk_remote + } + for (ts, size), (_, disk), (_, disk_remote) in zip(size_data, disk_data, disk_remote_data) + ] + # Reverse to get chronological order (oldest first) + result["db"]["data"].reverse() + + psql.close() + + def main(): with open(sys.argv[1], 'r') as fp: cfg = json.load(fp) log_files = {} run_logs = 'log-files' in cfg + + db = {} + run_db = 'db' in cfg + if os.path.isfile(sys.argv[2]): with open(sys.argv[2], 'r') as fp: prev = json.load(fp) + if "log-files" in prev and "prev-date" in prev["log-files"]: prev_date = datetime.datetime.fromisoformat(prev["log-files"]["prev-date"]) run_logs = datetime.datetime.now() - prev_date > datetime.timedelta(hours=3) print("Since log scan", datetime.datetime.now() - prev_date, "Will rescan:", run_logs) prev_date = prev["log-files"]["prev-date"] log_files = {"prev-date": prev_date, "data": prev["log-files"]["data"]} + + if "db" in prev and "prev-date" in prev["db"]: + prev_date = datetime.datetime.fromisoformat(prev["db"]["prev-date"]) + run_db = datetime.datetime.now() - prev_date > datetime.timedelta(hours=24) + print("Since db monitor", datetime.datetime.now() - prev_date, "Will rescan:", run_db) + prev_date = prev["db"]["prev-date"] + db = {"prev-date": prev_date, "data": prev["db"]["data"]} + if run_logs: prev_date = datetime.datetime.now().isoformat() log_files = {"prev-date": prev_date, } + if run_db: + prev_date = datetime.datetime.now().isoformat() + db = {"prev-date": prev_date, } + result = {'services': {}, 'runners': {}, 'remote': {}, 'date': datetime.datetime.now().isoformat(), - "log-files": log_files} + "log-files": log_files, + "db": db} if "trees" in cfg: for name in cfg["trees"]: add_one_tree(result, cfg["tree-path"], name) @@ -186,6 +284,11 @@ def main(): for remote in cfg["remote"]: add_remote_services(result, remote) + add_disk_size(result, "/") + + if "db" in cfg and run_db: + add_db(result, cfg) + with open(sys.argv[2], 'w') as fp: json.dump(result, fp) diff --git a/tests/patch/build_32bit/build_32bit.sh b/tests/patch/build_32bit/build_32bit.sh index d774e6c..6232aab 100755 --- a/tests/patch/build_32bit/build_32bit.sh +++ b/tests/patch/build_32bit/build_32bit.sh @@ -6,7 +6,7 @@ cc="ccache gcc" output_dir=build_32bit/ ncpu=$(grep -c processor /proc/cpuinfo) -build_flags="-Oline -j $ncpu W=1 C=1" +build_flags="-Oline -j $ncpu W=1" tmpfile_o=$(mktemp) tmpfile_n=$(mktemp) rc=0 @@ -14,6 +14,15 @@ rc=0 prep_config() { make CC="$cc" O=$output_dir ARCH=i386 allmodconfig ./scripts/config --file $output_dir/.config -d werror + ./scripts/config --file $output_dir/.config -d drm_werror + ./scripts/config --file $output_dir/.config -d kvm_werror +} + +clean_up_output() { + local file=$1 + + # modpost triggers this randomly on use of existing symbols + sed -i '/arch\/x86\/boot.* warning: symbol .* was not declared. 
Should it be static?/d' $file } echo "Using $build_flags redirect to $tmpfile_o and $tmpfile_n" @@ -25,10 +34,31 @@ HEAD=$(git rev-parse HEAD) echo "Tree base:" git log -1 --pretty='%h ("%s")' HEAD~ -echo "Baseline building the tree" +if [ x$FIRST_IN_SERIES == x0 ] && \ + ! git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Skip baseline build, not the first patch and no Kconfig updates" +else + echo "Baseline building the tree" -prep_config -make CC="$cc" O=$output_dir ARCH=i386 $build_flags + prep_config + make CC="$cc" O=$output_dir ARCH=i386 $build_flags +fi + +# Check if new files were added, new files will cause mod re-linking +# so all module and linker related warnings will pop up in the "after" +# but not "before". To avoid this we need to force re-linking on +# the "before", too. +touch_relink=/dev/null +if ! git log --diff-filter=A HEAD~.. --exit-code >>/dev/null || \ + git diff --name-only HEAD~ | grep -q -E "Makefile$" || \ + git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Trying to force re-linking, new files were added" + touch_relink=${output_dir}/include/generated/utsrelease.h +fi + +touch $touch_relink git checkout -q HEAD~ @@ -36,15 +66,19 @@ echo "Building the tree before the patch" prep_config make CC="$cc" O=$output_dir ARCH=i386 $build_flags 2> >(tee $tmpfile_o >&2) +clean_up_output $tmpfile_o incumbent=$(grep -i -c "\(warn\|error\)" $tmpfile_o) echo "Building the tree with the patch" git checkout -q $HEAD -prep_config -make CC="$cc" O=$output_dir ARCH=i386 $build_flags -j $ncpu 2> >(tee $tmpfile_n >&2) || rc=1 +# Also force rebuild "after" in case the file added isn't important. +touch $touch_relink +prep_config +make CC="$cc" O=$output_dir ARCH=i386 $build_flags 2> >(tee $tmpfile_n >&2) || rc=1 +clean_up_output $tmpfile_n current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) echo "Errors and warnings before: $incumbent this patch: $current" >&$DESC_FD diff --git a/tests/patch/build_allmodconfig_warn/build_allmodconfig.sh b/tests/patch/build_allmodconfig_warn/build_allmodconfig.sh index ff839dc..0ed1a13 100755 --- a/tests/patch/build_allmodconfig_warn/build_allmodconfig.sh +++ b/tests/patch/build_allmodconfig_warn/build_allmodconfig.sh @@ -14,6 +14,15 @@ rc=0 prep_config() { make CC="$cc" O=$output_dir allmodconfig ./scripts/config --file $output_dir/.config -d werror + ./scripts/config --file $output_dir/.config -d drm_werror + ./scripts/config --file $output_dir/.config -d kvm_werror +} + +clean_up_output() { + local file=$1 + + # modpost triggers this randomly on use of existing symbols + sed -i '/arch\/x86\/boot.* warning: symbol .* was not declared. Should it be static?/d' $file } echo "Using $build_flags redirect to $tmpfile_o and $tmpfile_n" @@ -25,10 +34,31 @@ HEAD=$(git rev-parse HEAD) echo "Tree base:" git log -1 --pretty='%h ("%s")' HEAD~ -echo "Baseline building the tree" +if [ x$FIRST_IN_SERIES == x0 ] && \ + ! git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Skip baseline build, not the first patch and no Kconfig updates" +else + echo "Baseline building the tree" -prep_config -make CC="$cc" O=$output_dir $build_flags + prep_config + make CC="$cc" O=$output_dir $build_flags +fi + +# Check if new files were added, new files will cause mod re-linking +# so all module and linker related warnings will pop up in the "after" +# but not "before". To avoid this we need to force re-linking on +# the "before", too. +touch_relink=/dev/null +if ! git log --diff-filter=A HEAD~.. 
--exit-code >>/dev/null || \ + git diff --name-only HEAD~ | grep -q -E "Makefile$" || \ + git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Trying to force re-linking, new files were added" + touch_relink=${output_dir}/include/generated/utsrelease.h +fi + +touch $touch_relink git checkout -q HEAD~ @@ -36,15 +66,19 @@ echo "Building the tree before the patch" prep_config make CC="$cc" O=$output_dir $build_flags 2> >(tee $tmpfile_o >&2) +clean_up_output $tmpfile_o incumbent=$(grep -i -c "\(warn\|error\)" $tmpfile_o) echo "Building the tree with the patch" git checkout -q $HEAD -prep_config -make CC="$cc" O=$output_dir $build_flags -j $ncpu 2> >(tee $tmpfile_n >&2) || rc=1 +# Also force rebuild "after" in case the file added isn't important. +touch $touch_relink +prep_config +make CC="$cc" O=$output_dir $build_flags 2> >(tee $tmpfile_n >&2) || rc=1 +clean_up_output $tmpfile_n current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) echo "Errors and warnings before: $incumbent this patch: $current" >&$DESC_FD diff --git a/tests/patch/build_clang/build_clang.sh b/tests/patch/build_clang/build_clang.sh index 3b59aed..ea33497 100755 --- a/tests/patch/build_clang/build_clang.sh +++ b/tests/patch/build_clang/build_clang.sh @@ -14,6 +14,8 @@ rc=0 prep_config() { make LLVM=1 O=$output_dir allmodconfig ./scripts/config --file $output_dir/.config -d werror + ./scripts/config --file $output_dir/.config -d drm_werror + ./scripts/config --file $output_dir/.config -d kvm_werror } echo "Using $build_flags redirect to $tmpfile_o and $tmpfile_n" @@ -25,10 +27,31 @@ HEAD=$(git rev-parse HEAD) echo "Tree base:" git log -1 --pretty='%h ("%s")' HEAD~ -echo "Baseline building the tree" +if [ x$FIRST_IN_SERIES == x0 ] && \ + ! git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Skip baseline build, not the first patch and no Kconfig updates" +else + echo "Baseline building the tree" -prep_config -make LLVM=1 O=$output_dir $build_flags + prep_config + make LLVM=1 O=$output_dir $build_flags +fi + +# Check if new files were added, new files will cause mod re-linking +# so all module and linker related warnings will pop up in the "after" +# but not "before". To avoid this we need to force re-linking on +# the "before", too. +touch_relink=/dev/null +if ! git log --diff-filter=A HEAD~.. --exit-code >>/dev/null || \ + git diff --name-only HEAD~ | grep -q -E "Makefile$" || \ + git diff --name-only HEAD~ | grep -q -E "Kconfig$" +then + echo "Trying to force re-linking, new files were added" + touch_relink=${output_dir}/include/generated/utsrelease.h +fi + +touch $touch_relink git checkout -q HEAD~ @@ -42,8 +65,11 @@ echo "Building the tree with the patch" git checkout -q $HEAD +# Also force rebuild "after" in case the file added isn't important. 
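
The build_* scripts above all share the same two decisions: skip the baseline build unless this is the first patch of a series or a Kconfig file changed, and force module re-linking (by touching include/generated/utsrelease.h before both builds) whenever new files, Makefiles or Kconfig files are involved, so the "before" and "after" builds emit comparable modpost and linker warnings. A rough Python rendering of that decision, purely for illustration; the function and parameter names are invented for this sketch:

.. code-block:: python

    # Illustration only: mirrors the shell checks in the build_* scripts.
    def baseline_plan(first_in_series: bool, new_files_added: bool,
                      changed_files: list) -> dict:
        touches_kconfig = any(f.endswith("Kconfig") for f in changed_files)
        touches_makefile = any(f.endswith("Makefile") for f in changed_files)
        return {
            # Later patches reuse the previous build as baseline unless a
            # Kconfig change could alter what gets built
            "build_baseline": first_in_series or touches_kconfig,
            # Touch utsrelease.h before both builds so re-linking warnings
            # show up on both sides of the comparison
            "force_relink": new_files_added or touches_makefile or touches_kconfig,
        }

    print(baseline_plan(False, False, ["drivers/net/phy/foo.c"]))
    # {'build_baseline': False, 'force_relink': False}
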
+touch $touch_relink + prep_config -make LLVM=1 O=$output_dir $build_flags -j $ncpu 2> >(tee $tmpfile_n >&2) || rc=1 +make LLVM=1 O=$output_dir $build_flags 2> >(tee $tmpfile_n >&2) || rc=1 current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) diff --git a/tests/patch/build_clang_rust/build_clang_rust.sh b/tests/patch/build_clang_rust/build_clang_rust.sh index d715bbc..25b3451 100755 --- a/tests/patch/build_clang_rust/build_clang_rust.sh +++ b/tests/patch/build_clang_rust/build_clang_rust.sh @@ -7,12 +7,10 @@ cc=clang output_dir=build_clang_rust/ ncpu=$(grep -c processor /proc/cpuinfo) build_flags="-Oline -j $ncpu W=1" -tmpfile_o=$(mktemp) -tmpfile_n=$(mktemp) rc=0 prep_config() { - make LLVM=1 O=$output_dir allmodconfig + make LLVM=1 O=$output_dir allmodconfig $build_flags # Disable -Werror so we get to see all the errors ./scripts/config --file $output_dir/.config -d werror @@ -20,6 +18,12 @@ prep_config() { # KVM has its own WERROR control, and it currently does generate errors! ./scripts/config --file $output_dir/.config -d kvm_werror + # Unclear if this is related to Rust but we seem to get key generation + # issues with SHA1 on Fedora 41. Switch to SHA256. + ./scripts/config --file $output_dir/.config -d module_sig_sha1 + ./scripts/config --file $output_dir/.config -e module_sig_sha256 + ./scripts/config --file $output_dir/.config --set-str module_sig_hash sha256 + # allmodconfig is not sufficient to get Rust support enabled. So # flip some options. @@ -28,15 +32,12 @@ prep_config() { ./scripts/config --file $output_dir/.config -d randstruct_full ./scripts/config --file $output_dir/.config -e randstruct_none ./scripts/config --file $output_dir/.config -d modversions + # Rust also seems currently incompatible with CFI (Rust 1.83) + ./scripts/config --file $output_dir/.config -d cfi_clang # Now Rust can be enabled ./scripts/config --file $output_dir/.config -e rust - # The Rust compiler does not play nicely with the kernel workarounds - # for speculation attacks. So turn off RETHUNK and X86_KERNEL_IBT - ./scripts/config --file $output_dir/.config -d rethunk - ./scripts/config --file $output_dir/.config -d x86_kernel_ibt - # Rust currently requires all dependencies are built in, so make # phylib built in. ./scripts/config --file $output_dir/.config -e phylib @@ -49,13 +50,13 @@ prep_config() { # Setting options above enabled some new options. Set them to their # defaults - make LLVM=1 O=$output_dir olddefconfig + make LLVM=1 O=$output_dir olddefconfig $build_flags # And verify rust is now actually enabled in the configuration. config_rust=$(./scripts/config --file $output_dir/.config --state CONFIG_RUST) if [ $config_rust != "y" ]; then - echo Unable to enable CONFIG_RUST + echo "CONFIG_RUST not set in generated config" >& $DESC_FD exit 1 fi } @@ -91,6 +92,9 @@ echo "Baseline building the tree" prep_config make LLVM=1 O=$output_dir $build_flags +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + git checkout -q HEAD~ echo "Building the tree before the patch" diff --git a/tests/patch/build_tools/build_tools.sh b/tests/patch/build_tools/build_tools.sh index 591fa99..369bbb8 100755 --- a/tests/patch/build_tools/build_tools.sh +++ b/tests/patch/build_tools/build_tools.sh @@ -4,16 +4,21 @@ output_dir=build_tools/ ncpu=$(grep -c processor /proc/cpuinfo) build_flags="-Oline -j $ncpu" -tmpfile_o=$(mktemp) -tmpfile_n=$(mktemp) rc=0 +pr() { + echo " ====== $@ ======" | tee -a /dev/stderr +} + # If it doesn't touch tools/ or include/, don't bother -if ! 
git diff --name-only HEAD~ | grep -E "^(include)|(tools)/"; then +if ! git diff --name-only HEAD~ | grep -q -E "^(include)|(tools)/"; then echo "No tools touched, skip" >&$DESC_FD exit 0 fi +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + # Looks like tools inherit WERROR, otherwise make O=$output_dir allmodconfig ./scripts/config --file $output_dir/.config -d werror @@ -24,34 +29,51 @@ HEAD=$(git rev-parse HEAD) echo "Tree base:" git log -1 --pretty='%h ("%s")' HEAD~ +echo "Now at:" +git log -1 --pretty='%h ("%s")' HEAD -echo "Cleaning" +# These are either very slow or don't build +export SKIP_TARGETS="bpf dt kvm landlock livepatch lsm sched_ext user_events mm powerpc filesystems/mount-notify ublk sgx nolibc nsfs" + +pr "Cleaning" make O=$output_dir $build_flags -C tools/testing/selftests/ clean -echo "Building the tree before the patch" +# Hard-clean YNL, too, otherwise YNL-related build problems may be masked +make -C tools/net/ynl/ distclean + +pr "Baseline building the tree" +git checkout -q HEAD~ +make O=$output_dir $build_flags headers +make O=$output_dir $build_flags -C tools/testing/selftests/ +git checkout -q $HEAD + +pr "Building the tree before the patch" git checkout -q HEAD~ make O=$output_dir $build_flags headers -for what in net net/forwarding net/tcp_ao; do - make O=$output_dir $build_flags -C tools/testing/selftests/ \ - TARGETS=$what 2> >(tee -a $tmpfile_o >&2) -done +make O=$output_dir $build_flags -C tools/testing/selftests/ \ + 2> >(tee -a $tmpfile_o >&2) incumbent=$(grep -i -c "\(warn\|error\)" $tmpfile_o) -echo "Building the tree with the patch" +pr "Checking if tree is clean" +git status -s 1>&2 +incumbent_dirt=$(git status -s | grep -c '^??') +pr "Building the tree with the patch" git checkout -q $HEAD make O=$output_dir $build_flags headers -for what in net net/forwarding net/tcp_ao; do - make O=$output_dir $build_flags -C tools/testing/selftests/ \ - TARGETS=$what 2> >(tee -a $tmpfile_n >&2) -done +make O=$output_dir $build_flags -C tools/testing/selftests/ \ + 2> >(tee -a $tmpfile_n >&2) current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) -echo "Errors and warnings before: $incumbent this patch: $current" >&$DESC_FD +pr "Checking if tree is clean" +git status -s 1>&2 +current_dirt=$(git status -s | grep -c '^??') + +echo "Errors and warnings before: $incumbent (+$incumbent_dirt) this patch: $current (+$current_dirt)" >&$DESC_FD if [ $current -gt $incumbent ]; then echo "New errors added" 1>&2 @@ -72,6 +94,12 @@ if [ $current -gt $incumbent ]; then rc=1 fi +if [ $current_dirt -gt $incumbent_dirt ]; then + echo "New untracked files added" 1>&2 + + rc=1 +fi + rm $tmpfile_o $tmpfile_n exit $rc diff --git a/tests/patch/cc_maintainers/test.py b/tests/patch/cc_maintainers/test.py index 1c9dbb7..2fe9f43 100644 --- a/tests/patch/cc_maintainers/test.py +++ b/tests/patch/cc_maintainers/test.py @@ -2,16 +2,19 @@ # # Copyright (c) 2020 Facebook -from typing import Tuple +""" +Test if relevant maintainers were CCed +""" + import datetime import email import email.utils -import subprocess -import tempfile +import json import os import re -import json -""" Test if relevant maintainers were CCed """ +import subprocess +import tempfile +from typing import Tuple emailpat = re.compile(r'([^ <"]*@[^ >"]*)') @@ -39,6 +42,9 @@ local_map = ["Vladimir Oltean ", "Alexander Duyck "] +# +# Maintainer auto-staleness checking +# class StalenessEntry: def __init__(self, e, since_months): @@ -116,8 +122,6 @@ def is_stale(self, e, since_months, dbg=None): def get_stale(sender_from, missing, out): - 
global stale_db - sender_corp = None for corp in corp_suffix: if sender_from.endswith(corp): @@ -134,21 +138,25 @@ def get_stale(sender_from, missing, out): ret.add(e) return ret +# +# Main +# def cc_maintainers(tree, thing, result_dir) -> Tuple[int, str, str]: + """ Main test entry point """ out = [] raw_gm = [] patch = thing if patch.series and patch.series.cover_pull: - return 0, f"Pull request co-post, skipping", "" + return 0, "Pull request co-post, skipping", "" msg = email.message_from_string(patch.raw_patch) addrs = msg.get_all('to', []) addrs += msg.get_all('cc', []) addrs += msg.get_all('from', []) addrs += msg.get_all('sender', []) - included = set([e for n, e in email.utils.getaddresses(addrs)]) + included = set([e.lower() for n, e in email.utils.getaddresses(addrs)]) out += ["=== Email ===", f"From: {msg.get_all('from')}", f"Included: {included}", ""] @@ -174,7 +182,7 @@ def cc_maintainers(tree, thing, result_dir) -> Tuple[int, str, str]: raw_gm.append(line.strip()) match = emailpat.search(line) if match: - addr = match.group(1) + addr = match.group(1).lower() expected.add(addr) if 'blamed_fixes' in line: blamed.add(addr) @@ -226,7 +234,7 @@ def cc_maintainers(tree, thing, result_dir) -> Tuple[int, str, str]: continue for have in included: if have in mmap_emails: - mapped.add(m) + mapped.add(m.lower()) found.update(mapped) missing.difference_update(mapped) diff --git a/tests/patch/check_selftest/check_selftest.sh b/tests/patch/check_selftest/check_selftest.sh deleted file mode 100755 index e6ce905..0000000 --- a/tests/patch/check_selftest/check_selftest.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Check if the shell selftest scripts are in correspond Makefile - -rt=0 - -files=$(git show --pretty="" --name-only -- tools/testing/selftests*.sh) -if [ -z "$files" ]; then - echo "No net selftest shell script" >&$DESC_FD - exit $rt -fi - -for file in $files; do - f=$(basename $file) - d=$(dirname $file) - if [ -f "${d}/Makefile" ] && ! 
grep -P "[\t| ]${f}" ${d}/Makefile; then - echo "Script ${f} not found in ${d}/Makefile" >&$DESC_FD - rt=1 - fi -done - -[ ${rt} -eq 0 ] && echo "net selftest script(s) already in Makefile" >&$DESC_FD - -exit $rt diff --git a/tests/patch/check_selftest/info.json b/tests/patch/check_selftest/info.json index 615779f..4b3c251 100644 --- a/tests/patch/check_selftest/info.json +++ b/tests/patch/check_selftest/info.json @@ -1,3 +1,4 @@ { - "run": ["check_selftest.sh"] + "pymod": "test", + "pyfunc": "check_selftest" } diff --git a/tests/patch/check_selftest/test.py b/tests/patch/check_selftest/test.py new file mode 100644 index 0000000..17684cb --- /dev/null +++ b/tests/patch/check_selftest/test.py @@ -0,0 +1,184 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" Test Makefile, .gitignore and config format """ + +import os +import subprocess +from typing import Tuple + + +LOCAL_DIR = os.path.dirname(__file__) + + +def ret_merge(ret, nret): + """ merge results """ + if ret[0] == 0 and nret[0] == 0: + val = 0 + elif ret[0] == 1 or nret[0] == 1: + val = 1 + else: + val = max(ret[0], nret[0]) + + desc = "" + if ret[1] and nret[1]: + desc = ret[1] + "; " + nret[1] + else: + desc = ret[1] + nret[1] + return (val, desc) + + +def check_new_files_makefile(tree, new_files, log): + """ Make sure new files are listed in a Makefile, somewhere """ + + ret = (0, "") + cnt = 0 + + for path in new_files: + if path.endswith(('.sh', '.py')): + needle = path + elif path.endswith(('.c')): + needle = path.split('.')[0] + else: + log.append("makefile inclusion check ignoring " + path) + continue + + makefile = os.path.dirname(path) + "/Makefile" + needle = os.path.basename(path) + + cmd = ["git", "grep", needle, "--", makefile] + result = subprocess.run(cmd, cwd=tree.path, capture_output=True, + check=False) + log.append(" ".join(cmd) + f":: {result.returncode}") + if result.returncode: + ret = ret_merge(ret, (1, path + " not found in Makefile")) + cnt += 1 + + if not ret[0] and cnt: + ret = (0, f"New files in Makefile checked ({cnt})") + + return ret + + +def check_new_files_gitignore(tree, new_files, log): + """ Make sure new binaries are listed in .gitignore """ + + ret = (0, "") + cnt = 0 + + for path in new_files: + if path.endswith(('.c')): + needle = path.split('.')[0] + else: + log.append("gitignore check ignoring " + path) + continue + + target = os.path.dirname(path) + "/.gitignore" + needle = os.path.basename(path) + + cmd = ["git", "grep", needle, "--", target] + result = subprocess.run(cmd, cwd=tree.path, capture_output=True, + check=False) + log.append(" ".join(cmd) + f":: {result.returncode}") + if result.returncode: + ret = ret_merge(ret, (1, needle + " not found in .gitignore")) + cnt += 1 + + if not ret[0] and cnt: + ret = (0, f"New files in gitignore checked ({cnt})") + + return ret + + +def _check_file_fmt(tree, path, script, result_dir, ident): + cmd = [os.path.join(LOCAL_DIR, script), os.path.join(tree.path, path)] + + result = subprocess.run(cmd, cwd=LOCAL_DIR, capture_output=True, + text=True, check=False) + with open(os.path.join(result_dir, ident), "w", encoding="utf-8") as fp: + fp.write(result.stdout) + return result.returncode + + +def check_file_formats(tree, file_list, log, result_dir): + """ Validate sort order of all touched files """ + + ret = (0, "") + i = 0 + for path in file_list: + if path.endswith("/config"): + script = "validate_config_format.py" + fmt = f"fmt-config-{i}" + elif path.endswith("/.gitignore"): + script = "validate_config_format.py" + fmt = f"fmt-gitignore-{i}" + elif 
path.endswith("/Makefile"): + script = "validate_makefile_format.py" + fmt = f"fmt-makefile-{i}" + else: + log.append("format check ignoring " + path) + continue + + i += 1 + if _check_file_fmt(tree, path, script, result_dir, fmt): + ret = ret_merge(ret, (1, "Bad format: " + path)) + + if not ret[0] and i: + ret = (0, f"Good format ({i})") + + return ret + + +def extract_files(patch): + """Extract paths of new files being added by the series.""" + + new_files = set() + mod_files = set() + lines = patch.raw_patch.split("\n") + + # Walk lines, skip last since it doesn't have next + for i, line in enumerate(lines[:-1]): + next_line = lines[i + 1] + + if not next_line.startswith("+++ b/"): + continue + if 'tools/testing/selftests/' not in next_line: + continue + + file_path = next_line[6:] + + if line == "--- /dev/null": + new_files.add(file_path) + else: + mod_files.add(file_path) + + # We're testing a series, same file may appear multiple times + mod_files -= new_files + return list(new_files), list(mod_files) + + +def check_selftest(tree, patch, result_dir) -> Tuple[int, str, str]: + """ Main function / entry point """ + + # Check for new files in the series + new_files, mod_files = extract_files(patch) + + ret = (0, "") + log = ["New files:"] + new_files + ["", "Modified files:"] + mod_files + [""] + + if not new_files and not mod_files: + ret = (0, "No changes to selftests") + else: + nret = check_file_formats(tree, new_files + mod_files, log, result_dir) + ret = ret_merge(ret, nret) + + if new_files: + nret = check_new_files_makefile(tree, new_files, log) + ret = ret_merge(ret, nret) + + nret = check_new_files_gitignore(tree, new_files, log) + ret = ret_merge(ret, nret) + + if not ret[0] and not ret[1]: + ret = (0, f"New files {len(new_files)}, modified {len(mod_files)}, no checks") + + return ret[0], ret[1], "\n".join(log) diff --git a/tests/patch/check_selftest/validate_config_format.py b/tests/patch/check_selftest/validate_config_format.py new file mode 100755 index 0000000..87d49fa --- /dev/null +++ b/tests/patch/check_selftest/validate_config_format.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import sys + + +def extract_key(raw): + k = raw.split("=")[0] + k = k.strip() + k = k.replace('_', '') + return k + + +def check_one(a, b, line): + _a = extract_key(a) + _b = extract_key(b) + + if _a >= _b: + return None + + return f"Lines {line}-{line+1} invalid order, {a} should be after {b}" + + +def validate_config(file_path): + """Validate a Makefile for proper variable assignment format.""" + + with open(file_path, "r", encoding="utf-8") as f: + content = f.read() + + lines = content.split("\n") + + all_errors = [] + + prev = "" + for i, line in enumerate(lines): + # ignore comments + if line.strip().startswith('#'): + continue + # ignore bad lines + if "=" not in line: + continue + if not prev: + prev = line + continue + + err = check_one(line, prev, i) + if err: + all_errors.append(err) + + prev = line + + if all_errors: + print(f"Validation errors in {file_path}:") + for error in all_errors: + print(error) + return False + + print(f"✓ {file_path} is properly formatted") + return True + + +def fix(file_path): + """Fix the config file by sorting entries alphabetically.""" + + with open(file_path, "r", encoding="utf-8") as f: + content = f.read() + + lines = content.split("\n") + + output = [] + + while lines: + idx = 0 + first = lines[0] + for i, line in enumerate(lines): + # ignore comments + if line.strip().startswith('#'): + continue + # ignore 
bad lines + if "=" not in line: + continue + + err = check_one(line, first, i) + if err: + first = line + idx = i + output.append(first) + lines.pop(idx) + + # Write the fixed content back to the file + with open(file_path, "w", encoding="utf-8") as f: + f.write("\n".join(output)) + + print(f"✓ Fixed {file_path} - config entries sorted alphabetically") + + +def main(): + """Main entry point for the script.""" + if len(sys.argv) < 2: + print("Usage: validate_config_format.py <file>") + sys.exit(1) + + file_path = sys.argv[1] + if file_path == "--fix": + file_path = sys.argv[2] + + code = 0 + if not validate_config(file_path): + code = 1 + if sys.argv[1] == "--fix": + fix(file_path) + + sys.exit(code) + + +if __name__ == "__main__": + main() diff --git a/tests/patch/check_selftest/validate_makefile_format.py b/tests/patch/check_selftest/validate_makefile_format.py new file mode 100755 index 0000000..fad086c --- /dev/null +++ b/tests/patch/check_selftest/validate_makefile_format.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python3 +""" +Script to validate Makefile variable assignment format. + +Expected format: +- Variable assignment starts with "VARIABLE = \", "VARIABLE := \", or + "VARIABLE += \" (with optional space) +- Each item on its own line, indented with a tab +- Each line ends with " \" (except the last item line) +- Items are sorted alphabetically +- Last line is a comment starting with "#" allowing the previous line to end with "\" +- Variables should only be assigned once (no duplicate assignments) +""" + +import re +import sys + + +def _extract_items(lines, line_nums): + """Extract and validate items from the middle lines.""" + errors = [] + items = [] + + # Skip last line if it's the terminating comment + end = len(lines) + if lines[-1].strip().startswith("#"): + end = -1 + + for i, line in enumerate(lines[:end]): + line_num = line_nums[i] + + # Check indentation (should be a tab) + if not line.startswith("\t"): + errors.append(f"Line {line_num}: Should start with tab, got '{line[:1]}'") + + # Remove tab and trailing " \" + item = line[1:] # Remove tab + if item.endswith(" \\"): + item = item[:-2].strip() + + if ' ' in item and '$' not in item: + errors.append(f"Line {line_num}: contains a space, multiple values? 
'{item}'") + + items.append((item, line_num)) + + return items, errors + + +def _directory_sort_key(item): + """Generate sort key considering directory depth first, then alphabetical order.""" + directory_count = item.count("/") + return (directory_count, item.lower()) + + +def _validate_sorting(items): + """Validate directory-aware alphabetical sorting of items.""" + errors = [] + + # Filter out function calls (items starting with $) as they don't need sorting + sortable_items = [] + for item, line_num in items: + if not item.startswith("$"): + sortable_items.append((item, line_num)) + + # Only validate sorting among sortable items + for i in range(len(sortable_items) - 1): + current_item, current_line = sortable_items[i] + next_item, next_line = sortable_items[i + 1] + + if current_item < next_item: + continue + + current_key = _directory_sort_key(current_item) + next_key = _directory_sort_key(next_item) + + if current_key > next_key: + current_dirs = current_item.count("/") + next_dirs = next_item.count("/") + + if current_dirs != next_dirs: + errors.append( + f"Lines {current_line}-{next_line}: Items not in directory-aware order: " + f"'{current_item}' ({current_dirs} dirs) should come after " + f"'{next_item}' ({next_dirs} dirs)" + ) + else: + errors.append( + f"Lines {current_line}-{next_line}: Items not in alphabetical order: " + f"'{current_item}' should come after '{next_item}'" + ) + return errors + + +def validate_variable_block(var_name, lines, line_nums): + """Validate a single variable assignment block.""" + errors = [] + + if not lines: + return errors + + # Extract and validate items from the middle lines + items, item_errors = _extract_items(lines, line_nums) + errors.extend(item_errors) + + # Check last line starts with "#" + if len(lines) > 1: + if not lines[-1].strip().startswith("#"): + errors.append( + f"Line {line_nums[-1]}: Trailing comment should start with '#'," + f" got '{lines[-1].strip()}'" + ) + elif len(lines[-1].strip()) > 5 and var_name not in lines[-1]: + errors.append( + f"Line {line_nums[-1]}: Trailing comment should contain the " + f"variable name ({var_name}), got '{lines[-1].strip()}'" + ) + + # Check alphabetical sorting + if len(items) > 1: + errors.extend(_validate_sorting(items)) + + return errors + + +def check_multiple_blocks(var_name, lines, line_nums): + """Check for multiple variable assignment blocks.""" + errors = [] + + # Check for multiple blocks + for i, line_no in enumerate(line_nums): + if i == 0: + continue + if line_no != line_nums[i - 1] + 1: + errors.append(f"Line {line_no}: Multiple variable assignment blocks, first block starts at line {line_nums[0]}") + + return errors + + +def _process_entry(variable_blocks, var_name, entry, line_num): + """Process a single entry and update the variable_blocks dictionary.""" + if var_name not in variable_blocks: + variable_blocks[var_name] = ([], [], ) + variable_blocks[var_name][0].append(entry) + variable_blocks[var_name][1].append(line_num) + + +def parse_makefile(content): + """Parse Makefile and extract variable assignment blocks.""" + lines = content.split("\n") + variable_blocks = {} + + i = 0 + var_name = None + while i < len(lines): + # Look for variable assignment with backslash continuation (=, :=, +=) + match = re.match(r"^([A-Z_][A-Z0-9_]*)\s*(:?=|\+=)(.*)$", lines[i]) + if match: + var_name = match.group(1) + entry = match.group(3).strip() + if entry.startswith("$") and not entry.startswith("\\"): + # Special entry, probably for a good reason. Ignore completely. 
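
The ordering rule enforced by the Makefile validator above is directory-aware: `_directory_sort_key()` sorts entries by how many directory levels they contain before falling back to a case-insensitive alphabetical comparison. A tiny, self-contained illustration of the resulting order (not part of the test itself):

.. code-block:: python

    # Same key as _directory_sort_key(): depth first, then case-insensitive name
    def directory_sort_key(item):
        return (item.count("/"), item.lower())

    entries = ["net/forwarding/router.sh", "config", "net/fib_tests.sh", "bpf.c"]
    print(sorted(entries, key=directory_sort_key))
    # ['bpf.c', 'config', 'net/fib_tests.sh', 'net/forwarding/router.sh']
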
+ var_name = None + elif len(var_name) < 3 or "FLAGS" in var_name or 'LIBS' in var_name: + # Special case for CFLAGS, which is often used for multiple values + # and is not sorted alphabetically. + var_name = None + elif entry.strip() != "\\": + _process_entry(variable_blocks, var_name, '\t' + entry, i + 1) + elif var_name: + _process_entry(variable_blocks, var_name, lines[i], i + 1) + + if var_name and not lines[i].endswith('\\'): + var_name = None + i += 1 + + return variable_blocks + + +def validate_makefile(file_path): + """Validate a Makefile for proper variable assignment format.""" + + with open(file_path, "r", encoding="utf-8") as f: + content = f.read() + + variable_blocks = parse_makefile(content) + + if not variable_blocks: + print(f"No multi-line variable assignments found in {file_path}") + return True + + all_errors = [] + + # Validate each variable block + for var_name, (block_lines, line_nums) in variable_blocks.items(): + errors = validate_variable_block(var_name, block_lines, line_nums) + errors += check_multiple_blocks(var_name, block_lines, line_nums) + if errors: + all_errors.extend( + [f"Variable {var_name}:"] + [f" {error}" for error in errors] + ) + + if all_errors: + print(f"Validation errors in {file_path}:") + for error in all_errors: + print(error) + return False + + print(f"✓ {file_path} is properly formatted") + return True + + +def main(): + """Main entry point for the script.""" + if len(sys.argv) != 2: + print("Usage: validate_makefile_format.py ") + sys.exit(1) + + file_path = sys.argv[1] + + if not validate_makefile(file_path): + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tests/patch/checkpatch/checkpatch.sh b/tests/patch/checkpatch/checkpatch.sh index 2a95905..886404f 100755 --- a/tests/patch/checkpatch/checkpatch.sh +++ b/tests/patch/checkpatch/checkpatch.sh @@ -9,7 +9,8 @@ MACRO_ARG_REUSE,\ ALLOC_SIZEOF_STRUCT,\ NO_AUTHOR_SIGN_OFF,\ GIT_COMMIT_ID,\ -CAMELCASE +CAMELCASE,\ +FILE_PATH_CHANGES tmpfile=$(mktemp) diff --git a/tests/patch/deprecated_api/deprecated_api.sh b/tests/patch/deprecated_api/deprecated_api.sh index 2d4e67b..31e1562 100755 --- a/tests/patch/deprecated_api/deprecated_api.sh +++ b/tests/patch/deprecated_api/deprecated_api.sh @@ -4,7 +4,7 @@ # Copyright (c) 2020 Facebook errors=( module_param ) -warnings=( "\Wdev_hold(" "\Wdev_put(" "\Wput_net(" "\Wget_net(" ) +warnings=( "\Wdev_hold(" "\Wdev_put(" "\Wput_net(" "\Wget_net(" "\Winit_dummy_netdev(" ) res=0 msg="" @@ -51,5 +51,10 @@ else msg="Found: ${msg:2}" fi -echo -e "$msg" >&$DESC_FD +if [[ -z $DESC_FD ]] +then + echo -e "$msg" +else + echo -e "$msg" >& $DESC_FD +fi exit $res diff --git a/tests/patch/kdoc/info.json b/tests/patch/kdoc/info.json index a409c44..bfac5c9 100644 --- a/tests/patch/kdoc/info.json +++ b/tests/patch/kdoc/info.json @@ -1,3 +1,5 @@ { - "run": ["kdoc.sh"] + "pymod": "test", + "pyfunc": "kdoc", + "pull-requests": true } diff --git a/tests/patch/kdoc/kdoc.sh b/tests/patch/kdoc/kdoc.sh deleted file mode 100755 index 88c574c..0000000 --- a/tests/patch/kdoc/kdoc.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Copyright (C) 2019 Netronome Systems, Inc. 
-# Copyright (c) 2020 Facebook - -tmpfile_o=$(mktemp) -tmpfile_n=$(mktemp) -rc=0 - -files=$(git show --pretty="" --name-only HEAD) - -HEAD=$(git rev-parse HEAD) - -echo "Checking the tree before the patch" -git checkout -q HEAD~ -./scripts/kernel-doc -none $files 2> >(tee $tmpfile_o >&2) - -incumbent=$(grep -v 'Error: Cannot open file ' $tmpfile_o | wc -l) - -echo "Checking the tree with the patch" - -git checkout -q $HEAD -./scripts/kernel-doc -none $files 2> >(tee $tmpfile_n >&2) - -current=$(grep -v 'Error: Cannot open file ' $tmpfile_n | wc -l) - -echo "Errors and warnings before: $incumbent this patch: $current" >&$DESC_FD - -if [ $current -gt $incumbent ]; then - echo "New warnings added" 1>&2 - diff $tmpfile_o $tmpfile_n 1>&2 - - echo "Per-file breakdown" 1>&2 - tmpfile_fo=$(mktemp) - tmpfile_fn=$(mktemp) - - grep -i "\(warn\|error\)" $tmpfile_o | sed -n 's@\(^\.\./[/a-zA-Z0-9_.-]*.[ch]\):.*@\1@p' | sort | uniq -c \ - > $tmpfile_fo - grep -i "\(warn\|error\)" $tmpfile_n | sed -n 's@\(^\.\./[/a-zA-Z0-9_.-]*.[ch]\):.*@\1@p' | sort | uniq -c \ - > $tmpfile_fn - - diff $tmpfile_fo $tmpfile_fn 1>&2 - rm $tmpfile_fo $tmpfile_fn - - rc=1 -fi - -rm $tmpfile_o $tmpfile_n - -exit $rc diff --git a/tests/patch/kdoc/test.py b/tests/patch/kdoc/test.py new file mode 100644 index 0000000..e6d3216 --- /dev/null +++ b/tests/patch/kdoc/test.py @@ -0,0 +1,216 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" Test if kernel-doc generates new warnings """ + +import collections +import dataclasses +import re +import subprocess +from typing import List, Optional, Tuple + +def get_git_head(tree) -> str: + """ Get the git commit ID for head commit. """ + + cmd = ["git", "rev-parse", "HEAD"] + result = subprocess.run(cmd, cwd=tree.path, capture_output=True, text=True, + check=True) + + return result.stdout.strip() + +@dataclasses.dataclass(frozen=True, eq=True, order=True, init=True) +class KdocWarning: + # The original warning message + message : str = dataclasses.field(repr=False, compare=False) + _ : dataclasses.KW_ONLY + # Kind of warning line, determined during init + kind : str = dataclasses.field(repr=True, compare=True) + # The file path, or None if unable to determine + file : Optional[str] = dataclasses.field(repr=True, compare=True) + # The line, or None if unable to determine + # Note: *not* part of comparison, or hash! 
+ line : Optional[int] = dataclasses.field(repr=True, compare=False) + # The content of the warning (excluding kind, file, line) + content : str = dataclasses.field(repr=True, compare=True) + + @classmethod + def from_text(self, line, extra=None): + message = line + + if extra: + message += '\n' + extra + + parser = re.compile( + r""" + ^ # Start of string + (?Pwarning|error): # Severity + \s+ # Spacing + (?P[/a-z0-9_.-]*): # File path + (?P[0-9]+) # Line number + \s* # Spacing + (?P.*) # Warning content + $ # End of string + """, + re.VERBOSE | re.IGNORECASE) + + m = parser.match(line) + if m: + kind = m['kind'] + file = m['file'] + line = int(m['line']) + content = m['content'] + if extra: + content += '\n' + extra + else: + kind = 'Unknown' + file = None + line = None + content = message + + return KdocWarning(message, kind=kind, file=file, line=line, + content=content) + + def __str__(self): + return self.message + +def parse_warnings(lines, logs) -> List[KdocWarning]: + skip = False + length = len(lines) + + warnings = [] + + # Walk through lines and convert to warning objects + for i, line in enumerate(lines): + if skip: + skip = False + continue + + if line.endswith(':') and i + 1 < length: + extra = lines[i + 1] + skip = True + elif not line.strip(): + continue + else: + logs += [": " + line.strip()] + extra = None + + warnings.append(KdocWarning.from_text(line, extra)) + + return warnings + +def run_kernel_doc(tree, commitish, files, logs) -> List[KdocWarning]: + """ Run ./scripts/kdoc on a given commit and capture its results. """ + + logs += ["files: " + str(files)] + + if not files: + return [] + + cmd = ["git", "checkout", "-q", commitish] + subprocess.run(cmd, cwd=tree.path, capture_output=False, check=True) + + cmd = ["./scripts/kernel-doc", "-Wall", "-none"] + files + result = subprocess.run(cmd, cwd=tree.path, text=True, check=False, + stderr=subprocess.PIPE) + + lines = result.stderr.strip().split('\n') + + return parse_warnings(lines, logs) + +def extract_files(patch): + """Extract paths added or modified by the patch.""" + + before_files = set() + after_files = set() + lines = patch.raw_patch.split("\n") + + # Walk lines, skip last since it doesn't have next + for i, line in enumerate(lines[:-1]): + next_line = lines[i + 1] + + if not next_line.startswith("+++ b/"): + continue + + file_path = next_line[6:] + + if "/dev/null" not in line: + before_files.add(file_path) + if "/dev/null" not in next_line: + after_files.add(file_path) + + return list(before_files), list(after_files) + +def kdoc(tree, patch, _result_dir) -> Tuple[int, str, str]: + """ Main function / entry point """ + + before_files, after_files = extract_files(patch) + + if not before_files and not after_files: + return 1, "Patch has no modified files?", "" + + ret = 0 + desc = "" + log = [] + + head_commit = get_git_head(tree) + + try: + log += ["Warnings before patch:"] + incumbent_warnings = run_kernel_doc(tree, "HEAD~", before_files, log) + log.extend(map(str, incumbent_warnings)) + + log += ["", "Current warnings:"] + current_warnings = run_kernel_doc(tree, head_commit, after_files, log) + log.extend(map(str, current_warnings)) + except subprocess.CalledProcessError as e: + desc = f'{e.cmd} failed with exit code {e.returncode}' + if e.stderr: + log += e.stderr.split('\n') + ret = 1 + + return ret, desc, "\n".join(log) + + current_set = set(current_warnings) + incumbent_set = set(incumbent_warnings) + + # This construction preserves ordering vs using set difference + new_warnings = [x for x in 
current_warnings if x not in incumbent_set] + rm_warnings = [x for x in incumbent_warnings if x not in current_set] + + incumbent_count = len(incumbent_warnings) + current_count = len(current_warnings) + new_count = len(new_warnings) + rm_count = len(rm_warnings) + + desc = f'Warnings before: {incumbent_count} after: {current_count}' + brac = [] + if new_count: + brac += [f'add: {new_count}'] + if rm_count: + brac += [f'del: {rm_count}'] + if brac: + desc += f' ({" ".join(brac)})' + log += ["", desc] + + if rm_count: + log += ["", "Warnings removed:"] + log.extend(map(str, rm_warnings)) + + file_breakdown = collections.Counter((x.file for x in rm_warnings)) + + log += ["Per-file breakdown:"] + for f, count in file_breakdown.items(): + log += [f'{count:6} {f}'] + + if new_count: + ret = 1 + + log += ["", "New warnings added:"] + log.extend(map(str, new_warnings)) + + file_breakdown = collections.Counter((x.file for x in new_warnings)) + + log += ["Per-file breakdown:"] + for f, count in file_breakdown.items(): + log += [f'{count:6} {f}'] + + return ret, desc, "\n".join(log) diff --git a/tests/patch/maintainers/info.json b/tests/patch/maintainers/info.json deleted file mode 100644 index 26f8179..0000000 --- a/tests/patch/maintainers/info.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "run": ["maintainers.sh"], - "disabled": true -} diff --git a/tests/patch/maintainers/maintainers.sh b/tests/patch/maintainers/maintainers.sh deleted file mode 100755 index bb50ca6..0000000 --- a/tests/patch/maintainers/maintainers.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -if git diff-index --quiet --name-only HEAD~ -- MAINTAINERS; then - echo "MAINTAINERS not touched" >&$DESC_FD - exit 0 -fi - -tmpfile_o=$(mktemp) -tmpfile_n=$(mktemp) -rc=0 - -echo "MAINTAINERS self-test: redirect to $tmpfile_o and $tmpfile_n" - -HEAD=$(git rev-parse HEAD) - -git checkout -q HEAD~ - -echo "Checking old warning count" - -./scripts/get_maintainer.pl --self-test 2> >(tee $tmpfile_o >&2) -incumbent=$(grep -i -c "\(warn\|error\)" $tmpfile_o) - -echo "Checking new warning count" - -git checkout -q $HEAD - -./scripts/get_maintainer.pl --self-test 2> >(tee $tmpfile_n >&2) -current=$(grep -i -c "\(warn\|error\)" $tmpfile_n) - -echo "Errors and warnings before: $incumbent this patch: $current" >&$DESC_FD - -if [ $current -gt $incumbent ]; then - echo "New errors added" 1>&2 - diff $tmpfile_o $tmpfile_n 1>&2 - - rc=1 -fi - -rm $tmpfile_o $tmpfile_n - -exit $rc diff --git a/tests/patch/pylint/info.json b/tests/patch/pylint/info.json new file mode 100644 index 0000000..176fa1e --- /dev/null +++ b/tests/patch/pylint/info.json @@ -0,0 +1,3 @@ +{ + "run": ["pylint.sh"] +} diff --git a/tests/patch/pylint/pylint.sh b/tests/patch/pylint/pylint.sh new file mode 100755 index 0000000..bac6df8 --- /dev/null +++ b/tests/patch/pylint/pylint.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +HEAD=$(git rev-parse HEAD) +rc=0 + +pr() { + echo " ====== $* ======" | tee -a /dev/stderr +} + +# If it doesn't touch .py files, don't bother. Ignore deleted. +if ! 
git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -q -E "\.py$" +then + echo "No python scripts touched, skip" >&"$DESC_FD" + exit 0 +fi + +pylint --version || exit 1 + +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + +echo "Redirect to $tmpfile_o and $tmpfile_n" + +echo "Tree base:" +git log -1 --pretty='%h ("%s")' HEAD~ +echo "Now at:" +git log -1 --pretty='%h ("%s")' HEAD + +pr "Checking before the patch" +git checkout -q HEAD~ + +# Also ignore created, as not present in the parent commit +for f in $(git show --diff-filter=M --pretty="" --name-only "${HEAD}" | grep -E "\.py$"); do + pylint "$f" | tee -a "$tmpfile_o" +done + +incumbent=$(grep -i -c ": E[0-9][0-9][0-9][0-9]: " "$tmpfile_o") +incumbent_w=$(grep -i -c ": [WC][0-9][0-9][0-9][0-9]: " "$tmpfile_o") + +pr "Checking the tree with the patch" +git checkout -q "$HEAD" + +for f in $(git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -E "\.py$"); do + pylint "$f" | tee -a "$tmpfile_n" +done + +current=$(grep -i -c ": E[0-9][0-9][0-9][0-9]: " "$tmpfile_n") +current_w=$(grep -i -c ": [WC][0-9][0-9][0-9][0-9]: " "$tmpfile_n") + +echo "Errors before: $incumbent (+warn: $incumbent_w) this patch: $current (+warn: $current_w)" >&"$DESC_FD" + +if [ "$current" -gt "$incumbent" ]; then + echo "New errors added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=1 +elif [ "$current_w" -gt "$incumbent_w" ]; then + echo "New warnings added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=250 +fi + +rm "$tmpfile_o" "$tmpfile_n" + +exit $rc diff --git a/tests/patch/ruff/info.json b/tests/patch/ruff/info.json new file mode 100644 index 0000000..03f4d40 --- /dev/null +++ b/tests/patch/ruff/info.json @@ -0,0 +1,3 @@ +{ + "run": ["ruff.sh"] +} diff --git a/tests/patch/ruff/ruff.sh b/tests/patch/ruff/ruff.sh new file mode 100755 index 0000000..fa25fde --- /dev/null +++ b/tests/patch/ruff/ruff.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +HEAD=$(git rev-parse HEAD) +rc=0 + +pr() { + echo " ====== $* ======" | tee -a /dev/stderr +} + +# If it doesn't touch .py files, don't bother. Ignore deleted. +if ! 
git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -q -E "\.py$" +then + echo "No python scripts touched, skip" >&"$DESC_FD" + exit 0 +fi + +ruff --version || exit 1 + +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + +echo "Redirect to $tmpfile_o and $tmpfile_n" + +echo "Tree base:" +git log -1 --pretty='%h ("%s")' HEAD~ +echo "Now at:" +git log -1 --pretty='%h ("%s")' HEAD + +pr "Checking before the patch" +git checkout -q HEAD~ + +# Also ignore created, as not present in the parent commit +for f in $(git show --diff-filter=M --pretty="" --name-only "${HEAD}" | grep -E "\.py$"); do + ruff check --output-format pylint "$f" | tee -a "$tmpfile_o" +done + +incumbent=$(wc -l < "$tmpfile_o") + +pr "Checking the tree with the patch" +git checkout -q "$HEAD" + +for f in $(git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -E "\.py$"); do + ruff check --output-format pylint "$f" | tee -a "$tmpfile_n" +done + +current=$(wc -l < "$tmpfile_n") + +echo "Errors before: $incumbent ; this patch: $current" >&"$DESC_FD" + +if [ "$current" -gt "$incumbent" ]; then + echo "New errors added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=1 +fi + +rm "$tmpfile_o" "$tmpfile_n" + +exit $rc diff --git a/tests/patch/shellcheck/info.json b/tests/patch/shellcheck/info.json new file mode 100644 index 0000000..fa95e9e --- /dev/null +++ b/tests/patch/shellcheck/info.json @@ -0,0 +1,3 @@ +{ + "run": ["shellcheck.sh"] +} diff --git a/tests/patch/shellcheck/shellcheck.sh b/tests/patch/shellcheck/shellcheck.sh new file mode 100755 index 0000000..8a93c03 --- /dev/null +++ b/tests/patch/shellcheck/shellcheck.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +HEAD=$(git rev-parse HEAD) +rc=0 + +# SC2317 = unreachable code, gets confused by test case definitions +SC_FLAGS="-x -e SC2317" + +pr() { + echo " ====== $* ======" | tee -a /dev/stderr +} + +# If it doesn't touch .sh files, don't bother. Ignore deleted. +if ! git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -q -E "\.sh$" +then + echo "No shell scripts touched, skip" >&"$DESC_FD" + exit 0 +fi + +shellcheck --version || exit 1 + +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + +echo "Redirect to $tmpfile_o and $tmpfile_n" + +echo "Tree base:" +git log -1 --pretty='%h ("%s")' HEAD~ +echo "Now at:" +git log -1 --pretty='%h ("%s")' HEAD + +pr "Checking before the patch" +git checkout -q HEAD~ + +# Also ignore created, as not present in the parent commit +for f in $(git show --diff-filter=M --pretty="" --name-only "${HEAD}" | grep -E "\.sh$"); do + sha=$(echo "$f" | sha256sum | awk '{print $1}') + echo "Checking $f - $sha" + echo + + ( + cd "$(dirname "$f")" || exit 1 + sha="${tmpfile_o}_${sha}" + rm -f "${sha}" + shellcheck $SC_FLAGS "$(basename "$f")" | tee -a "${tmpfile_o}" "${sha}" + echo + ) +done + +# ex: SC3045 (warning): In POSIX sh, printf -v is undefined. 
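Side note (not part of the patch): the severity counting below reduces to substring counts over shellcheck's "CODE (severity): message" lines. A tiny Python sketch with made-up sample output:

    sample = [
        "In cleanup.sh line 3:",
        "SC2086 (info): Double quote to prevent globbing and word splitting.",
        "SC3045 (warning): In POSIX sh, printf -v is undefined.",
    ]
    errors = sum(" (error):" in line for line in sample)      # 0
    warnings = sum(" (warning):" in line for line in sample)  # 1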
+# severity: error, warning, info, style +incumbent=$(grep -c " (error):" "$tmpfile_o") +incumbent_w=$(grep -c " (warning):" "$tmpfile_o") + +pr "Checking the tree with the patch" +git checkout -q "$HEAD" + +declare -A files +for f in $(git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -E "\.sh$"); do + sha=$(echo "$f" | sha256sum | awk '{print $1}') + files[${sha}]="${f}" + echo "Checking $f - $sha" + echo + + ( + cd "$(dirname "$f")" || exit 1 + sha="${tmpfile_n}_${sha}" + rm -f "${sha}" + shellcheck $SC_FLAGS "$(basename "$f")" | tee -a "${tmpfile_n}" "${sha}" + echo + ) +done + +# severity: error, warning, info, style +current=$(grep -c " (error):" "$tmpfile_n") +current_w=$(grep -c " (warning):" "$tmpfile_n") + +# if a file was compliant before or is new, mark everything as error to keep it good. +for f in "${tmpfile_n}_"*; do + sha="${f:${#tmpfile_n}+1}" + fpath="${files[${sha}]}" + [ ! -s "${f}" ] && echo "${fpath} is shellcheck compliant" && continue + + old="${tmpfile_o}_${sha}" + [ -s "${old}" ] && continue # wasn't compliant + + if [ -f "${old}" ]; then + echo "${fpath} was shellcheck compliant, not anymore" 1>&2 + else + echo "${fpath} is a new file, but not shellcheck compliant" 1>&2 + fi + + extra=$(grep -c -E " \((warning|info|style)\):" "${f}") + current=$((current + extra)) +done + +echo "Errors before: $incumbent (+warn: $incumbent_w) this patch: $current (+warn: $current_w)" >&"$DESC_FD" + +if [ "$current" -gt "$incumbent" ]; then + echo "New errors added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=1 +elif [ "$current_w" -gt "$incumbent_w" ]; then + echo "New warnings added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=250 +fi + +rm "$tmpfile_o"* "$tmpfile_n"* + +exit $rc diff --git a/tests/patch/verify_fixes/info.json b/tests/patch/verify_fixes/info.json index 114c7d3..181fc6c 100644 --- a/tests/patch/verify_fixes/info.json +++ b/tests/patch/verify_fixes/info.json @@ -1,5 +1,5 @@ { - "source": "https://raw.githubusercontent.com/gregkh/gregkh-linux/master/work/verify_fixes.sh", + "source": "https://raw.githubusercontent.com/gregkh/gregkh-linux/master/work/scripts/verify_fixes.sh", "run": ["verify_fixes.sh", "HEAD~..HEAD"], "pull-requests": true } diff --git a/tests/patch/verify_signedoff/info.json b/tests/patch/verify_signedoff/info.json index e044eaa..1539d03 100644 --- a/tests/patch/verify_signedoff/info.json +++ b/tests/patch/verify_signedoff/info.json @@ -1,5 +1,5 @@ { - "source": "https://raw.githubusercontent.com/gregkh/gregkh-linux/master/work/verify_signedoff.sh", + "source": "https://raw.githubusercontent.com/gregkh/gregkh-linux/master/work/scripts/verify_signedoff.sh", "run": ["verify_signedoff.sh", "HEAD~..HEAD"], "pull-requests": true } diff --git a/tests/patch/yamllint/info.json b/tests/patch/yamllint/info.json new file mode 100644 index 0000000..1dea919 --- /dev/null +++ b/tests/patch/yamllint/info.json @@ -0,0 +1,3 @@ +{ + "run": ["yamllint.sh"] +} diff --git a/tests/patch/yamllint/yamllint.sh b/tests/patch/yamllint/yamllint.sh new file mode 100755 index 0000000..37d014c --- /dev/null +++ b/tests/patch/yamllint/yamllint.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +HEAD=$(git rev-parse HEAD) +rc=0 + +pr() { + echo " ====== $* ======" | tee -a /dev/stderr +} + +# If it doesn't touch .yaml files, don't bother. Ignore deleted. +if ! 
git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -q -E "\.yaml$" +then + echo "No YAML files touched, skip" >&"$DESC_FD" + exit 0 +fi + +yamllint --version || exit 1 + +tmpfile_o=$(mktemp) +tmpfile_n=$(mktemp) + +echo "Redirect to $tmpfile_o and $tmpfile_n" + +echo "Tree base:" +git log -1 --pretty='%h ("%s")' HEAD~ +echo "Now at:" +git log -1 --pretty='%h ("%s")' HEAD + +pr "Checking before the patch" +git checkout -q HEAD~ + +# Also ignore created, as not present in the parent commit +for f in $(git show --diff-filter=M --pretty="" --name-only "${HEAD}" | grep -E "\.yaml$"); do + ( + cd "$(dirname "$f")" || exit 1 + + yamllint "$(basename "$f")" | tee -a "$tmpfile_o" + ) +done + +incumbent=$(grep -i -c " error " "$tmpfile_o") +incumbent_w=$(grep -i -c " warning " "$tmpfile_o") + +pr "Checking the tree with the patch" +git checkout -q "$HEAD" + +for f in $(git show --diff-filter=AM --pretty="" --name-only "${HEAD}" | grep -E "\.yaml$"); do + ( + cd "$(dirname "$f")" || exit 1 + + yamllint "$(basename "$f")" | tee -a "$tmpfile_n" + ) +done + +current=$(grep -i -c " error " "$tmpfile_n") +current_w=$(grep -i -c " warning " "$tmpfile_n") + +echo "Errors before: $incumbent (+warn: $incumbent_w) this patch: $current (+warn: $current_w)" >&"$DESC_FD" + +if [ "$current" -gt "$incumbent" ]; then + echo "New errors added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=1 +elif [ "$current_w" -gt "$incumbent_w" ]; then + echo "New warnings added" 1>&2 + diff -U 0 "$tmpfile_o" "$tmpfile_n" 1>&2 + + rc=250 +fi + +rm "$tmpfile_o" "$tmpfile_n" + +exit $rc diff --git a/tests/series/maintainers/info.json b/tests/series/maintainers/info.json new file mode 100644 index 0000000..cce6da5 --- /dev/null +++ b/tests/series/maintainers/info.json @@ -0,0 +1,4 @@ +{ + "pymod": "test", + "pyfunc": "maintainers" +} diff --git a/tests/series/maintainers/test.py b/tests/series/maintainers/test.py new file mode 100644 index 0000000..6adb05f --- /dev/null +++ b/tests/series/maintainers/test.py @@ -0,0 +1,190 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" Test if the MAINTAINERS file needs an update """ + +import os +import subprocess +from typing import Tuple + +# +# Checking for needed new MAINTAINERS entries +# + +new_file_ignore_pfx = [ 'Documentation/', 'tools/testing/'] + +def extract_files(series): + """Extract paths of new files being added by the series.""" + + new_files = set() + mod_files = set() + lines = [] + for patch in series.patches: + lines += patch.raw_patch.split("\n") + + # Walk lines, skip last since it doesn't have next + for i, line in enumerate(lines[:-1]): + next_line = lines[i + 1] + + if not next_line.startswith("+++ b/"): + continue + file_path = next_line[6:] + + # .startswith() can take a while array of alternatives + if file_path.startswith(tuple(new_file_ignore_pfx)): + continue + + if line == "--- /dev/null": + new_files.add(file_path) + else: + mod_files.add(file_path) + + # We're testing a series, same file may appear multiple times + mod_files -= new_files + return list(new_files), list(mod_files) + + +def count_files_for_maintainer_entry(tree, maintainer_entry): + """Count how many files are covered by a specific maintainer entry.""" + patterns = [] + + # Extract file patterns from the maintainer entry + for line in maintainer_entry.split("\n"): + if line.startswith("F:"): + pattern = line[2:].strip() + patterns.append(pattern) + if not patterns: + return 0 + + # Count files matching these patterns + total_files = 0 + for pattern in patterns: + if pattern[-1] 
== '/': + where = pattern + what = '*' + elif '/' in pattern: + where = os.path.dirname(pattern) + what = os.path.basename(pattern) + else: + where = "." + what = pattern + cmd = ["find", where, "-name", what, "-type", "f"] + result = subprocess.run(cmd, cwd=tree.path, capture_output=True, + text=True, check=False) + if result.returncode == 0: + total_files += result.stdout.count("\n") + + return total_files + + +def get_maintainer_entry_for_file(tree, file_path): + """Get the full MAINTAINERS entry for a specific file.""" + + cmd = ["./scripts/get_maintainer.pl", "--sections", file_path] + result = subprocess.run(cmd, cwd=tree.path, capture_output=True, text=True, + check=False) + + if result.returncode == 0: + return result.stdout + return "" + + +def check_maintainer_coverage(tree, new_files, out): + """Check if new files should have an MAINTAINERS entry.""" + has_miss = False + has_fail = False + has_warn = False + warnings = [] + + # Ideal entry size is <50. But if someone is adding a Kconfig file, + # chances are they should be a maintainer. + pass_target = 50 + if 'Kconfig' in new_files: + pass_target = 3 + + for file_path in new_files: + # The build files are sometimes outside of the directory covered + # by the new MAINTAINERS entry + if file_path.endswith(("/Makefile", "/Kconfig")): + continue + + out.append("\nChecking coverage for a new file: " + file_path) + + maintainer_info = get_maintainer_entry_for_file(tree, file_path) + + # This should not happen, Linus catches all + if not maintainer_info.strip(): + warnings.append(f"Failed to fetch MAINTAINERS for {file_path}") + has_warn = True + continue + + # Parse the maintainer sections + sections = [] + current_section = [] + + prev = "" + for line in maintainer_info.split("\n"): + if len(line) > 1 and line[1] == ':': + if not current_section: + current_section = [prev] + current_section.append(line) + elif len(line) < 2: + if current_section: + sections.append("\n".join(current_section)) + current_section = [] + prev = line + + if current_section: + sections.append("\n".join(current_section)) + + # Check each maintainer section + min_cnt = 999999 + for section in sections: + name = section.split("\n")[0] + # Count files for this maintainer entry + file_count = count_files_for_maintainer_entry(tree, section) + out.append(f" Section {name} covers ~{file_count} files") + + if 0 < file_count < pass_target: + out.append("PASS") + break + min_cnt = min(min_cnt, file_count) + else: + # Intel and nVidia drivers have 400+ files, just warn for these + # sort of sizes. More files than 500 means we fell down to subsystem + # level of entries. 
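A sketch (illustration only; the helper name is hypothetical) of the verdict the next few lines derive from the smallest covering MAINTAINERS section:

    def verdict(min_cnt, pass_target=50):
        # pass_target drops to 3 when the series adds a Kconfig file
        if min_cnt < pass_target:
            return "pass"
        return "warn" if min_cnt < 500 else "fail"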
+ out.append(f" MIN {min_cnt}") + has_miss = True + if min_cnt < 500: + has_warn = True + else: + has_fail = True + + if has_miss: + warnings.append("Expecting a new MAINTAINERS entry") + else: + warnings.append("MAINTAINERS coverage looks sufficient") + + ret = 0 + if has_fail: + ret = 1 + elif has_warn: + ret = 250 + + return ret, "; ".join(warnings) + + +def maintainers(tree, series, _result_dir) -> Tuple[int, str, str]: + """ Main function / entry point """ + + # Check for new files in the series + new_files, mod_files = extract_files(series) + + ret = 0 + log = ["New files:"] + new_files + ["", "Modified files:"] + mod_files + + if not new_files: + desc = "No new files, skip" + else: + ret, desc = check_maintainer_coverage(tree, new_files, log) + + return ret, desc, "\n".join(log) diff --git a/tests/series/series_format/test.py b/tests/series/series_format/test.py index 392d3a4..f5151fa 100644 --- a/tests/series/series_format/test.py +++ b/tests/series/series_format/test.py @@ -24,7 +24,9 @@ def patch_count(tree, thing, result_dir) -> Tuple[int, str]: if len(thing.patches) <= 15: return 0, "" if thing.cover_pull: - return 250, "Series longer than 15 patches" + return 250, "Series longer than 15 patches (PR)" + if thing.cover_letter: + return 1, "Series longer than 15 patches" # Really no good if there's no cover letter. return 1, "Series longer than 15 patches (and no cover letter)" diff --git a/tests/series/ynl/ynl.sh b/tests/series/ynl/ynl.sh index 77f0ef1..b70622c 100755 --- a/tests/series/ynl/ynl.sh +++ b/tests/series/ynl/ynl.sh @@ -22,7 +22,7 @@ fi ################################################################## echo " ====== 2/ Test build ======" -make -C tools/net/ynl/ hardclean +make -C tools/net/ynl/ distclean if ! make -C tools/net/ynl/ -j $ncpu 2> >(tee $tmpfile >&2); then echo "build failed;" >&$DESC_FD rc=1 @@ -42,13 +42,13 @@ echo " ====== 3/ Generate diffs for user codegen ======" mkdir $RESULTS_DIR/old-code git checkout -q $BRANCH_BASE -make -C tools/net/ynl/generated/ hardclean +make -C tools/net/ynl/generated/ distclean make -C tools/net/ynl/generated/ -j $ncpu cp tools/net/ynl/generated/*.[ch] $RESULTS_DIR/old-code/ mkdir $RESULTS_DIR/new-code git checkout -q $HEAD -make -C tools/net/ynl/generated/ hardclean +make -C tools/net/ynl/generated/ distclean make -C tools/net/ynl/generated/ -j $ncpu cp tools/net/ynl/generated/*.[ch] $RESULTS_DIR/new-code/ diff --git a/checks.html b/ui/checks.html similarity index 88% rename from checks.html rename to ui/checks.html index 603020e..b1b291d 100644 --- a/checks.html +++ b/ui/checks.html @@ -7,37 +7,16 @@ + - + +

@@ -140,7 +119,8 @@

Top 20 check outputs

Check Output - Hits + Acc + Tot
diff --git a/checks.js b/ui/checks.js similarity index 93% rename from checks.js rename to ui/checks.js index 7d2c949..bc3848f 100644 --- a/checks.js +++ b/ui/checks.js @@ -217,18 +217,19 @@ function load_outputs(data) var top_out = []; var top_out_cnt = {}; $.each(data, function(i, v) { - if (v.result != "success") { - if (top_out_cnt[v.description]) { - top_out_cnt[v.description]++; - } else { - top_out.push(v); - top_out_cnt[v.description] = 1; - } + if (v.result == "success") + return 1; + + if (!(v.description in top_out_cnt)) { + top_out.push(v); + top_out_cnt[v.description] = {true: 0, false: 0}; } + + top_out_cnt[v.description][v.state == "accepted"]++; }); top_out.sort(function(a, b) { - return top_out_cnt[b.description] - top_out_cnt[a.description]; + return top_out_cnt[b.description][true] - top_out_cnt[a.description][true]; }); for (let i = 0; i < 20; i++) { @@ -237,11 +238,13 @@ function load_outputs(data) var row = table.insertRow(); var check = row.insertCell(0); var output = row.insertCell(1); - var hits = row.insertCell(2); + var a_hits = row.insertCell(2); + var t_hits = row.insertCell(3); - check.innerHTML = v.check; - output.innerHTML = v.description; - hits.innerHTML = top_out_cnt[v.description]; + check.innerText = v.check; + output.innerText = v.description; + a_hits.innerText = top_out_cnt[v.description][true]; + t_hits.innerText = top_out_cnt[v.description][false]; } } @@ -402,6 +405,9 @@ function run_it(data_raw) function do_it() { + /* + * Please remember to keep these assets in sync with `scripts/ui_assets.sh` + */ $(document).ready(function() { $.get("static/nipa/checks.json", run_it) }); diff --git a/contest/contest.html b/ui/contest.html similarity index 64% rename from contest/contest.html rename to ui/contest.html index fa437d4..3cfee4d 100644 --- a/contest/contest.html +++ b/ui/contest.html @@ -2,7 +2,7 @@ - PW status + PW contest @@ -10,41 +10,36 @@ + + - -
+ +
+
+
+ Loading: + +
+ +
 
+ +
+ +
 
+ + +
Filtering: -
-
+
+
@@ -62,7 +57,7 @@
-
+
-
+
@@ -96,11 +91,26 @@
+
+ +

+

+ Update URL +

+
-
+
Loading...
+
@@ -113,6 +123,9 @@ Group Test Result + Retry + Time + Links
diff --git a/ui/contest.js b/ui/contest.js new file mode 100644 index 0000000..9670e10 --- /dev/null +++ b/ui/contest.js @@ -0,0 +1,385 @@ +function colorify_str(value) +{ + if (value == "pass") { + ret = ''; + } else if (value == "skip") { + ret = ''; + } else { + ret = ''; + } + return ret + value + ''; +} + +function sort_results(rows) +{ + for (const sort_key of nipa_sort_keys) { + let sort_ord = nipa_sort_get(sort_key); + + if (sort_key === "date") { + rows.sort(function(a, b) { + return sort_ord * (b.v.end - a.v.end); + }); + } else if (sort_key === "time") { + rows.sort(function(a, b) { + if (a.r[sort_key] === undefined && b.r[sort_key] === undefined) + return 0; + if (a.r[sort_key] === undefined) + return 1; + if (b.r[sort_key] === undefined) + return -1; + return sort_ord * (b.r[sort_key] - a.r[sort_key]); + }); + } else { + rows.sort(function(a, b) { + return sort_ord * (b.r[sort_key] < a.r[sort_key] ? 1 : -1); + }); + } + } +} + +function load_result_table(data_raw) +{ + var table = document.getElementById("results"); + var result_filter = { + "pass": document.getElementById("pass").checked, + "skip": document.getElementById("skip").checked, + "warn": document.getElementById("warn").checked, + "fail": document.getElementById("fail").checked + }; + var branch_filter = document.getElementById("branch").value; + var exec_filter = document.getElementById("executor").value; + var remote_filter = document.getElementById("remote").value; + var test_filter = document.getElementById("test").value; + var pw_n = document.getElementById("pw-n").checked; + var pw_y = document.getElementById("pw-y").checked; + + // Remove all rows but first (leave headers) + $("#results tr").slice(1).remove(); + + let warn_box = document.getElementById("fl-warn-box"); + warn_box.innerHTML = ""; + + let form = ""; + if (document.getElementById("ld-cases").checked) + form = "&ld-cases=1"; + + let rows = []; + let total_results = 0; + let filtered_results = 0; + + $.each(data_raw, function(i, v) { + if (rows.length >= 5000) { + warn_box.innerHTML = "Reached 5000 rows. Set an executor, branch or test filter. Otherwise this page will set your browser on fire..."; + return 0; + } + + let branch_matches = !branch_filter || branch_filter == v.branch; + let exec_matches = !exec_filter || exec_filter == v.executor; + let remote_matches = !remote_filter || remote_filter == v.remote; + + $.each(v.results, function(j, r) { + total_results++; + + if (!branch_matches || !exec_matches || !remote_matches) + return 1; + + if (test_filter && r.test != test_filter) + return 1; + if (result_filter[r.result] == false) + return 1; + if (pw_y == false && nipa_pw_reported(v, r) == true) + return 1; + if (pw_n == false && nipa_pw_reported(v, r) == false) + return 1; + + filtered_results++; + rows.push({"v": v, "r": r}); + }); + }); + + // Display filtering information + let filter_info_elem = document.getElementById("filter-info"); + if (total_results > 0) { + let filtered_out = total_results - filtered_results; + if (filtered_out > 0) { + filter_info_elem.innerHTML = `${total_results} results
(${filtered_out} filtered out)`; + } else { + filter_info_elem.innerHTML = `${total_results} results`; + } + } else { + filter_info_elem.innerHTML = ""; + } + + // Trim the time, so that sort behavior matches what user sees + for (const result of rows) { + if (result.r.time) + result.r.time = Math.round(result.r.time); + } + + sort_results(rows); + + for (const result of rows) { + const r = result.r; + const v = result.v; + + var row = table.insertRow(); + + var date = row.insertCell(0); + var branch = row.insertCell(1); + var remote = row.insertCell(2); + var exe = row.insertCell(3); + var group = row.insertCell(4); + var test = row.insertCell(5); + var res = row.insertCell(6); + let row_id = 7; + var retry = row.insertCell(row_id++); + var time = row.insertCell(row_id++); + var outputs = row.insertCell(row_id++); + var flake = row.insertCell(row_id++); + var hist = row.insertCell(row_id++); + + date.innerHTML = v.end.toLocaleString(); + branch.innerHTML = "" + v.branch + ""; + remote.innerHTML = v.remote; + exe.innerHTML = v.executor; + group.innerHTML = r.group; + test.innerHTML = "" + r.test + ""; + if ("retry" in r) + retry.innerHTML = colorify_str(r.retry); + if ("time" in r) + time.innerHTML = nipa_msec_to_str(r.time * 1000); + res.innerHTML = colorify_str(r.result); + outputs.innerHTML = "outputs"; + hist.innerHTML = "history"; + flake.innerHTML = "matrix"; + } +} + +function find_branch_urls(loaded_data) +{ + $.each(loaded_data, function(i, v) { + if (v.remote == "brancher") + branch_urls[v.branch] = v.results[0].link; + }); +} + +function results_update() +{ + load_result_table(loaded_data); +} + +let xfr_todo = 2; +let branch_urls = {}; +let loaded_data = null; + +function reload_select_filters(first_load) +{ + let old_values = new Object(); + + // Save old values before we wipe things out + for (const elem_id of ["branch", "executor", "remote"]) { + var elem = document.getElementById(elem_id); + old_values[elem_id] = elem.value; + } + + // Wipe the options and re-add + $("select option").remove(); + + // We have all JSONs now, do processing. 
+ nipa_filter_add_options(loaded_data, "branch", "branch"); + nipa_filter_add_options(loaded_data, "executor", "executor"); + nipa_filter_add_options(loaded_data, "remote", "remote"); + + // On first load we use URL, later we try to keep settings user tweaked + if (first_load) + nipa_filters_set_from_url(); + + for (const elem_id of ["branch", "executor", "remote"]) { + var elem = document.getElementById(elem_id); + + if (!first_load) + elem.value = old_values[elem_id]; + if (elem.selectedIndex == -1) + elem.selectedIndex = 0; + } +} + +function loaded_one() +{ + if (--xfr_todo) + return; + + let headers = document.getElementsByTagName("th"); + for (const th of headers) { + th.addEventListener("click", nipa_sort_key_set); + } + reload_select_filters(true); + nipa_filters_enable(reload_data, "ld-pw"); + nipa_filters_enable(results_update, "fl-pw"); + + results_update(); +} + +function filters_loaded(data_raw) +{ + nipa_set_filters_json(data_raw); + loaded_one(); +} + +function results_loaded(data_raw) +{ + $.each(data_raw, function(i, v) { + v.start = new Date(v.start); + v.end = new Date(v.end); + }); + data_raw.sort(function(a, b){return b.end - a.end;}); + + find_branch_urls(data_raw); + + const had_data = loaded_data; + loaded_data = data_raw; + if (!had_data) { + loaded_one(); + } else if (!xfr_todo) { + reload_select_filters(false); + results_update(); + } + + nipa_filters_enable(null, ["ld-pw", "fl-pw"]); +} + +function reload_data(event) +{ + const format_l2 = document.getElementById("ld-cases"); + const br_cnt = document.getElementById("ld_cnt"); + const br_name = document.getElementById("ld_branch"); + + if (event) { + if (event.target == br_name) + br_cnt.value = 1; + else if (event.target == br_cnt) + br_name.value = ""; + } + + let req_url = "query/results?"; + if (br_name.value) { + req_url += "branch-name=" + br_name.value; + } else { + req_url += "branches=" + br_cnt.value; + } + if (format_l2.checked) + req_url += '&format=l2'; + + nipa_filters_disable(["ld-pw", "fl-pw"]); + $(document).ready(function() { + $.get(req_url, results_loaded) + }); +} + +function update_url_from_filters() +{ + const result_filter = { + "pass": document.getElementById("pass").checked, + "skip": document.getElementById("skip").checked, + "warn": document.getElementById("warn").checked, + "fail": document.getElementById("fail").checked + }; + const branch_filter = document.getElementById("branch").value; + const exec_filter = document.getElementById("executor").value; + const remote_filter = document.getElementById("remote").value; + const test_filter = document.getElementById("test").value; + const pw_n = document.getElementById("pw-n").checked; + const pw_y = document.getElementById("pw-y").checked; + const ld_cases = document.getElementById("ld-cases").checked; + + // Create new URL with current filters + const currentUrl = new URL(window.location.href); + + // Clear existing filter parameters + const filterParams = ['pass', 'skip', 'warn', 'fail', 'branch', 'executor', + 'remote', 'test', 'pw-n', 'pw-y', 'ld-cases']; + filterParams.forEach(param => currentUrl.searchParams.delete(param)); + + // Add current filter states to URL + if (!result_filter.pass) + currentUrl.searchParams.set('pass', '0'); + if (!result_filter.skip) + currentUrl.searchParams.set('skip', '0'); + if (!result_filter.warn) + currentUrl.searchParams.set('warn', '0'); + if (!result_filter.fail) + currentUrl.searchParams.set('fail', '0'); + + if (branch_filter) + currentUrl.searchParams.set('branch', branch_filter); + if 
(exec_filter) + currentUrl.searchParams.set('executor', exec_filter); + if (remote_filter) + currentUrl.searchParams.set('remote', remote_filter); + if (test_filter) + currentUrl.searchParams.set('test', test_filter); + + if (!pw_n) + currentUrl.searchParams.set('pw-n', '0'); + if (!pw_y) + currentUrl.searchParams.set('pw-y', '0'); + + if (ld_cases) + currentUrl.searchParams.set('ld-cases', '1'); + + // Update the browser URL without reloading the page + window.history.pushState({}, '', currentUrl.toString()); +} + +function embedded_mode() { + $('#loading-fieldset').hide(); + $('#sitemap').hide(); + + $('#open-full-page').show(); + + // Set up click handler for the "Open in full page" link + $('#open-full-page-link').on('click', function(e) { + e.preventDefault(); + + // Create a new URL without the embed parameter + const currentUrl = new URL(window.location.href); + currentUrl.searchParams.delete('embed'); + + // Open in a new tab + window.open(currentUrl.toString(), '_blank'); + }); +} + +function do_it() +{ + const urlParams = new URLSearchParams(window.location.search); + + // embed=1 means we in an iframe in another page, hide navigation + if (urlParams.get("embed") === "1") { + embedded_mode(); + } + + nipa_input_set_from_url("https://github.com/ld-pw"); + /* The filter is called "branch" the load selector is called "ld_branch" + * auto-copy will not work, but we want them to match, initially. + */ + if (urlParams.get("branch")) { + document.getElementById("ld_branch").value = urlParams.get("branch"); + document.getElementById("ld_cnt").value = 1; + } + + $('#update-url-button').on('click', function (e) { + e.preventDefault(); + update_url_from_filters(); + }); + + nipa_sort_cb = results_update; + + /* + * Please remember to keep these assets in sync with `scripts/ui_assets.sh` + */ + $(document).ready(function() { + $.get("contest/filters.json", filters_loaded) + }); + reload_data(null); +} diff --git a/ui/devices.html b/ui/devices.html new file mode 100644 index 0000000..0a58807 --- /dev/null +++ b/ui/devices.html @@ -0,0 +1,48 @@ + + + + + NIPA device tests + + + + + + + + + + +
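Stepping back to the contest.js change above: update_url_from_filters() only writes non-default filter values into the query string. A rough Python sketch of that round-trip (illustration only; the branch name is made up):

    from urllib.parse import urlencode

    defaults = {"pass": "1", "skip": "1", "warn": "1", "fail": "1", "branch": ""}
    current = {"pass": "1", "skip": "0", "warn": "1", "fail": "1",
               "branch": "net-next-2025-01-01--00-00"}  # hypothetical branch
    params = {k: v for k, v in current.items() if v != defaults[k]}
    print("contest.html?" + urlencode(params))
    # -> contest.html?skip=0&branch=net-next-2025-01-01--00-00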
+
+

Device test results

+
+

Starting with Linux v6.12 (October 2024), all officially supported NIC drivers in Linux are required to be continuously tested.

+

See the announcement on the mailing list.

+
+
+
+
+

Latest device info

+ +
+
+
+
+
+

Test case status

+ +
+
+
+
+
+

Old test cases (no reports for 2 weeks+)

+ +
+
+
+ + diff --git a/ui/devices.js b/ui/devices.js new file mode 100644 index 0000000..56fa0b2 --- /dev/null +++ b/ui/devices.js @@ -0,0 +1,136 @@ +let xfr_todo = 2; +let dev_info = null; +let stability = null; + +function load_tables() +{ + // Turn stability into matrix by executor + let rn_seen = new Set(); + let tn_db = []; + let sta_db = {}; + // Test age + let tn_time = {}; + let year_ago = new Date(); + year_ago.setFullYear(year_ago.getFullYear() - 1); + + for (ste of stability) { + let tn = ste.grp + ':' + ste.test + ':' + ste.subtest; + if (ste.subtest == null) + tn = ste.grp + ':' + ste.test + ':'; + let rn = ste.remote + ste.executor; + + if (!(tn in sta_db)) { + sta_db[tn] = {}; + tn_db.push(tn); + tn_time[tn] = year_ago; + } + + sta_db[tn][rn] = ste; + rn_seen.add(rn); + let d = new Date(ste.last_update); + if (d > tn_time[tn]) + tn_time[tn] = d; + } + + // Simple sort by name + tn_db.sort(); + + // Render device info + let display_names = {}; + let dev_table = document.getElementById("device_info"); + + for (dev of dev_info) { + let rn = dev.remote + dev.executor; + if (!rn_seen.has(rn)) + continue; + + let row = dev_table.insertRow(); + + row.insertCell(0).innerText = dev.remote; + row.insertCell(1).innerText = dev.executor; + + const info = JSON.parse(dev.info); + const driver = info.driver; + row.insertCell(2).innerText = driver; + + delete info.driver; + const versions = JSON.stringify(info); + row.insertCell(3).innerText = versions; + + display_names[dev.remote + dev.executor] = + dev.remote + '
' + dev.executor + '
' + driver; + } + + // Create headers + let sta_tb = document.getElementById("stability"); + let sta_to = document.getElementById("stability-old"); + + for (tbl of [sta_tb, sta_to]) { + const hdr = tbl.createTHead().insertRow(); + hdr.insertCell().innerText = 'Group'; + hdr.insertCell().innerText = 'Test'; + hdr.insertCell().innerText = 'Subtest'; + for (rn of Object.keys(display_names)) { + let cell = hdr.insertCell(); + + cell.innerHTML = display_names[rn]; + cell.setAttribute("style", "writing-mode: tb-rl;"); + } + } + + // Display + let two_weeks_ago = new Date().setDate(new Date().getDate() - 14); + + for (tn of tn_db) { + let row = null; + + if (tn_time[tn] > two_weeks_ago) + row = sta_tb.insertRow(); + else + row = sta_to.insertRow(); + + row.insertCell(0).innerText = tn.split(':')[0]; + row.insertCell(1).innerText = tn.split(':')[1]; + let cell = row.insertCell(2); + if (tn.split(':').length == 3) + cell.innerText = tn.split(':')[2]; + + let i = 3; + for (rn of Object.keys(display_names)) { + cell = row.insertCell(i++); + if (rn in sta_db[tn]) { + let ste = sta_db[tn][rn]; + + pct = 100 * ste.pass_cnt / (ste.fail_cnt + ste.pass_cnt); + pct = Math.round(pct); + if (ste.passing) { + cell.setAttribute("class", "box-pass"); + if (pct != 100) + cell.innerText = pct + "%"; + } else { + cell.setAttribute("class", "box-skip"); + if (pct != 0) + cell.innerText = pct + "%"; + } + } + } + } +} + +function do_it() +{ + $(document).ready(function() { + $.get("query/device-info", function(data_raw) { + dev_info = data_raw; + if (!--xfr_todo) + load_tables(); + }) + }); + $(document).ready(function() { + $.get("query/stability?auto=1", function(data_raw) { + stability = data_raw; + if (!--xfr_todo) + load_tables(); + }) + }); +} diff --git a/contest/favicon-contest.png b/ui/favicon-contest.png similarity index 100% rename from contest/favicon-contest.png rename to ui/favicon-contest.png diff --git a/ui/favicon-flakes.png b/ui/favicon-flakes.png new file mode 100644 index 0000000..5934fdf Binary files /dev/null and b/ui/favicon-flakes.png differ diff --git a/ui/favicon-nic.png b/ui/favicon-nic.png new file mode 100644 index 0000000..bf34166 Binary files /dev/null and b/ui/favicon-nic.png differ diff --git a/ui/favicon-stats.png b/ui/favicon-stats.png new file mode 100644 index 0000000..35c4491 Binary files /dev/null and b/ui/favicon-stats.png differ diff --git a/ui/favicon-status.png b/ui/favicon-status.png new file mode 100644 index 0000000..322488e Binary files /dev/null and b/ui/favicon-status.png differ diff --git a/contest/flakes.html b/ui/flakes.html similarity index 55% rename from contest/flakes.html rename to ui/flakes.html index fa2c2f7..98dd6a3 100644 --- a/contest/flakes.html +++ b/ui/flakes.html @@ -10,62 +10,53 @@ + + - -
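For the stability matrix rendered by devices.js above, each cell shows the pass share of recorded runs; a one-line sketch with hypothetical counters:

    pass_cnt, fail_cnt = 47, 3  # hypothetical counts from query/stability
    pct = round(100 * pass_cnt / (fail_cnt + pass_cnt))  # -> 94, rendered as "94%"
    # The number is omitted only when it is exactly 100% for passing tests
    # or 0% for non-passing ones.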
+ +
+
+
+ Loading: +
 
+ +
+ +
 
+ + +
 
+ + +
 
+ + +
Filtering: -
-
+
+
-
+
-
- -
-
+

-
+
Sort: @@ -78,12 +69,27 @@
+

+ Update URL +

-
+
Loading...
+
+
+ + + + + + + +
pass
skip
fail
flake
+
+
diff --git a/ui/flakes.js b/ui/flakes.js new file mode 100644 index 0000000..e92c435 --- /dev/null +++ b/ui/flakes.js @@ -0,0 +1,305 @@ +function colorify(cell, value) +{ + if (value == "pass" || value == "skip" || + value == "fail" || value == "flake") + cell.setAttribute("class", "box-" + value); +} + +function get_sort_key() +{ + if (document.getElementById("sort-streak").checked) + return "streak"; + return "cnt"; +} + +var branch_pfx_set = new Set(); + +function load_result_table(data_raw) +{ + // Get all branch names + var branch_set = new Set(); + $.each(data_raw, function(i, v) { + branch_set.add(v.branch); + }); + + // Populate the load filters with prefixes + let select_br_pfx = document.getElementById("br-pfx"); + for (const br of branch_set) { + const br_pfx = nipa_br_pfx_get(br); + + if (select_br_pfx.length == 0) + nipa_select_add_option(select_br_pfx, "-- all --", ""); + if (branch_pfx_set.has(br_pfx)) + continue; + nipa_select_add_option(select_br_pfx, br_pfx, br_pfx); + branch_pfx_set.add(br_pfx); + } + + // Annotate which results will be visible + var pw_n = document.getElementById("pw-n").checked; + var pw_y = document.getElementById("pw-y").checked; + let needle = document.getElementById("tn-needle").value; + let br_pfx_with_data = new Set(); + + $.each(data_raw, function(i, v) { + $.each(v.results, function(j, r) { + r.visible = false; + + if (pw_y == false && nipa_pw_reported(v, r) == true) + return 1; + if (pw_n == false && nipa_pw_reported(v, r) == false) + return 1; + + const tn = v.remote + '/' + r.group + '/' + r.test; + if (needle && !tn.includes(needle)) + return 1; + + r.visible = true; + + const br_pfx = nipa_br_pfx_get(v.branch); + br_pfx_with_data.add(br_pfx); + }); + }); + + // Hide all the branches with prefixes which saw no data + let br_cnt = document.getElementById("br-cnt").value; + var branches = Array.from(branch_set); + branches = branches.filter( + (name) => br_pfx_with_data.has(nipa_br_pfx_get(name)) + ); + branches = branches.slice(0, br_cnt); + + // Build the result map + var test_row = {}; + let tn_urls = {}; + + $.each(data_raw, function(i, v) { + $.each(v.results, function(j, r) { + if (!r.visible) + return 1; + + const tn = v.remote + '/' + r.group + '/' + r.test; + tn_urls[tn] = "executor=" + v.executor + "&test=" + r.test; + + if (!(tn in test_row)) { + test_row[tn] = {}; + for (let i = 1; i <= branches.length; i++) + test_row[tn][branches[i - 1]] = ""; + } + test_row[tn][v.branch] = r.result; + if (r.result == "fail" && r.retry == "pass") + test_row[tn][v.branch] = "flake"; + }); + }); + + // Sort from most to least flaky + for (const [tn, entries] of Object.entries(test_row)) { + let count = 0, streak = 0, total = 0; + let prev = "pass"; + + for (let i = 0; i < branches.length; i++) { + let current = entries[branches[i]]; + + if (current != "") + total++; + + if (current == "pass" && count == 0) + streak++; + + if (current != "" && current != prev) { + prev = current; + count++; + } + } + test_row[tn]["total"] = total; + test_row[tn]["cnt"] = count; + test_row[tn]["streak"] = streak; + } + + // Filter out those not flaky enough to show + var min_flip = document.getElementById("min-flip").value; + let test_names = Array.from(Object.keys(test_row)); + test_names = test_names.filter(function(a){return test_row[a].cnt >= min_flip;}); + // Sort by the right key + var sort_key = get_sort_key(); + test_names.sort( + function(a, b) { return test_row[b][sort_key] - test_row[a][sort_key]; } + ); + + // Remove all rows but first (leave headers) + 
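The flip/streak metric computed in load_result_table() above can be summarized in a few lines of Python (illustration only; the result list is made up, newest branch first):

    results = ["pass", "pass", "fail", "pass", "fail", "fail"]  # hypothetical
    prev, flips, streak = "pass", 0, 0
    for cur in results:
        if cur == "pass" and flips == 0:
            streak += 1          # run of passes on the newest branches
        if cur and cur != prev:
            prev = cur
            flips += 1           # result changed between consecutive branches
    # flips == 3, streak == 2; rows are then filtered by min-flip and sorted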
$("#results tr").remove(); + // Display + let table = document.getElementById("results"); + + let header = table.insertRow(); + header.insertCell(0); // name + for (let i = 0; i < branches.length; i++) { + let cell = header.insertCell(i + 1); + cell.innerHTML = branches[i]; + cell.setAttribute("style", "writing-mode: tb-rl; font-size: 0.8em; padding: 0px;"); + } + + let form = ""; + if (document.getElementById("ld-cases").checked) + form = "&ld-cases=1"; + for (const tn of test_names) { + let entries = test_row[tn]; + + if (entries.total == 0) + continue; + + let row = table.insertRow(); + let name = row.insertCell(0); + name.innerHTML = "" + tn + ""; + name.setAttribute("style", "padding: 0px"); + + for (let i = 0; i < branches.length; i++) { + let cell = row.insertCell(i + 1); + colorify(cell, entries[branches[i]]); + } + } +} + +function results_update() +{ + load_result_table(loaded_data); +} + +let xfr_todo = 3; +let loaded_data = null; + +function loaded_one() +{ + if (--xfr_todo) + return; + + // We have all JSONs now, do processing. + nipa_input_set_from_url("https://github.com/fl-pw"); + results_update(); +} + +function filters_loaded(data_raw) +{ + nipa_set_filters_json(data_raw); + loaded_one(); +} + +function results_loaded(data_raw) +{ + $.each(data_raw, function(i, v) { + v.start = new Date(v.start); + v.end = new Date(v.end); + }); + data_raw.sort(function(a, b){return b.end - a.end;}); + + const had_data = loaded_data; + loaded_data = data_raw; + if (!had_data) { + loaded_one(); + } else if (!xfr_todo) { + results_update(); + } + + nipa_filters_enable(null, ["ld-pw", "fl-pw"]); +} + +function remotes_loaded(data_raw) +{ + nipa_filter_add_options(data_raw, "ld-remote", null); + loaded_one(); +} + +function update_url_from_filters() +{ + const tn_needle = document.getElementById("tn-needle").value; + const min_flip = document.getElementById("min-flip").value; + const pw_n = document.getElementById("pw-n").checked; + const pw_y = document.getElementById("pw-y").checked; + const sort_streak = document.getElementById("sort-streak").checked; + const br_cnt = document.getElementById("br-cnt").value; + const br_pfx = document.getElementById("br-pfx").value; + const ld_remote = document.getElementById("ld-remote").value; + const ld_cases = document.getElementById("ld-cases").checked; + + // Create new URL with current filters + const currentUrl = new URL(window.location.href); + + // Clear existing filter parameters + const filterParams = ['tn-needle', 'min-flip', 'pw-n', 'pw-y', 'sort-flips', + 'sort-streak', 'br-cnt', 'br-pfx', + 'ld-remote', 'ld-cases']; + filterParams.forEach(param => currentUrl.searchParams.delete(param)); + + // Add current filter states to URL + if (tn_needle) + currentUrl.searchParams.set('tn-needle', tn_needle); + if (min_flip && min_flip !== '1') + currentUrl.searchParams.set('min-flip', min_flip); + + if (!pw_n) + currentUrl.searchParams.set('pw-n', '0'); + if (!pw_y) + currentUrl.searchParams.set('pw-y', '0'); + + if (sort_streak) + currentUrl.searchParams.set('sort-streak', '1'); + + if (br_cnt && br_cnt !== '100') + currentUrl.searchParams.set('br-cnt', br_cnt); + if (br_pfx) + currentUrl.searchParams.set('br-pfx', br_pfx); + if (ld_remote) + currentUrl.searchParams.set('ld-remote', ld_remote); + + if (ld_cases) + currentUrl.searchParams.set('ld-cases', '1'); + + // Update the browser URL without reloading the page + window.history.pushState({}, '', currentUrl.toString()); +} + +function reload_data() +{ + const format_l2 = 
document.getElementById("ld-cases"); + const br_cnt = document.getElementById("br-cnt"); + const br_pfx = document.getElementById("br-pfx"); + const remote = document.getElementById("ld-remote"); + + let req_url = "query/results"; + req_url += "?branches=" + br_cnt.value; + + if (format_l2.checked) + req_url += "&format=l2"; + if (remote.value) + req_url += "&remote=" + remote.value; + if (br_pfx.value) + req_url += "&br-pfx=" + br_pfx.value; + + nipa_filters_disable(["ld-pw", "fl-pw"]); + $(document).ready(function() { + $.get(req_url, results_loaded) + }); +} + +function do_it() +{ + nipa_filters_enable(reload_data, "ld-pw"); + nipa_filters_enable(results_update, "fl-pw"); + nipa_input_set_from_url("https://github.com/ld-pw"); + + $('#update-url-button').on('click', function (e) { + e.preventDefault(); + update_url_from_filters(); + }); + + /* + * Please remember to keep these assets in sync with `scripts/ui_assets.sh` + */ + $(document).ready(function() { + $.get("contest/filters.json", filters_loaded) + }); + $(document).ready(function() { + $.get("query/remotes", remotes_loaded) + }); + reload_data(); +} diff --git a/ui/nipa.css b/ui/nipa.css new file mode 100644 index 0000000..5c52d37 --- /dev/null +++ b/ui/nipa.css @@ -0,0 +1,126 @@ +body { + font-family: "roboto mono", helvetica, nunito; +} + +table { + border-collapse: collapse; + width: 100%; +} + +td, th { + border: 1px solid #eeeeee; + text-align: left; + padding: 8px; +} + +tr:nth-child(even) { + background-color: #eeeeee; +} + +.summary-row td { + border-width: 1px 1px 6px 1px; + border-color: white; + text-align: right; + font-style: italic; +} + +.column-sorted { + background-color: #d0d0d0; +} + +.box-pass { background-color: green; } +.box-skip { background-color: royalblue; } +.box-flake { background-color: red; } +.box-fail { background-color: #d06060; } + +@media screen and (max-resolution: 250dpi) { + .row { + display: flex; + } + + .column { + flex: 50%; + padding: 1em; + } +} + +/* layout inside fieldsets even on small screens */ +.row-small { + display: flex; +} +.column-small { + flex: 50%; + padding: 1em; +} + +.box { + position: absolute; + right: 1em; +} + +#contest-filters { + margin: 1em; + padding: 1em; + border: solid grey 1px; +} + +.nipa-button { + background-color: #0366d6; + border: 1px solid #0366d6; + border-radius: 3px; + padding: 5px 10px; + cursor: pointer; + font-size: 0.9em; + color: white; + text-decoration: none; + display: inline-block; + transition: all 0.2s ease; +} + +.nipa-button:hover { + background-color: #0056b3; + text-decoration: none; +} + +.nipa-button:active { + background-color: #004494; +} + +@media (prefers-color-scheme: dark) { + body { + color: #b8b8b8; + background: #1c1c1c; + } + canvas { + background-color: #303030; + } + a { + color: #809fff; + } + tr, th, td { + border-color: #181818; + } + tr:nth-child(even) { + background-color: #282828; + } + tr:nth-child(odd) { + background-color: #303030; + } + .summary-row td { + border-color: #202020; + } + .column-sorted { + background-color: #484848; + } + .nipa-button { + background-color: #2c5282; + border-color: #2c5282; + color: #e2e8f0; + } + .nipa-button:hover { + background-color: #2b4c7e; + } + .nipa-button:active { + background-color: #1e3a5f; + } +} diff --git a/ui/nipa.js b/ui/nipa.js new file mode 100644 index 0000000..1f60ecb --- /dev/null +++ b/ui/nipa.js @@ -0,0 +1,232 @@ +function nipa_msec_to_str(msec) { + const convs = [ + [1, "ms"], + [1000, "s"], + [60, "m"], + [60, "h"], + [24, "d"], + [7, "w"] + ]; 
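The loop that follows walks the unit ladder declared above; a rough Python equivalent (illustration only, omitting the fractional remainder the JS appends):

    def msec_to_str(msec):
        convs = [(1, "ms"), (1000, "s"), (60, "m"), (60, "h"), (24, "d"), (7, "w")]
        if msec <= 0:
            return str(msec)
        for i, (div, _unit) in enumerate(convs):
            if msec < div:
                return f"{int(msec)}{convs[i - 1][1]}"
            msec /= div
        return "TLE"  # more than a few weeks

    # msec_to_str(500)   -> '500ms'
    # msec_to_str(90000) -> '1m'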
+ + if (msec <= 0) + return msec.toString(); + + for (i = 0; i < convs.length; i++) { + if (msec < convs[i][0]) { + var full = Math.floor(msec) + convs[i - 1][1]; + if (i > 1) { + var frac = Math.round(msec * convs[i - 1][0] % convs[i - 1][0]); + if (frac) + full += " " + frac + convs[i - 2][1]; + } + return full; + } + msec /= convs[i][0]; + } + + return "TLE"; +} + +function nipa_br_pfx_get(name) +{ + return name.substring(0, name.length - 18); +} + +function nipa_test_fullname(v, r) +{ + return v.remote + "/" + v.executor + "/" + r.group + "/" + r.test; +} + +function __nipa_filters_set(update_cb, set_name, enabled) +{ + if (set_name.constructor === Array) { + for (name of set_name) + __nipa_filters_set(update_cb, name, enabled); + return; + } + + const fl_pw = document.querySelectorAll("[name=" + set_name + "]"); + for (const one of fl_pw) { + if (update_cb) + one.addEventListener("change", update_cb); + one.disabled = enabled; + } +} + +function nipa_filters_enable(update_cb, set_name) +{ + let warn_box = document.getElementById("fl-warn-box"); + warn_box.innerHTML = ""; + + __nipa_filters_set(update_cb, set_name, false); +} + +function nipa_filters_disable(set_name) +{ + let warn_box = document.getElementById("fl-warn-box"); + warn_box.innerHTML = "Loading..."; + + __nipa_filters_set(null, set_name, true); +} + +function nipa_input_set_from_url(https://github.com/name) +{ + const urlParams = new URLSearchParams(window.location.search); + const filters = document.querySelectorAll("[name="+ name + "]"); + + for (const elem of filters) { + let url_val = urlParams.get(elem.id); + + if (!url_val) + continue; + + if (elem.hasAttribute("checked") || + elem.type == "radio" || elem.type == "checkbox") { + if (url_val == "0") + elem.checked = false; + else if (url_val == "1") + elem.checked = true; + } else if (elem.type == "select-one") { + let option = elem.querySelector('[value="' + url_val + '"]'); + + if (!option) { + const opt = document.createElement('option'); + opt.value = url_val; + opt.innerHTML = url_val; + opt.setAttribute("style", "display: none;"); + elem.appendChild(opt); + } + elem.value = url_val; + } else { + elem.value = url_val; + } + } +} + +function nipa_filters_set_from_url() +{ + nipa_input_set_from_url("https://github.com/fl-pw"); +} + +function nipa_select_add_option(select_elem, show_str, value) +{ + const opt = document.createElement('option'); + opt.value = value; + opt.innerHTML = show_str; + select_elem.appendChild(opt); +} + +function nipa_filter_add_options(data_raw, elem_id, field) +{ + var elem = document.getElementById(elem_id); + var values = new Set(); + + // Re-create "all" + nipa_select_add_option(elem, "-- all --", ""); + + // Create the dynamic entries + $.each(data_raw, function(i, v) { + if (field) + values.add(v[field]); + else + values.add(v); + }); + for (const value of values) { + nipa_select_add_option(elem, value, value); + } +} + +// ------------------ + +let nipa_filters_json = null; + +function nipa_set_filters_json(filters_json) +{ + nipa_filters_json = filters_json; +} + +// v == result info, r == particular result / test case +function nipa_pw_reported(v, r) +{ + for (const filter of nipa_filters_json["ignore-results"]) { + if (!("remote" in filter) || filter.remote == v.remote) { + if (!("executor" in filter) || filter.executor == v.executor) { + if (!("branch" in filter) || filter.branch == v.branch) { + if (!("group" in filter) || filter.group == r.group) { + if (!("test" in filter) || filter.test == r.test) { + 
return false; + } + } + } + } + } + } + + return true; +} + +function nipa_load_sitemap() +{ + $(document).ready(function() { + $("#sitemap").load("/sitemap.html") + }); +} + +// ------------------ + +var nipa_sort_cb = null; +let nipa_sort_keys = []; +let nipa_sort_polarity = []; + +function nipa_sort_key_set(event) +{ + let elem = event.target; + let what = elem.innerText.toLowerCase().replace(/[^a-z0-9]/g, ''); + const index = nipa_sort_keys.indexOf(what); + let polarity = 1; + + if (index != -1) { + polarity = nipa_sort_polarity[index]; + + // if it's the main sort key invert direction, otherwise we're changing + // order of keys but not their direction + let main_key = index == nipa_sort_keys.length - 1; + if (main_key) + polarity *= -1; + + // delete it + nipa_sort_keys.splice(index, 1); + nipa_sort_polarity.splice(index, 1); + elem.innerText = elem.innerText.slice(0, -2); + + // We flipped back to normal polarity, that's a reset + if (main_key && polarity == 1) { + elem.classList.remove('column-sorted'); + nipa_sort_cb(); + return; + } + } else { + elem.classList.add('column-sorted'); + } + + if (polarity == 1) { + elem.innerHTML = elem.innerText + " ⯆"; + } else { + elem.innerHTML = elem.innerText + " ⯅"; + } + + // add it + nipa_sort_keys.push(what); + nipa_sort_polarity.push(polarity); + + nipa_sort_cb(); +} + +function nipa_sort_get(what) +{ + const index = nipa_sort_keys.indexOf(what); + + if (index == -1) + return 0; + return nipa_sort_polarity[index]; +} diff --git a/ui/sitemap.html b/ui/sitemap.html new file mode 100644 index 0000000..2ce877a --- /dev/null +++ b/ui/sitemap.html @@ -0,0 +1,7 @@ + diff --git a/status.html b/ui/status.html similarity index 53% rename from status.html rename to ui/status.html index 1b977af..c4de7e0 100644 --- a/status.html +++ b/ui/status.html @@ -7,67 +7,42 @@ + + + +
-

Processing times (by check post time)

- -
-
-

Processing times (by patch post time)

- -
-
-
-
+

Build processing

+      <table id="runners">
+        <tr>
+          <th>Tree</th>
+          <th>Qlen</th>
+          <th>Last</th>
+          <th>Tid</th>
+          <th>Test</th>
+          <th>Pid</th>
+          <th>Patch</th>
+        </tr>
+      </table>
+
@@ -77,20 +52,24 @@

Processing times (by patch post time)

Service Memory Use
-
- +
+

Flakiest tests

+
-        <th>Tree</th>
-        <th>Qlen</th>
-        <th>Pid</th>
-        <th>Patch</th>
+        <th>Remote</th>
+        <th>Executor</th>
+        <th>Test</th>
+        <th colspan="4">Flakes (by week: this, 1, 2, 3 ago)</th>
+        <th>Ignored</th>
-
-
+

Continuous testing results

+
+ + +
@@ -100,6 +79,44 @@

Processing times (by patch post time)

Branch Result
+
+

Recent failures

+      <table id="recent-fails">
+        <tr>
+          <th>Branch</th>
+          <th>Remote</th>
+          <th>Test</th>
+          <th>Result</th>
+          <th>Retry</th>
+        </tr>
+      </table>
+
+

Recent crashes

+      <table id="recent-crashes">
+        <tr>
+          <th>Test</th>
+          <th>Crashes</th>
+        </tr>
+      </table>
+
+

Tests with missing results

+      <table id="test-presence">
+        <tr>
+          <th>Test</th>
+          <th># missing</th>
+        </tr>
+      </table>
+
+
+
+
+ +
+
+ +
+
+
@@ -107,11 +124,11 @@

Processing times (by patch post time)

Patchwork reporting

- -

+

Ignored tests:

+ + + +

@@ -129,4 +146,4 @@

Not reporting to patchwork:

- \ No newline at end of file + diff --git a/ui/status.js b/ui/status.js new file mode 100644 index 0000000..f798c23 --- /dev/null +++ b/ui/status.js @@ -0,0 +1,1093 @@ +function load_times_series(data, patch_time) +{ + const minute = 1000 * 60; + const hour = minute * 60; + const day = hour * 24; + const year = day * 365; + + var entries = []; + var prev_min = 0; + var prev_val = 0; + + var offset = new Date().getTimezoneOffset() * minute; + var now = Date.now() + offset; + + $.each(data, function(i, v) { + if (v["check-date"] == null) + return true; + + var p_date = new Date(v["date"]); + var c_date = new Date(v["check-date"]); + + if (patch_time) { + minutes_back = v.minutes_back; + } else { + minutes_back = Math.round((now - c_date) / minute); + } + if (minutes_back / (64 * 24) > 7) + return true; + + value = ((c_date - p_date) / hour).toFixed(2); + + if (Math.abs(prev_min - minutes_back) > 2 || + Math.abs(prev_val - value) > 0.02) { + entries.push({"l": (minutes_back / 60).toFixed(2), "v": Math.max(value, 0)}); + + prev_min = minutes_back; + prev_val = value; + } + }); + + // Sort by labels + entries.sort(function(a, b){return a.l - b.l;}); + + return entries; +} + +function load_times(data, canva_id) +{ + let e1, e2; + + e1 = load_times_series(data, true); + e2 = load_times_series(data, false); + + const ctx = document.getElementById(canva_id); + + new Chart(ctx, { + type: 'scatter', + data: { + labels: e1.map(function(e){return e.l;}), + datasets: [{ + backgroundColor: "rgba(0, 0, 0, 0)", + pointBorderColor: "rgba(0, 64, 255, 0.7)", + label: 'By patch post time', + data: e1.map(function(e){return e.v;}) + }, { + backgroundColor: "rgba(0, 0, 0, 0)", + pointBorderColor: "rgba(255, 64, 0, 0.7)", + label: 'By check delivery time', + data: e2.map(function(e){return e.v;}) + }] + }, + options: { + plugins: { + title: { + display: true, + text: 'Patch processing times over last 7 days', + padding: 0 + }, + legend: { + position: 'chartArea', + }, + }, + scales: { + y: { + type: 'linear', + title: { + display: true, + text: 'Hours', + padding: 0 + }, + ticks: { + stepSize: 3 + }, + suggestedMax: 12, + beginAtZero: true + }, + x: { + type: 'linear', + title: { + display: true, + text: 'Hours ago', + padding: 0 + }, + ticks: { + stepSize: 24 + }, + suggestedMax: 12, + reverse: true + } + } + } + }); +} + +function run_it(data_raw) +{ + const minute = 1000 * 60; + const hour = minute * 60; + const day = hour * 24; + const year = day * 365; + + var offset = new Date().getTimezoneOffset() * minute; + var now = Date.now() + offset; + + var latest = new Date(data_raw[0].date); + var data = []; + $.each(data_raw, function(i, v) { + var date = new Date(v.date); + if (latest < date) + latest = date; + + if (v.check != "build_clang") + return true; + + v.days_back = Math.round((now - date) / day) + 1; + v.minutes_back = Math.round((now - date) / minute) + 1; + + data.push(v); + }); + + load_times(data, 'process-time'); +} + +function colorify_str_any(value, color_map) +{ + if (!(value in color_map)) + return value; + return '' + value + ''; +} + +function colorify_basic(value) +{ + return colorify_str_any(value, {"fail": "red", + "pass": "green", + "pending": "#809fff"}); +} + +function colorify_str(value, good) +{ + if (value == good) { + ret = ''; + } else { + ret = ''; + } + return ret + value + ''; +} + +function systemd_add_one(table, system, sname, v) +{ + var row = table.insertRow(); + var name = row.insertCell(0); + var ss = row.insertCell(1); + var tasks = row.insertCell(2); + var cpu = 
row.insertCell(3); + var mem = row.insertCell(4); + + let sstate = ""; + let now = system["time-mono"]; + + if (v.TriggeredBy == 0) { + cpuSec = v.CPUUsageNSec / 1000; + cpuHours = cpuSec / (now - v.ExecMainStartTimestampMonotonic); + cpuHours = cpuHours.toFixed(2); + + memGb = (v.MemoryCurrent / (1024 * 1024 * 1024)).toFixed(2); + memGb = memGb + 'GB'; + + state = v.ActiveState + " / " + v.SubState; + sstate = colorify_str(state, "active / running"); + + taskcnt = v.TasksCurrent; + } else { + cpuSec = v.CPUUsageNSec / 1000; + cpuHours = cpuSec / (v.ExecMainExitTimestampMonotonic - + v.ExecMainStartTimestampMonotonic); + cpuHours = cpuHours.toFixed(2); + + sstate = colorify_str(v.Result, "success"); + + taskcnt = ''; + memGb = ''; + } + + name.innerHTML = sname; + ss.innerHTML = sstate; + ss.setAttribute("style", "text-align: center"); + tasks.innerHTML = taskcnt; + tasks.setAttribute("style", "text-align: right"); + cpu.innerHTML = cpuHours; + cpu.setAttribute("style", "text-align: right"); + mem.innerHTML = memGb; + mem.setAttribute("style", "text-align: right"); +} + +function systemd(data_raw, data_local, data_remote) +{ + var table = document.getElementById("systemd"); + + $.each(data_local, function(i, v) { + systemd_add_one(table, data_raw, i, v); + }); + + $.each(data_remote, function(name, remote) { + $.each(remote["services"], function(service, v) { + systemd_add_one(table, remote, name + "/" + service, v); + }); + }); +} + +function load_runners(data_raw) +{ + var table = document.getElementById("runners"); + + $.each(data_raw, function(i, v) { + var row = table.insertRow(); + let cell_id = 0; + var name = row.insertCell(cell_id++); + var qlen = row.insertCell(cell_id++); + var modify = row.insertCell(cell_id++); + var tid = row.insertCell(cell_id++); + var test = row.insertCell(cell_id++); + var pid = row.insertCell(cell_id++); + var patch = row.insertCell(cell_id++); + + name.innerHTML = i.slice(0, -6); + pid.innerHTML = v.progress; + patch.innerHTML = v.patch; + tid.innerHTML = v["test-progress"]; + test.innerHTML = v.test; + qlen.innerHTML = v.backlog; + + let since = Date.now() - (new Date(v.mtime * 1000)); + modify.innerHTML = nipa_msec_to_str(since); + if (v.patch && since > 90 * 60 * 1000) { // 1.5 hours + row.setAttribute("style", "color: red"); + } + }); +} + +function load_runtime(data_raw) +{ + var entries = []; + + $.each(data_raw["data"], function(i, v) { + entries.push({"l": i, "v": v}); + }); + + entries.sort(function(a, b){return b.v.pct - a.v.pct;}); + + const ctx = document.getElementById("run-time"); + + new Chart(ctx, { + type: 'bar', + data: { + labels: entries.map(function(e){return e.l;}), + datasets: [{ + yAxisID: 'A', + label: 'Percent of total runtime', + borderRadius: 5, + data: entries.map(function(e){return e.v.pct;}), + }, { + yAxisID: 'B', + label: 'Avgerage runtime in sec', + borderWidth: 1, + borderRadius: 5, + data: entries.map(function(e){return e.v.avg;}) + }] + }, + options: { + responsive: true, + plugins: { + legend: { + position: 'bottom', + }, + title: { + display: true, + text: 'Check runtime' + } + }, + scales: { + A: { + display: true, + beginAtZero: true + }, + B: { + position: 'right', + display: true, + beginAtZero: true + } + }, + }, + }); +} + +function load_db_size(data) +{ + const ctx = document.getElementById("db-size"); + + new Chart(ctx, { + type: 'line', + data: { + labels: data.map(function(e){return new Date(e.ts).toDateString();}), + datasets: [{ + yAxisID: 'A', + label: 'DB size in kB', + data: 
data.map(function(e){return Math.floor(e.size / 1024);}), + }, { + yAxisID: 'B', + label: 'builder disk use %', + data: data.map(function(e){return 100 - e.disk;}), + }, { + yAxisID: 'B', + label: 'metal disk use %', + data: data.map(function(e){return 100 - e.disk_remote;}), + }] + }, + options: { + responsive: true, + plugins: { + legend: { + position: 'bottom', + }, + title: { + display: true, + text: 'Storage use' + } + }, + scales: { + A: { + display: true + }, + B: { + position: 'right', + display: true, + suggestedMax: 100 + } + }, + } + }); +} + +function status_system(data_raw) +{ + systemd(data_raw, data_raw["services"], data_raw["remote"]); + load_runners(data_raw["runners"]); + load_runtime(data_raw["log-files"]); + load_db_size(data_raw["db"]["data"]); +} + +function msec_to_str(msec) { + return nipa_msec_to_str(msec); +} + +function colorify_str_psf(str_psf, name, value, color) +{ + var bspan = ''; + var cspan = ''; + + if (value && str_psf.overall == "") + str_psf.overall = cspan + name + ''; + + if (str_psf.str != "") { + str_psf.str = " / " + str_psf.str; + } + + var p; + if (value == 0) { + p = value; + } else { + p = bspan + value + ''; + } + str_psf.str = p + str_psf.str; +} + +function avg_time_e(avgs, v) +{ + const ent_name = v.remote + '/' + v.executor; + + if (!(ent_name in avgs)) + return 0; + return avgs[ent_name]["min-dly"] + + avgs[ent_name]["sum"] / avgs[ent_name]["cnt"]; +} + +function wrap_link(objA, objB, text) +{ + let url = null; + + if ("link" in objA) + url = objA.link; + else if ("link" in objB) + url = objB.link; + else + return text; + + return "" + text + ""; +} + +function load_fails(data_raw) +{ + var fail_table = document.getElementById("recent-fails"); + var crash_table = document.getElementById("recent-crashes"); + + $.each(data_raw, function(idx0, v) { + $.each(v.results, function(idx1, r) { + if (r.result != "pass" && nipa_pw_reported(v, r)) { + let i = 0, row = fail_table.insertRow(); + row.insertCell(i++).innerHTML = v.branch; + row.insertCell(i++).innerHTML = v.remote; + row.insertCell(i++).innerHTML = r.test; + row.insertCell(i++).innerHTML = colorify_basic(r.result); + if ("retry" in r) + row.insertCell(i++).innerHTML = colorify_basic(r.retry); + } + + if ("crashes" in r) { + for (crash of r.crashes) { + let i = 0, row = crash_table.insertRow(); + row.insertCell(i++).innerHTML = wrap_link(r, v, r.test); + row.insertCell(i++).innerHTML = crash; + } + } + }); + }); +} + +function load_partial_tests(data) +{ + let table = document.getElementById("test-presence"); + let pending_executors = {}; + let count_map = {}; + let br_map = {}; + let total = {}; + + $.each(data, function(i, v) { + // Ignore tests from AWOL executors, that should be rare + if (v.executor in awol_executors) + return 1; + + if (v.executor == "brancher") { + if (v.br_pfx in total) + total[v.br_pfx]++; + else + total[v.br_pfx] = 1; + return 1; + } + + // Track pending executors + if (v.results == null) { + let name = rem_exe(v); + + if (name in pending_executors) + pending_executors[name]++; + else + pending_executors[name] = 1; + } + + $.each(v.results, function(i, r) { + let name = nipa_test_fullname(v, r); + + if (name in count_map) { + count_map[name]++; + } else { + count_map[name] = 1; + br_map[name] = new Set(); + } + + br_map[name].add(v.br_pfx); + }); + }); + + for (const name of Object.keys(count_map)) { + let expect = 0; + + for (const br_pfx of br_map[name]) + expect += total[br_pfx]; + + let missing = expect - count_map[name]; + + if (!missing) + continue; + for 
(const pending of Object.keys(pending_executors)) { + if (name.startsWith(pending)) { + if (missing == pending_executors[pending]) + missing = 0; + break; + } + } + if (!missing) + continue; + + let row = table.insertRow(); + row.insertCell(0).innerHTML = name; + row.insertCell(1).innerHTML = missing; + } +} + +function add_summaries(table, summary, reported) +{ + let row = table.insertRow(); + let i = 0; + + let cell = row.insertCell(i++); // branch + cell.innerHTML = "summary"; + + cell = row.insertCell(i++); // remote + let count_line = summary["remote-cnt"] + " remotes"; + if (summary["hidden"]) { + if (summary["hidden"] == summary["remote-cnt"]) + count_line += " (all hidden)"; + else + count_line += " (" + summary["hidden"] + " hidden)"; + } + + cell.innerHTML = count_line; + + cell = row.insertCell(i++); // time + cell.innerHTML = msec_to_str(summary["time-pass"]); + + let str_psf = {"str": "", "overall": ""}; + + colorify_str_psf(str_psf, "fail", summary["fail"], "red"); + colorify_str_psf(str_psf, "skip", summary["skip"], "#809fff"); + colorify_str_psf(str_psf, "pass", summary["pass"], "green"); + + var link_to_contest = " 0) + link_to_contest += "&pass=0"; + link_to_contest += "\">" + str_psf.str + ""; + + cell = row.insertCell(i++); // tests + cell.innerHTML = link_to_contest; + + cell = row.insertCell(i++); // result + cell.setAttribute("style", "text-align: left; font-weight: bold; font-style: normal;"); + cell.innerHTML = colorify_basic(branch_results[summary.branch]); + + row.setAttribute("class", "summary-row"); +} + +function reset_summary(summary, branch) +{ + summary["branch"] = branch; + summary["remote-cnt"] = 0; + summary["time-pass"] = 0; + summary["total"] = 0; + summary["skip"] = 0; + summary["fail"] = 0; + summary["pass"] = 0; + summary["hidden"] = 0; +} + +function load_result_table_one(data_raw, table, reported, avgs) +{ + const summarize = document.getElementById("contest-summary").checked; + let summary = {}; + + reset_summary(summary, data_raw[0].branch); + + $.each(data_raw, function(i, v) { + var pass = 0, skip = 0, fail = 0, total = 0, ignored = 0; + var link = v.link; + + if (summary["branch"] != v.branch) { + add_summaries(table, summary, reported); + reset_summary(summary, v.branch); + } + + $.each(v.results, function(i, r) { + if (nipa_pw_reported(v, r) != reported) { + ignored++; + return 1; + } + + if (r.result == "pass") { + pass++; + } else if (r.result == "skip") { + skip++; + } else { + fail++; + } + + total++; + if (!link) + link = r.link; + }); + + if (!total && ignored && v.executor != "brancher") + return 1; + + var t_start = new Date(v.start); + var t_end = new Date(v.end); + + if (v.executor != "brancher") { + summary["total"] += total; + if (total) { + summary["remote-cnt"] += 1; + if (summary["time-pass"] < t_end - t_start) + summary["time-pass"] = t_end - t_start; + } + + summary["skip"] += skip; + summary["fail"] += fail; + summary["pass"] += pass; + if (summarize && total && total == pass) { + summary["hidden"] += 1; + return 1; + } + } + + var str_psf = {"str": "", "overall": ""}; + + colorify_str_psf(str_psf, "fail", fail, "red"); + colorify_str_psf(str_psf, "skip", skip, "#809fff"); + colorify_str_psf(str_psf, "pass", pass, "green"); + + const span_small = " ("; + if (ignored) { + if (reported) + str_psf.overall += span_small + "ignored: " + ignored + ")"; + else + str_psf.overall += span_small + "reported: " + ignored + ")"; + } + + var row = table.insertRow(); + + var branch = row.insertCell(0); + var remote = row.insertCell(1); + 
+ var a = ""; + + if (v.remote != "brancher") { + var time = row.insertCell(2); + + if (link) + remote.innerHTML = a + v.remote + ""; + else + remote.innerHTML = v.remote; + if (total) { + var cnt = row.insertCell(3); + var res = row.insertCell(4); + + var link_to_contest = " 0) + link_to_contest += "&pass=0"; + link_to_contest += "\">"; + + cnt.innerHTML = link_to_contest + str_psf.str + ""; + res.innerHTML = str_psf.overall; + time.innerHTML = msec_to_str(t_end - t_start); + } else { + var pend; + + const passed = Date.now() - v.start; + const expect = Math.round(avg_time_e(avgs, v)); + var remain = expect - passed; + var color = "pink"; + + if (v.end == 0) { + pend = "no result"; + if (passed > 1000 * 60 * 15 /* 15 min */) + color = "red"; + else + color = "#809fff"; + } else if (remain > 0) { + pend = "pending (expected in " + (msec_to_str(remain)).toString() + ")"; + color = "#809fff"; + } else if (remain < -1000 * 60 * 60 * 2) { /* 2 h */ + pend = "timeout"; + } else { + pend = "pending (expected " + (msec_to_str(-remain)).toString() + " ago)"; + } + time.innerHTML = "" + pend + ""; + time.setAttribute("colspan", "3"); + } + } else { + let br_pull = ""; + + if (v.start) + remote.innerHTML = v.start.toLocaleString(); + else + remote.innerHTML = "unknown"; + remote.setAttribute("colspan", "3"); + if (v.pull_status != "okay") + br_pull = " (pull: " + v.pull_status + ")"; + a = ""; + branch.innerHTML = a + v.branch + "" + br_pull; + branch.setAttribute("colspan", "2"); + } + }); + + add_summaries(table, summary, reported); +} + +function rem_exe(v) +{ + return v.remote + "/" + v.executor; +} + +var awol_executors; + +function load_result_table(data_raw, reload) +{ + var table = document.getElementById("contest"); + var table_nr = document.getElementById("contest-purgatory"); + var branch_pull_status = {}; + var branch_start = {}; + + // Parse branch info to extract pull status + $.each(branches_info, function(i, v) { + let summary = null; + $.each(v['base-pulls'], function(url, res) { + if (res == "okay" && !summary) { + summary = res; + } else if (res == "resolved" && (!summary || summary == "okay")) { + summary = res; + } else { + summary = res; + } + }); + branch_pull_status[i] = summary; + }); + + // Decorate branchers and collect branch_start + $.each(data_raw, function(i, v) { + v.start = new Date(v.start); + v.end = new Date(v.end); + + v.br_pfx = nipa_br_pfx_get(v.branch); + v.br_date = v.branch.substring(v.branch.length - 17); + + branches.add(v.branch); + + if (v.remote == "brancher") { + branch_start[v.branch] = v.start; + v.pull_status = branch_pull_status[v.branch]; + } + }); + + // Calculate expected runtimes + var avgs = {}; + $.each(data_raw, function(i, v) { + if (!v.results) + return 1; + + const ent_name = rem_exe(v); + + if (!(ent_name in avgs)) + avgs[ent_name] = {"cnt": 0, "sum": 0, "min-dly": 0}; + avgs[ent_name]["cnt"] += 1; + avgs[ent_name]["sum"] += (v.end - v.start); + + if (v.branch in branch_start) { + const dly = v.start - branch_start[v.branch]; + const old = avgs[ent_name]["min-dly"]; + + if (!old || old > dly) + avgs[ent_name]["min-dly"] = dly; + } + }); + + // Fill in runs for "AWOL" executors + let known_execs = {}; + let branch_execs = {}; + for (v of data_raw) { + let re = rem_exe(v); + + if (!(v.branch in branch_execs)) + branch_execs[v.branch] = new Set(); + branch_execs[v.branch].add(re); + + if (!(re in known_execs)) + known_execs[re] = { + "executor": v.executor, + "remote" : v.remote, + "br_pfx" : new Set(), + "branches" : new Set() + }; + 
known_execs[re].br_pfx.add(v.br_pfx); + known_execs[re].branches.add(v.branch); + } + + let known_exec_set = new Set(Object.keys(known_execs)); + awol_executors = new Set(); + for (br of branches) { + for (re of known_exec_set) { + if (branch_execs[br].has(re)) + continue; + // Exec works on different branch stream + if (!known_execs[re].br_pfx.has(nipa_br_pfx_get(br))) + continue; + + data_raw.push({ + "executor" : known_execs[re].executor, + "remote" : known_execs[re].remote, + "branch" : br, + "br_pfx" : br.substring(0, br.length - 18), + "br_date": br.substring(br.length - 17), + "start" : branch_start[br], + "end" : 0, + }); + awol_executors.add(known_execs[re].executor); + } + } + + // Sort & display + data_raw.sort(function(a, b){ + if (b.branch !== a.branch) { + if (b.br_date !== a.br_date) + return b.br_date.localeCompare(a.br_date); + return b.br_pfx.localeCompare(a.br_pfx); + } + + // Keep brancher first + if (a.executor == b.executor) + /* use other keys */; + else if (b.executor == "brancher") + return 1; + else if (a.executor == "brancher") + return -1; + + // fake entry for "no result" always up top + if (b.end === 0) + return 1; + + // both pending, sort by expected time + if (a.results == null && b.results == null) + return avg_time_e(avgs, b) - avg_time_e(avgs, a); + // pending before not pending + if (b.results == null) + return 1; + if (a.results == null) + return -1; + + if (b.end != a.end) + return b.end - a.end > 0 ? 1 : -1; + return 0; + }); + + $("#contest tr").slice(1).remove(); + $("#contest-purgatory tr").slice(1).remove(); + load_result_table_one(data_raw, table, true, avgs); + load_result_table_one(data_raw, table_nr, false, avgs); + if (!reload) { + load_fails(data_raw); + load_partial_tests(data_raw); + } +} + +let xfr_todo = 4; +let all_results = null; +let branches_info = null; +let branches = new Set(); +let branch_results = {}; + +function reload_results() +{ + load_result_table(all_results, true); +} + +function loaded_one() +{ + if (!--xfr_todo) { + load_result_table(all_results, false); + + let summary_checkbox = document.getElementById("contest-summary"); + summary_checkbox.addEventListener("change", reload_results); + } +} + +function results_loaded(data_raw) +{ + all_results = data_raw; + loaded_one(); +} + +function branch_res_doit(data_raw) +{ + $.each(data_raw, function(i, v) { + branch_results[i] = v.result; + }); + + loaded_one(); +} + +function add_one_test_filter_hdr(keys_present, key, hdr, row) +{ + if (!keys_present.has(key)) + return false; + + let th = document.createElement("th"); + th.innerHTML = hdr; + row.appendChild(th); + return true; +} + +function add_one_test_filter(keys_present, key, v, i, row) +{ + if (!keys_present.has(key)) + return 0; + + let cell = row.insertCell(i); + if (key in v) + cell.innerHTML = v[key]; + return 1; +} + +function filters_doit(data_raw) +{ + let cf_crashes = document.getElementById("cf-crashes"); + let cf_execs = document.getElementById("cf-execs"); + let cf_tests = document.getElementById("cf-tests"); + var output, sep = ""; + + output = "Remotes reported: "; + $.each(data_raw.remotes, function(i, v) { + output += sep + v; + sep = ", "; + }); + cf_execs.innerHTML = output; + + let keys_present = new Set(); + $.each(data_raw["ignore-results"], function(i, v) { + for (const k of Object.keys(v)) + keys_present.add(k); + }); + + let cf_tests_hdr = document.getElementById("cf-tests-hdr"); + let has_notes = false; + add_one_test_filter_hdr(keys_present, "remote", "Remote", cf_tests_hdr); + 
add_one_test_filter_hdr(keys_present, "executor", "Executor", cf_tests_hdr); + add_one_test_filter_hdr(keys_present, "branch", "Branch", cf_tests_hdr); + add_one_test_filter_hdr(keys_present, "group", "Group", cf_tests_hdr); + add_one_test_filter_hdr(keys_present, "test", "Test", cf_tests_hdr); + + if (add_one_test_filter_hdr(keys_present, "link", "Notes", cf_tests_hdr) || + add_one_test_filter_hdr(keys_present, "comment", "Notes", cf_tests_hdr)) + has_notes = true; + + $.each(data_raw["ignore-results"], function(_i, v) { + let row = cf_tests.insertRow(); + let i = 0; + + i += add_one_test_filter(keys_present, "remote", v, i, row); + i += add_one_test_filter(keys_present, "executor", v, i, row); + i += add_one_test_filter(keys_present, "branch", v, i, row); + i += add_one_test_filter(keys_present, "group", v, i, row); + i += add_one_test_filter(keys_present, "test", v, i, row); + + // Must be last, we don't handle counting columns properly here. + if (has_notes) + cell = row.insertCell(i); + if (v["comment"] || v["link"]) { + let comment = v["comment"] || "link"; + comment = wrap_link(v, v, comment); + cell.innerHTML = comment; + } + }); + + output = "Crashes ignored:
"; + $.each(data_raw["ignore-crashes"], function(i, v) { + let breakable = v.replace(/:/g, ":"); + output += "" + breakable + "
"; + }); + cf_crashes.innerHTML = output; + + nipa_set_filters_json(data_raw); + loaded_one(); +} + +function branches_loaded(data_raw) +{ + branches_info = data_raw; + loaded_one(); +} + +function flakes_add_summary(table, name, data) +{ + let row = table.insertRow(); + let cell = row.insertCell(0); + cell.innerHTML = name; + cell.setAttribute("colspan", "3"); + cell.setAttribute("style", "text-align: right; font-style: italic;"); + for (let n = 0; n < 4; n++) + row.insertCell(n + 1).innerHTML = "" + data[n] + ""; + row.insertCell(5).innerText = ""; +} + +function flakes_doit(data_raw) +{ + let flakes = document.getElementById("flakes"); + + data_raw.sort(function(a, b){ + if (a["count"][0] != b["count"][0]) + return b["count"][0] - a["count"][0]; + if (a["count"][1] != b["count"][1]) + return b["count"][1] - a["count"][1]; + if (a["count"][2] != b["count"][2]) + return b["count"][2] - a["count"][2]; + return 0; + }) + + let reminder = [0, 0, 0, 0]; + let total = [0, 0, 0, 0]; + + $.each(data_raw, function(i, v) { + let row = flakes.insertRow(); + let reported = nipa_pw_reported(v, v); + let ignored = ""; + + for (let n = 0; n < 4; n++) + total[n] += v["count"][n]; + + if (v["count"][0] + v["count"][1] + v["count"][2] < 3 && reported) { + for (let n = 0; n < 4; n++) + reminder[n] += v["count"][n]; + return 1; + } + if (!reported) + ignored = "yes"; + + row.insertCell(0).innerText = v["remote"]; + row.insertCell(1).innerText = v["executor"]; + row.insertCell(2).innerText = v["test"]; + row.insertCell(3).innerText = v["count"][0]; + row.insertCell(4).innerText = v["count"][1]; + row.insertCell(5).innerText = v["count"][2]; + row.insertCell(6).innerText = v["count"][3]; + row.insertCell(7).innerText = ignored; + }); + + flakes_add_summary(flakes, "reminder", reminder); + flakes_add_summary(flakes, "total", total); +} + +function do_it() +{ + /* + * Please remember to keep these assets in sync with `scripts/ui_assets.sh` + */ + $(document).ready(function() { + $.get("static/nipa/checks.json", run_it) + }); + $(document).ready(function() { + $.get("static/nipa/systemd.json", status_system) + }); + $(document).ready(function() { + $.get("contest/filters.json", filters_doit) + }); + $(document).ready(function() { + $.get("static/nipa/branch-results.json", branch_res_doit) + }); + $(document).ready(function() { + $.get("query/results?branches=10&pending=y", results_loaded) + }); + $(document).ready(function() { + $.get("static/nipa/branches-info.json", branches_loaded) + }); + $(document).ready(function() { + $.get("query/flaky-tests", flakes_doit) + }); +}