From: Ian Rogers <irogers@google.com>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Namhyung Kim <namhyung@kernel.org>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Jiri Olsa <jolsa@kernel.org>,
Adrian Hunter <adrian.hunter@intel.com>,
James Clark <james.clark@linaro.org>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org
Cc: Ian Rogers <irogers@google.com>
Subject: [PATCH v1 12/14] perf test: Add -j/--junit option for JUnit XML test reports
Date: Wed, 13 May 2026 16:04:48 -0700
Message-ID: <20260513230450.529380-13-irogers@google.com>
In-Reply-To: <20260513230450.529380-1-irogers@google.com>
Add a -j/--junit command line option to generate test reports in the
standard JUnit XML format. The report is written to 'test.xml' by
default, but a different path can be given as an argument to the option
(e.g. -jmytest.xml). The XML report records per test suite and subtest
execution latency, alongside XML-escaped failure logs and skip reasons,
without sacrificing the speed of fully parallel, multi-process test
execution.
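As an illustration (the test names, counts and times below are
placeholders, not captured output), running:

  $ perf test -j

writes a 'test.xml' shaped like:

  <?xml version="1.0" encoding="utf-8"?>
  <testsuites>
    <testsuite name="perf-tests" tests="123" failures="0" skipped="1">
      <testcase classname="Example suite" name="Example subtest" time="0.42">
      </testcase>
      <testcase classname="Example suite" name="Skipped subtest" time="0.01">
        <skipped message="permission denied"/>
      </testcase>
    </testsuite>
  </testsuites>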
Assisted-by: Gemini-CLI:Google Gemini 3
Signed-off-by: Ian Rogers <irogers@google.com>
---
tools/perf/tests/builtin-test.c | 116 ++++++++++++++++++++++++++++++--
1 file changed, 110 insertions(+), 6 deletions(-)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 765724817776..99f5afba1082 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -19,6 +19,7 @@
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
+#include <sys/time.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
@@ -39,6 +40,9 @@
#include "tests-scripts.h"
+static const char *junit_filename;
+static struct strbuf junit_xml_buf = STRBUF_INIT;
+
/*
* Command line option to not fork the test running in the same process and
* making them easier to debug.
@@ -306,6 +310,8 @@ struct child_test {
struct strbuf err_output;
int result;
bool done;
+ struct timespec start_time;
+ struct timespec end_time;
};
static jmp_buf run_test_jmp_buf;
@@ -366,8 +372,34 @@ static unsigned int summary_tests_skipped;
static unsigned int summary_tests_failed;
static struct strbuf summary_failed_tests_buf = STRBUF_INIT;
+static char *xml_escape(const char *str)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *p;
+ char *res;
+
+ if (!str)
+ return strdup("");
+
+ for (p = str; *p; p++) {
+ if (*p == '&')
+ strbuf_addstr(&buf, "&amp;");
+ else if (*p == '<')
+ strbuf_addstr(&buf, "&lt;");
+ else if (*p == '>')
+ strbuf_addstr(&buf, "&gt;");
+ else if (*p == '"')
+ strbuf_addstr(&buf, "&quot;");
+ else if (*p >= 32 || *p == '\n' || *p == '\t')
+ strbuf_addch(&buf, *p);
+ }
+ res = strbuf_detach(&buf, NULL);
+ return res ? res : strdup("");
+}
+
static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
- int result, int width, int running)
+ int result, int width, int running,
+ const char *err_output, double elapsed)
{
if (test_suite__num_test_cases(t) > 1) {
char prefix[32];
@@ -414,6 +446,33 @@ static int print_test_result(struct test_suite *t, int curr_suite, int curr_test
break;
}
+ if (junit_filename && result != TEST_RUNNING) {
+ const char *classname = t->desc;
+ const char *testname = test_description(t, curr_test_case);
+ char *escaped_err = xml_escape(err_output);
+ char *escaped_class = xml_escape(classname);
+ char *escaped_test = xml_escape(testname);
+
+ strbuf_addf(&junit_xml_buf, " <testcase classname=\"%s\" name=\"%s\" time=\"%.2f\">\n",
+ escaped_class, escaped_test, elapsed);
+ if (result == TEST_FAIL) {
+ strbuf_addf(&junit_xml_buf,
+ " <failure message=\"FAILED\">\n%s\n </failure>\n",
+ escaped_err);
+ } else if (result == TEST_SKIP) {
+ const char *reason = skip_reason(t, curr_test_case);
+ char *escaped_reason = xml_escape(reason ? reason : "Skip");
+
+ strbuf_addf(&junit_xml_buf, " <skipped message=\"%s\"/>\n",
+ escaped_reason);
+ free(escaped_reason);
+ }
+ strbuf_addstr(&junit_xml_buf, " </testcase>\n");
+ free(escaped_err);
+ free(escaped_class);
+ free(escaped_test);
+ }
+
return 0;
}
@@ -631,6 +690,8 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
struct strbuf err_output = STRBUF_INIT;
int last_running = -1;
int ret;
+ struct timespec end_time;
+ double elapsed;
if (child_test == NULL) {
/* Test wasn't started. */
@@ -684,7 +745,7 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
}
print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
- width, running);
+ width, running, NULL, 0.0);
last_running = running;
}
}
@@ -733,8 +794,13 @@ static void finish_test(struct child_test **child_tests, int running_test, int c
else if (verbose == 1 && ret == TEST_FAIL)
print_test_failure_snippet(stderr, err_output.buf);
+ clock_gettime(CLOCK_MONOTONIC, &end_time);
+ elapsed = (end_time.tv_sec - child_test->start_time.tv_sec) +
+ (end_time.tv_nsec - child_test->start_time.tv_nsec) / 1000000000.0;
+
+ print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0,
+ err_output.buf, elapsed);
strbuf_release(&err_output);
- print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
if (err > 0)
close(err);
zfree(&child_tests[running_test]);
@@ -830,7 +896,7 @@ static int finish_tests_parallel(struct child_test **child_tests, size_t num_tes
}
print_test_result(next_child->test, next_child->suite_num,
next_child->test_case_num, TEST_RUNNING, width,
- running_count);
+ running_count, NULL, 0.0);
}
last_running = running_count;
}
@@ -866,12 +932,14 @@ static int finish_tests_parallel(struct child_test **child_tests, size_t num_tes
child->process.err = -1;
}
child->result = finish_command(&child->process);
+ clock_gettime(CLOCK_MONOTONIC, &child->end_time);
child->done = true;
}
}
while (next_to_print < num_tests) {
struct child_test *child = child_tests[next_to_print];
+ double elapsed;
if (!child) {
next_to_print++;
@@ -909,8 +977,11 @@ static int finish_tests_parallel(struct child_test **child_tests, size_t num_tes
else if (verbose == 1 && child->result == TEST_FAIL)
print_test_failure_snippet(stderr, child->err_output.buf);
+ elapsed = (child->end_time.tv_sec - child->start_time.tv_sec) +
+ (child->end_time.tv_nsec - child->start_time.tv_nsec) / 1000000000.0;
+
print_test_result(child->test, child->suite_num, child->test_case_num,
- child->result, width, 0);
+ child->result, width, 0, child->err_output.buf, elapsed);
strbuf_release(&child->err_output);
child_tests[next_to_print] = NULL;
zfree(&child);
@@ -931,11 +1002,18 @@ static int start_test(struct test_suite *test, int curr_suite, int curr_test_cas
*child = NULL;
if (dont_fork) {
if (pass == 1) {
+ struct timespec start_time, end_time;
+ double elapsed;
+
+ clock_gettime(CLOCK_MONOTONIC, &start_time);
pr_debug("--- start ---\n");
err = test_function(test, curr_test_case)(test, curr_test_case);
pr_debug("---- end ----\n");
+ clock_gettime(CLOCK_MONOTONIC, &end_time);
+ elapsed = (end_time.tv_sec - start_time.tv_sec) +
+ (end_time.tv_nsec - start_time.tv_nsec) / 1000000000.0;
print_test_result(test, curr_suite, curr_test_case, err, width,
- /*running=*/0);
+ /*running=*/0, NULL, elapsed);
}
return 0;
}
@@ -965,6 +1043,7 @@ static int start_test(struct test_suite *test, int curr_suite, int curr_test_cas
(*child)->process.err = -1;
}
(*child)->process.no_exec_cmd = run_test_child;
+ clock_gettime(CLOCK_MONOTONIC, &(*child)->start_time);
if (sequential || pass == 2) {
err = start_command(&(*child)->process);
if (err)
@@ -999,6 +1078,29 @@ static void print_tests_summary(void)
} else {
color_fprintf(stderr, PERF_COLOR_GREEN, "Failed tests : 0\n");
}
+
+ if (junit_filename) {
+ FILE *fp;
+
+ fp = fopen(junit_filename, "w");
+ if (fp) {
+ unsigned int total = summary_tests_passed + summary_subtests_passed +
+ summary_tests_skipped + summary_tests_failed;
+ fprintf(fp, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
+ fprintf(fp, "<testsuites>\n");
+ fprintf(fp, " <testsuite name=\"perf-tests\" tests=\"%u\" failures=\"%u\" skipped=\"%u\">\n",
+ total, summary_tests_failed, summary_tests_skipped);
+ fprintf(fp, "%s", junit_xml_buf.buf);
+ fprintf(fp, " </testsuite>\n");
+ fprintf(fp, "</testsuites>\n");
+ fclose(fp);
+ pr_info("Wrote junit XML output to %s\n", junit_filename);
+ } else {
+ pr_err("Failed to open %s for writing junit XML output: %s\n",
+ junit_filename, strerror(errno));
+ }
+ }
+ strbuf_release(&junit_xml_buf);
strbuf_release(&summary_failed_tests_buf);
}
@@ -1256,6 +1358,8 @@ int cmd_test(int argc, const char **argv)
"objdump binary to use for disassembly and annotations"),
OPT_UINTEGER(0, "failure-snippet-lines", &failure_snippet_lines,
"Number of lines to include in failure snippet, default 10"),
+ OPT_STRING_OPTARG('j', "junit", &junit_filename, "file",
+ "Generate junit XML output, default test.xml", "test.xml"),
OPT_END()
};
const char * const test_subcommands[] = { "list", NULL };
--
2.54.0.563.g4f69b47b94-goog