From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Andrii Nakryiko <andrii@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Martin KaFai Lau <martin.lau@kernel.org>,
Eduard Zingerman <eddyz87@gmail.com>,
Ritesh Oedayrajsingh Varma <ritesh@superluminal.eu>,
Jelle van der Beek <jelle@superluminal.eu>,
kkd@meta.com, kernel-team@meta.com
Subject: [PATCH bpf-next v2 6/6] selftests/bpf: Add success stats to rqspinlock stress test
Date: Fri, 28 Nov 2025 23:28:02 +0000
Message-ID: <20251128232802.1031906-7-memxor@gmail.com>
In-Reply-To: <20251128232802.1031906-1-memxor@gmail.com>
Add statistics to observe the success and failure rates of lock
acquisition attempts in the normal and NMI contexts.
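
For example, a consumer of these counters can derive the per-context
acquisition success rate as follows (an illustrative sketch, not code
added by this patch; shown here for the NMI context):

	u64 s = atomic64_read(&hist->success[RQSL_CTX_NMI]);
	u64 f = atomic64_read(&hist->failure[RQSL_CTX_NMI]);
	/* Percentage of NMI-context attempts that acquired the lock;
	 * guard against no attempts having been recorded yet.
	 */
	u64 rate = (s + f) ? div64_u64(100 * s, s + f) : 0;
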
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
.../bpf/test_kmods/bpf_test_rqspinlock.c | 55 +++++++++++++++----
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
index e8dd3fbc6ea5..7b4ae5e81d32 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
@@ -33,9 +33,16 @@ static const unsigned int rqsl_hist_ms[] = {
 };
 #define RQSL_NR_HIST_BUCKETS ARRAY_SIZE(rqsl_hist_ms)
 
+enum rqsl_context {
+	RQSL_CTX_NORMAL = 0,
+	RQSL_CTX_NMI,
+	RQSL_CTX_MAX,
+};
+
 struct rqsl_cpu_hist {
-	atomic64_t normal[RQSL_NR_HIST_BUCKETS];
-	atomic64_t nmi[RQSL_NR_HIST_BUCKETS];
+	atomic64_t hist[RQSL_CTX_MAX][RQSL_NR_HIST_BUCKETS];
+	atomic64_t success[RQSL_CTX_MAX];
+	atomic64_t failure[RQSL_CTX_MAX];
 };
 
 static DEFINE_PER_CPU(struct rqsl_cpu_hist, rqsl_cpu_hists);
@@ -117,14 +124,18 @@ static u32 rqsl_hist_bucket_idx(u32 delta_ms)
 	return RQSL_NR_HIST_BUCKETS - 1;
 }
 
-static void rqsl_record_lock_time(u64 delta_ns, bool is_nmi)
+static void rqsl_record_lock_result(u64 delta_ns, enum rqsl_context ctx, int ret)
 {
 	struct rqsl_cpu_hist *hist = this_cpu_ptr(&rqsl_cpu_hists);
 	u32 delta_ms = DIV_ROUND_UP_ULL(delta_ns, NSEC_PER_MSEC);
 	u32 bucket = rqsl_hist_bucket_idx(delta_ms);
-	atomic64_t *buckets = is_nmi ? hist->nmi : hist->normal;
+	atomic64_t *buckets = hist->hist[ctx];
 
 	atomic64_inc(&buckets[bucket]);
+	if (!ret)
+		atomic64_inc(&hist->success[ctx]);
+	else
+		atomic64_inc(&hist->failure[ctx]);
 }
 
 static int rqspinlock_worker_fn(void *arg)
@@ -147,7 +158,8 @@ static int rqspinlock_worker_fn(void *arg)
 		}
 		start_ns = ktime_get_mono_fast_ns();
 		ret = raw_res_spin_lock_irqsave(worker_lock, flags);
-		rqsl_record_lock_time(ktime_get_mono_fast_ns() - start_ns, false);
+		rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns,
+					RQSL_CTX_NORMAL, ret);
 		mdelay(normal_delay);
 		if (!ret)
 			raw_res_spin_unlock_irqrestore(worker_lock, flags);
@@ -190,7 +202,8 @@ static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
 	locks = rqsl_get_lock_pair(cpu);
 	start_ns = ktime_get_mono_fast_ns();
 	ret = raw_res_spin_lock_irqsave(locks.nmi_lock, flags);
-	rqsl_record_lock_time(ktime_get_mono_fast_ns() - start_ns, true);
+	rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns,
+				RQSL_CTX_NMI, ret);
 	mdelay(nmi_delay);
 	if (!ret)
 		raw_res_spin_unlock_irqrestore(locks.nmi_lock, flags);
@@ -300,12 +313,14 @@ static void rqsl_print_histograms(void)
 		u64 norm_counts[RQSL_NR_HIST_BUCKETS];
 		u64 nmi_counts[RQSL_NR_HIST_BUCKETS];
 		u64 total_counts[RQSL_NR_HIST_BUCKETS];
+		u64 norm_success, nmi_success, success_total;
+		u64 norm_failure, nmi_failure, failure_total;
 		u64 norm_total = 0, nmi_total = 0, total = 0;
 		bool has_slow = false;
 
 		for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) {
-			norm_counts[i] = atomic64_read(&hist->normal[i]);
-			nmi_counts[i] = atomic64_read(&hist->nmi[i]);
+			norm_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NORMAL][i]);
+			nmi_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NMI][i]);
 			total_counts[i] = norm_counts[i] + nmi_counts[i];
 			norm_total += norm_counts[i];
 			nmi_total += nmi_counts[i];
@@ -315,17 +330,33 @@ static void rqsl_print_histograms(void)
 				has_slow = true;
 		}
 
+		norm_success = atomic64_read(&hist->success[RQSL_CTX_NORMAL]);
+		nmi_success = atomic64_read(&hist->success[RQSL_CTX_NMI]);
+		norm_failure = atomic64_read(&hist->failure[RQSL_CTX_NORMAL]);
+		nmi_failure = atomic64_read(&hist->failure[RQSL_CTX_NMI]);
+		success_total = norm_success + nmi_success;
+		failure_total = norm_failure + nmi_failure;
+
 		if (!total)
 			continue;
 
 		if (!has_slow) {
-			pr_err("  cpu%d: total %llu (normal %llu, nmi %llu), all within 0-%ums\n",
-			       cpu, total, norm_total, nmi_total, RQSL_SLOW_THRESHOLD_MS);
+			pr_err("  cpu%d: total %llu (normal %llu, nmi %llu) | "
+			       "success %llu (normal %llu, nmi %llu) | "
+			       "failure %llu (normal %llu, nmi %llu), all within 0-%ums\n",
+			       cpu, total, norm_total, nmi_total,
+			       success_total, norm_success, nmi_success,
+			       failure_total, norm_failure, nmi_failure,
+			       RQSL_SLOW_THRESHOLD_MS);
 			continue;
 		}
 
-		pr_err("  cpu%d: total %llu (normal %llu, nmi %llu)\n",
-		       cpu, total, norm_total, nmi_total);
+		pr_err("  cpu%d: total %llu (normal %llu, nmi %llu) | "
+		       "success %llu (normal %llu, nmi %llu) | "
+		       "failure %llu (normal %llu, nmi %llu)\n",
+		       cpu, total, norm_total, nmi_total,
+		       success_total, norm_success, nmi_success,
+		       failure_total, norm_failure, nmi_failure);
 
 		for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) {
 			unsigned int start_ms;
--
2.51.0