From: Akinobu Mita <akinobu.mita@gmail.com>
To: damon@lists.linux.dev
Cc: linux-perf-users@vger.kernel.org, sj@kernel.org, akinobu.mita@gmail.com
Subject: [RFC PATCH v2 5/6] mm/damon/paddr: support perf event based access check
Date: Mon, 9 Mar 2026 10:00:08 +0900 [thread overview]
Message-ID: <20260309010009.11639-6-akinobu.mita@gmail.com> (raw)
In-Reply-To: <20260309010009.11639-1-akinobu.mita@gmail.com>
This patch adds perf event based access checks for physical address space monitoring.
The implementation closely follows the perf event based access check for virtual
address space monitoring introduced in the previous patch.
However, for a perf event to be usable with physical address space monitoring,
the data source address of each sample must be obtainable as a physical address.
In other words, both PERF_SAMPLE_DATA_SRC and PERF_SAMPLE_PHYS_ADDR must be
specifiable in perf_event_attr.sample_type.
---
mm/damon/paddr.c | 102 ++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 96 insertions(+), 6 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 68dcde5d423f..a5293af870fd 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -48,7 +48,7 @@ static void damon_pa_mkold(phys_addr_t paddr)
folio_put(folio);
}
-static void __damon_pa_prepare_access_check(struct damon_region *r,
+static void __damon_pa_basic_prepare_access_check(struct damon_region *r,
unsigned long addr_unit)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
@@ -56,14 +56,14 @@ static void __damon_pa_prepare_access_check(struct damon_region *r,
damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
}
-static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
+static void damon_pa_basic_prepare_access_checks(struct damon_ctx *ctx)
{
struct damon_target *t;
struct damon_region *r;
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(r, ctx->addr_unit);
+ __damon_pa_basic_prepare_access_check(r, ctx->addr_unit);
}
}
@@ -81,7 +81,7 @@ static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
return accessed;
}
-static void __damon_pa_check_access(struct damon_region *r,
+static void __damon_pa_basic_check_access(struct damon_region *r,
struct damon_attrs *attrs, unsigned long addr_unit)
{
static phys_addr_t last_addr;
@@ -103,7 +103,7 @@ static void __damon_pa_check_access(struct damon_region *r,
last_addr = sampling_addr;
}
-static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
+static unsigned int damon_pa_basic_check_accesses(struct damon_ctx *ctx)
{
struct damon_target *t;
struct damon_region *r;
@@ -111,7 +111,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
- __damon_pa_check_access(
+ __damon_pa_basic_check_access(
r, &ctx->attrs, ctx->addr_unit);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
@@ -364,6 +364,96 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
return DAMOS_MAX_SCORE;
}
+#ifdef CONFIG_PERF_EVENTS
+
+static void damon_pa_perf_check_accesses(struct damon_ctx *ctx, struct damon_perf_event *event)
+{
+ struct damon_perf *perf = event->priv;
+ struct damon_target *t;
+ unsigned int tidx = 0;
+
+ if (!perf)
+ return;
+
+ damon_paddr_histogram_init(&perf->paddr_histogram);
+
+ damon_perf_populate_paddr_histogram(ctx, event);
+
+ damon_for_each_target(t, ctx) {
+ struct damon_region *r;
+ unsigned int nr_accessed = 0;
+
+ damon_for_each_region(r, t) {
+ unsigned long addr;
+
+ if (r->accessed)
+ continue;
+
+ for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+ if (damon_paddr_histogram_count(&perf->paddr_histogram,
+ addr & PAGE_MASK)) {
+ r->accessed = true;
+ nr_accessed++;
+ break;
+ }
+ }
+ }
+ tidx++;
+ }
+
+ damon_paddr_histogram_destroy(&perf->paddr_histogram);
+}
+
+#else /* CONFIG_PERF_EVENTS */
+
+static void damon_pa_perf_check_accesses(struct damon_ctx *ctx, struct damon_perf_event *event)
+{
+}
+
+#endif /* CONFIG_PERF_EVENTS */
+
+static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
+{
+ struct damon_perf_event *event;
+
+ if (list_empty(&ctx->perf_events))
+ return damon_pa_basic_prepare_access_checks(ctx);
+
+ list_for_each_entry(event, &ctx->perf_events, list)
+ damon_perf_prepare_access_checks(ctx, event);
+}
+
+static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct damon_perf_event *event;
+ unsigned int max_nr_accesses = 0;
+
+ if (list_empty(&ctx->perf_events))
+ return damon_pa_basic_check_accesses(ctx);
+
+ damon_for_each_target(t, ctx) {
+ struct damon_region *r;
+
+ damon_for_each_region(r, t)
+ r->accessed = false;
+ }
+
+ list_for_each_entry(event, &ctx->perf_events, list)
+ damon_pa_perf_check_accesses(ctx, event);
+
+ damon_for_each_target(t, ctx) {
+ struct damon_region *r;
+
+ damon_for_each_region(r, t) {
+ damon_update_region_access_rate(r, r->accessed, &ctx->attrs);
+ max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+ }
+ }
+
+ return max_nr_accesses;
+}
+
static int __init damon_pa_initcall(void)
{
struct damon_operations ops = {
--
2.43.0
next prev parent reply other threads:[~2026-03-09 1:00 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-09 1:00 [RFC PATCH v2 0/6] mm/damon: introduce perf event based access check Akinobu Mita
2026-03-09 1:00 ` [RFC PATCH v2 1/6] mm/damon: reintroduce damon_operations->cleanup() Akinobu Mita
2026-03-09 1:00 ` [RFC PATCH v2 2/6] mm/damon/core: introduce struct damon_access_report Akinobu Mita
2026-03-09 15:19 ` Ian Rogers
2026-03-10 1:23 ` SeongJae Park
2026-03-09 1:00 ` [RFC PATCH v2 3/6] mm/damon/core: add common code for perf event based access check Akinobu Mita
2026-03-09 1:00 ` [RFC PATCH v2 4/6] mm/damon/vaddr: support " Akinobu Mita
2026-03-09 1:00 ` Akinobu Mita [this message]
2026-03-09 1:00 ` [RFC PATCH v2 6/6] mm/damon: allow user to set min size of region Akinobu Mita
2026-03-11 0:51 ` [RFC PATCH v2 0/6] mm/damon: introduce perf event based access check SeongJae Park
2026-03-13 7:35 ` Akinobu Mita
2026-03-14 1:31 ` SeongJae Park
2026-03-16 4:42 ` Akinobu Mita
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260309010009.11639-6-akinobu.mita@gmail.com \
--to=akinobu.mita@gmail.com \
--cc=damon@lists.linux.dev \
--cc=linux-perf-users@vger.kernel.org \
--cc=sj@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox