From: Lucas De Marchi <lucas.demarchi@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: Lucas De Marchi <lucas.demarchi@intel.com>,
Matthew Brost <matthew.brost@intel.com>,
Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>,
Francois Dugast <francois.dugast@intel.com>,
Priyanka Dandamudi <priyanka.dandamudi@intel.com>,
Matt Roper <matthew.d.roper@intel.com>
Subject: [PATCH 2/2] drm/xe/xe3p: Add support for prefetch page fault
Date: Fri, 7 Nov 2025 10:10:25 -0800 [thread overview]
Message-ID: <20251107-pagefault-prefetch-v1-2-93291d619126@intel.com> (raw)
In-Reply-To: <20251107-pagefault-prefetch-v1-0-93291d619126@intel.com>
Xe3p prefetches memory ranges and notifies software, via an additional
bit in the page fault descriptor, that the fault was caused by a prefetch.
The prefetch bit should only be set in the reply if the page fault
handling was not successful, which allows the HW to avoid generating a
CAT error for prefetch faults.
Based on original patches by Brian Welty <brian.welty@intel.com> and
Priyanka Dandamudi <priyanka.dandamudi@intel.com>.
Bspec: 59311
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Priyanka Dandamudi <priyanka.dandamudi@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
drivers/gpu/drm/xe/xe_gt_stats.c | 1 +
drivers/gpu/drm/xe/xe_gt_stats_types.h | 1 +
drivers/gpu/drm/xe/xe_guc_fwif.h | 5 +++--
drivers/gpu/drm/xe/xe_guc_pagefault.c | 2 ++
drivers/gpu/drm/xe/xe_pagefault.c | 13 +++++++++++--
drivers/gpu/drm/xe/xe_pagefault_types.h | 7 ++++++-
6 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 5f74706bab814..af7b859861730 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -35,6 +35,7 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
DEF_STAT_STR(SVM_TLB_INVAL_US, "svm_tlb_inval_us"),
DEF_STAT_STR(VMA_PAGEFAULT_COUNT, "vma_pagefault_count"),
DEF_STAT_STR(VMA_PAGEFAULT_KB, "vma_pagefault_kb"),
+ DEF_STAT_STR(PREFETCH_PAGEFAULT_COUNT, "prefetch_pagefault_count"),
DEF_STAT_STR(SVM_4K_PAGEFAULT_COUNT, "svm_4K_pagefault_count"),
DEF_STAT_STR(SVM_64K_PAGEFAULT_COUNT, "svm_64K_pagefault_count"),
DEF_STAT_STR(SVM_2M_PAGEFAULT_COUNT, "svm_2M_pagefault_count"),
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index d8348a8de2e1b..23d72c95a2b5f 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -13,6 +13,7 @@ enum xe_gt_stats_id {
XE_GT_STATS_ID_SVM_TLB_INVAL_US,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ XE_GT_STATS_ID_PREFETCH_PAGEFAULT_COUNT,
XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT,
XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT,
XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT,
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index c90dd266e9cf9..1d6f9ebb376df 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -309,7 +309,8 @@ struct xe_guc_pagefault_desc {
#define PFD_ACCESS_TYPE GENMASK(1, 0)
#define PFD_FAULT_TYPE GENMASK(3, 2)
#define PFD_VFID GENMASK(9, 4)
-#define PFD_RSVD_1 GENMASK(11, 10)
+#define PFD_RSVD_1 BIT(10)
+#define XE3P_PFD_PREFETCH BIT(11)
#define PFD_VIRTUAL_ADDR_LO GENMASK(31, 12)
#define PFD_VIRTUAL_ADDR_LO_SHIFT 12
@@ -329,7 +330,7 @@ struct xe_guc_pagefault_reply {
u32 dw1;
#define PFR_VFID GENMASK(5, 0)
-#define PFR_RSVD_1 BIT(6)
+#define XE3P_PFR_PREFETCH BIT(6)
#define PFR_ENG_INSTANCE GENMASK(12, 7)
#define PFR_ENG_CLASS GENMASK(15, 13)
#define PFR_PDATA GENMASK(31, 16)
diff --git a/drivers/gpu/drm/xe/xe_guc_pagefault.c b/drivers/gpu/drm/xe/xe_guc_pagefault.c
index 719a18187a31d..b6c12e5630672 100644
--- a/drivers/gpu/drm/xe/xe_guc_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_guc_pagefault.c
@@ -27,6 +27,7 @@ static void guc_ack_fault(struct xe_pagefault *pf, int err)
FIELD_PREP(PFR_ASID, pf->consumer.asid),
FIELD_PREP(PFR_VFID, vfid) |
+ FIELD_PREP(XE3P_PFR_PREFETCH, pf->consumer.prefetch) |
FIELD_PREP(PFR_ENG_INSTANCE, engine_instance) |
FIELD_PREP(PFR_ENG_CLASS, engine_class) |
FIELD_PREP(PFR_PDATA, pdata),
@@ -77,6 +78,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
pf.consumer.asid = FIELD_GET(PFD_ASID, msg[1]);
pf.consumer.access_type = FIELD_GET(PFD_ACCESS_TYPE, msg[2]);
pf.consumer.fault_type = FIELD_GET(PFD_FAULT_TYPE, msg[2]);
+ pf.consumer.prefetch = FIELD_GET(XE3P_PFD_PREFETCH, msg[2]);
if (FIELD_GET(XE2_PFD_TRVA_FAULT, msg[0]))
pf.consumer.fault_level = XE_PAGEFAULT_LEVEL_NACK;
else
diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
index 68bcc25f27890..81e641a9b2274 100644
--- a/drivers/gpu/drm/xe/xe_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -222,8 +222,14 @@ static bool xe_pagefault_queue_pop(struct xe_pagefault_queue *pf_queue,
return found_fault;
}
-static void xe_pagefault_print(struct xe_pagefault *pf, int err)
+static void xe_pagefault_error_account(struct xe_pagefault *pf, int err)
{
+ /* Don't spam log for prefetch accesses, just add to stats */
+ if (pf->consumer.prefetch) {
+ xe_gt_stats_incr(pf->gt, XE_GT_STATS_ID_PREFETCH_PAGEFAULT_COUNT, 1);
+ return;
+ }
+
xe_gt_dbg(pf->gt, "\nFault response: Unsuccessful %pe\n"
"\tASID: %d\n"
"\tFaulted Address: 0x%08x%08x\n"
@@ -262,7 +268,10 @@ static void xe_pagefault_queue_work(struct work_struct *w)
err = xe_pagefault_service(&pf);
if (err)
- xe_pagefault_print(&pf, err);
+ xe_pagefault_error_account(&pf, err);
+ else
+ /* Prefetch faults should only be acked on error */
+ pf.consumer.prefetch = 0;
pf.producer.ops->ack_fault(&pf, err);
diff --git a/drivers/gpu/drm/xe/xe_pagefault_types.h b/drivers/gpu/drm/xe/xe_pagefault_types.h
index d3b516407d600..f42336602f974 100644
--- a/drivers/gpu/drm/xe/xe_pagefault_types.h
+++ b/drivers/gpu/drm/xe/xe_pagefault_types.h
@@ -84,8 +84,13 @@ struct xe_pagefault {
u8 engine_class;
/** @consumer.engine_instance: engine instance */
u8 engine_instance;
+ /**
+ * @consumer.prefetch: fault is caused by HW prefetch - should
+ * only be acked on failure
+ */
+ u8 prefetch;
/** consumer.reserved: reserved bits for future expansion */
- u8 reserved[7];
+ u8 reserved[6];
} consumer;
/**
* @producer: State for the producer (i.e., HW/FW interface). Populated
--
2.51.2
next prev parent reply other threads:[~2025-11-07 18:11 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-07 18:10 [PATCH 0/2] drm/xe/xe3p: Add page fault prefetch bit handling Lucas De Marchi
2025-11-07 18:10 ` [PATCH 1/2] drm/xe: Coalesce pagefault logging Lucas De Marchi
2025-11-10 23:47 ` Summers, Stuart
2025-11-07 18:10 ` Lucas De Marchi [this message]
2025-11-07 18:51 ` [PATCH 2/2] drm/xe/xe3p: Add support for prefetch page fault Matt Roper
2025-11-07 19:16 ` Lucas De Marchi
2025-11-11 0:03 ` Summers, Stuart
2025-11-07 18:18 ` ✓ CI.KUnit: success for drm/xe/xe3p: Add page fault prefetch bit handling Patchwork
2025-11-07 19:17 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-09 3:01 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251107-pagefault-prefetch-v1-2-93291d619126@intel.com \
--to=lucas.demarchi@intel.com \
--cc=francois.dugast@intel.com \
--cc=gwan-gyeong.mun@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
--cc=matthew.d.roper@intel.com \
--cc=priyanka.dandamudi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).