Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <stuart.summers@intel.com>,
	<francois.dugast@intel.com>
Subject: Re: [PATCH v4 5/7] drm/xe: Implement xe_pagefault_queue_work
Date: Tue, 4 Nov 2025 07:50:50 -0800
Message-ID: <aQog2h+NLPSeK5Kg@lstrano-desk.jf.intel.com>
In-Reply-To: <3tzdsapjzs7luwkv5pgmrdc6tuj2v2aphudijkib4jsf5uduz4@tbqopp765dft>

On Tue, Nov 04, 2025 at 09:01:35AM -0600, Lucas De Marchi wrote:
> On Fri, Oct 31, 2025 at 09:54:14AM -0700, Matthew Brost wrote:
> > Implement a worker that services page faults, using the same
> > implementation as in xe_gt_pagefault.c.
> > 
> > v2:
> > - Rebase on exhaustive eviction changes
> > - Include engine instance in debug prints (Stuart)
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > Reviewed-by: Stuart Summers <stuart.summers@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_pagefault.c | 235 +++++++++++++++++++++++++++++-
> > 1 file changed, 234 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
> > index 194e647a8af6..3ac042c54e8f 100644
> > --- a/drivers/gpu/drm/xe/xe_pagefault.c
> > +++ b/drivers/gpu/drm/xe/xe_pagefault.c
> > @@ -5,12 +5,20 @@
> > 
> > #include <linux/circ_buf.h>
> > 
> > +#include <drm/drm_exec.h>
> > #include <drm/drm_managed.h>
> > 
> > +#include "xe_bo.h"
> > #include "xe_device.h"
> > +#include "xe_gt_printk.h"
> > #include "xe_gt_types.h"
> > +#include "xe_gt_stats.h"
> > +#include "xe_hw_engine.h"
> > #include "xe_pagefault.h"
> > #include "xe_pagefault_types.h"
> > +#include "xe_svm.h"
> > +#include "xe_trace_bo.h"
> > +#include "xe_vm.h"
> > 
> > /**
> >  * DOC: Xe page faults
> > @@ -32,9 +40,234 @@ static int xe_pagefault_entry_size(void)
> > 	return roundup_pow_of_two(sizeof(struct xe_pagefault));
> > }
> > 
> > +static int xe_pagefault_begin(struct drm_exec *exec, struct xe_vma *vma,
> > +			      struct xe_vram_region *vram, bool need_vram_move)
> > +{
> > +	struct xe_bo *bo = xe_vma_bo(vma);
> > +	struct xe_vm *vm = xe_vma_vm(vma);
> > +	int err;
> > +
> > +	err = xe_vm_lock_vma(exec, vma);
> > +	if (err)
> > +		return err;
> > +
> > +	if (!bo)
> > +		return 0;
> > +
> > +	return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
> > +		xe_bo_validate(bo, vm, true, exec);
> > +}
> > +
> > +static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma,
> > +				   bool atomic)
> > +{
> > +	struct xe_vm *vm = xe_vma_vm(vma);
> > +	struct xe_tile *tile = gt_to_tile(gt);
> > +	struct xe_validation_ctx ctx;
> > +	struct drm_exec exec;
> > +	struct dma_fence *fence;
> > +	int err, needs_vram;
> > +
> > +	lockdep_assert_held_write(&vm->lock);
> > +
> > +	needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
> > +	if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
> > +		return needs_vram < 0 ? needs_vram : -EACCES;
> > +
> > +	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
> > +	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
> > +			 xe_vma_size(vma) / SZ_1K);
> > +
> > +	trace_xe_vma_pagefault(vma);
> > +
> > +	/* Check if VMA is valid, opportunistic check only */
> > +	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
> > +					vma->tile_invalidated) && !atomic)
> > +		return 0;
> > +
> > +retry_userptr:
> 
> I realize this is a copy-paste-diverge from xe_gt_pagefault,
> but this backwards goto is ugly for no reason.
> 
> it could be either
> 
> 	do {
> 		...
> 	} while (err == -EAGAIN)
> 
> or a separate function
> 
> 	while (err == -EAGAIN)
> 		err = try_pagefault_handle_userptr(...)
> 
> this can be done on top though.
> 

Let's get this in, then rework the logic a bit for clarity.
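
Something along these lines, maybe (rough, untested sketch;
xe_pagefault_handle_vma_locked would be a new helper wrapping the
drm_exec/validation section, name TBD):

	int err = 0;

	do {
		/* Repin userptr pages before each attempt, as today */
		if (xe_vma_is_userptr(vma) &&
		    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
			if (err)
				return err;
		}

		/* Lock VM and BOs dma-resv, validate, and rebind as above */
		err = xe_pagefault_handle_vma_locked(gt, vma, needs_vram,
						     atomic);
	} while (err == -EAGAIN);

	return err;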

Matt

> Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
> 
> Lucas De Marchi
> 
> > +	if (xe_vma_is_userptr(vma) &&
> > +	    xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
> > +		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
> > +
> > +		err = xe_vma_userptr_pin_pages(uvma);
> > +		if (err)
> > +			return err;
> > +	}
> > +
> > +	/* Lock VM and BOs dma-resv */
> > +	xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
> > +	drm_exec_init(&exec, 0, 0);
> > +	drm_exec_until_all_locked(&exec) {
> > +		err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
> > +					 needs_vram == 1);
> > +		drm_exec_retry_on_contention(&exec);
> > +		xe_validation_retry_on_oom(&ctx, &err);
> > +		if (err)
> > +			goto unlock_dma_resv;
> > +
> > +		/* Bind VMA only to the GT that has faulted */
> > +		trace_xe_vma_pf_bind(vma);
> > +		xe_vm_set_validation_exec(vm, &exec);
> > +		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
> > +		xe_vm_set_validation_exec(vm, NULL);
> > +		if (IS_ERR(fence)) {
> > +			err = PTR_ERR(fence);
> > +			xe_validation_retry_on_oom(&ctx, &err);
> > +			goto unlock_dma_resv;
> > +		}
> > +	}
> > +
> > +	dma_fence_wait(fence, false);
> > +	dma_fence_put(fence);
> > +
> > +unlock_dma_resv:
> > +	xe_validation_ctx_fini(&ctx);
> > +	if (err == -EAGAIN)
> > +		goto retry_userptr;
> > +
> > +	return err;
> > +}
> > +
> > +static bool
> > +xe_pagefault_access_is_atomic(enum xe_pagefault_access_type access_type)
> > +{
> > +	return access_type == XE_PAGEFAULT_ACCESS_TYPE_ATOMIC;
> > +}
> > +
> > +static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid)
> > +{
> > +	struct xe_vm *vm;
> > +
> > +	down_read(&xe->usm.lock);
> > +	vm = xa_load(&xe->usm.asid_to_vm, asid);
> > +	if (vm && xe_vm_in_fault_mode(vm))
> > +		xe_vm_get(vm);
> > +	else
> > +		vm = ERR_PTR(-EINVAL);
> > +	up_read(&xe->usm.lock);
> > +
> > +	return vm;
> > +}
> > +
> > +static int xe_pagefault_service(struct xe_pagefault *pf)
> > +{
> > +	struct xe_gt *gt = pf->gt;
> > +	struct xe_device *xe = gt_to_xe(gt);
> > +	struct xe_vm *vm;
> > +	struct xe_vma *vma = NULL;
> > +	int err;
> > +	bool atomic;
> > +
> > +	/* Producer flagged this fault to be nacked */
> > +	if (pf->consumer.fault_level == XE_PAGEFAULT_LEVEL_NACK)
> > +		return -EFAULT;
> > +
> > +	vm = xe_pagefault_asid_to_vm(xe, pf->consumer.asid);
> > +	if (IS_ERR(vm))
> > +		return PTR_ERR(vm);
> > +
> > +	/*
> > +	 * TODO: Change to read lock? Using write lock for simplicity.
> > +	 */
> > +	down_write(&vm->lock);
> > +
> > +	if (xe_vm_is_closed(vm)) {
> > +		err = -ENOENT;
> > +		goto unlock_vm;
> > +	}
> > +
> > +	vma = xe_vm_find_vma_by_addr(vm, pf->consumer.page_addr);
> > +	if (!vma) {
> > +		err = -EINVAL;
> > +		goto unlock_vm;
> > +	}
> > +
> > +	atomic = xe_pagefault_access_is_atomic(pf->consumer.access_type);
> > +
> > +	if (xe_vma_is_cpu_addr_mirror(vma))
> > +		err = xe_svm_handle_pagefault(vm, vma, gt,
> > +					      pf->consumer.page_addr, atomic);
> > +	else
> > +		err = xe_pagefault_handle_vma(gt, vma, atomic);
> > +
> > +unlock_vm:
> > +	if (!err)
> > +		vm->usm.last_fault_vma = vma;
> > +	up_write(&vm->lock);
> > +	xe_vm_put(vm);
> > +
> > +	return err;
> > +}
> > +
> > +static bool xe_pagefault_queue_pop(struct xe_pagefault_queue *pf_queue,
> > +				   struct xe_pagefault *pf)
> > +{
> > +	bool found_fault = false;
> > +
> > +	spin_lock_irq(&pf_queue->lock);
> > +	if (pf_queue->tail != pf_queue->head) {
> > +		memcpy(pf, pf_queue->data + pf_queue->tail, sizeof(*pf));
> > +		pf_queue->tail = (pf_queue->tail + xe_pagefault_entry_size()) %
> > +			pf_queue->size;
> > +		found_fault = true;
> > +	}
> > +	spin_unlock_irq(&pf_queue->lock);
> > +
> > +	return found_fault;
> > +}
> > +
> > +static void xe_pagefault_print(struct xe_pagefault *pf)
> > +{
> > +	xe_gt_dbg(pf->gt, "\n\tASID: %d\n"
> > +		  "\tFaulted Address: 0x%08x%08x\n"
> > +		  "\tFaultType: %d\n"
> > +		  "\tAccessType: %d\n"
> > +		  "\tFaultLevel: %d\n"
> > +		  "\tEngineClass: %d %s\n"
> > +		  "\tEngineInstance: %d\n",
> > +		  pf->consumer.asid,
> > +		  upper_32_bits(pf->consumer.page_addr),
> > +		  lower_32_bits(pf->consumer.page_addr),
> > +		  pf->consumer.fault_type,
> > +		  pf->consumer.access_type,
> > +		  pf->consumer.fault_level,
> > +		  pf->consumer.engine_class,
> > +		  xe_hw_engine_class_to_str(pf->consumer.engine_class),
> > +		  pf->consumer.engine_instance);
> > +}
> > +
> > static void xe_pagefault_queue_work(struct work_struct *w)
> > {
> > -	/* TODO: Implement */
> > +	struct xe_pagefault_queue *pf_queue =
> > +		container_of(w, typeof(*pf_queue), worker);
> > +	struct xe_pagefault pf;
> > +	unsigned long threshold;
> > +
> > +#define USM_QUEUE_MAX_RUNTIME_MS      20
> > +	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
> > +
> > +	while (xe_pagefault_queue_pop(pf_queue, &pf)) {
> > +		int err;
> > +
> > +		if (!pf.gt)	/* Fault squashed during reset */
> > +			continue;
> > +
> > +		err = xe_pagefault_service(&pf);
> > +		if (err) {
> > +			xe_pagefault_print(&pf);
> > +			xe_gt_dbg(pf.gt, "Fault response: Unsuccessful %pe\n",
> > +				  ERR_PTR(err));
> > +		}
> > +
> > +		pf.producer.ops->ack_fault(&pf, err);
> > +
> > +		if (time_after(jiffies, threshold)) {
> > +			queue_work(gt_to_xe(pf.gt)->usm.pf_wq, w);
> > +			break;
> > +		}
> > +	}
> > +#undef USM_QUEUE_MAX_RUNTIME_MS
> > }
> > 
> > static int xe_pagefault_queue_init(struct xe_device *xe,
> > -- 
> > 2.34.1
> > 

