From: Matthew Brost <matthew.brost@intel.com>
To: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH] drm/xe: Add timeout to preempt fences
Date: Tue, 25 Jun 2024 05:21:28 +0000
Message-ID: <ZnpT2M4SH5TWzqTS@DUT025-TGLU.fm.intel.com>
In-Reply-To: <ZnpSpcRuM0dUgu3S@nvishwa1-DESK>

On Mon, Jun 24, 2024 at 10:16:21PM -0700, Niranjana Vishwanathapura wrote:
> On Mon, Jun 24, 2024 at 03:48:44PM -0700, Matthew Brost wrote:
> > To adhere to the dma-fencing rule that fences must signal within a
> > reasonable amount of time, add a 5 second timeout to preempt fences. If
> > this timeout occurs, kill the associated VM, as this is fatal to the VM.
> > 
> > Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_exec_queue_types.h |  6 ++--
> > drivers/gpu/drm/xe/xe_execlist.c         |  3 +-
> > drivers/gpu/drm/xe/xe_guc_submit.c       | 35 ++++++++++++++++++++----
> > drivers/gpu/drm/xe/xe_preempt_fence.c    | 14 +++++++++-
> > drivers/gpu/drm/xe/xe_vm.c               | 10 ++++++-
> > drivers/gpu/drm/xe/xe_vm.h               |  2 ++
> > 6 files changed, 59 insertions(+), 11 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > index 201588ec33c3..1e51c978db7a 100644
> > --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > @@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
> > 	int (*suspend)(struct xe_exec_queue *q);
> > 	/**
> > 	 * @suspend_wait: Wait for an exec queue to suspend executing, should be
> > -	 * call after suspend.
> > +	 * called after suspend. This is in the dma-fencing path, thus it must
> > +	 * return within a reasonable amount of time. A non-zero return indicates
> > +	 * an error waiting for suspend.
> > 	 */
> > -	void (*suspend_wait)(struct xe_exec_queue *q);
> > +	int (*suspend_wait)(struct xe_exec_queue *q);
> > 	/**
> > 	 * @resume: Resume exec queue execution, exec queue must be in a suspended
> > 	 * state and dma fence returned from most recent suspend call must be
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > index db906117db6d..7502e3486eaf 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > @@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> > 	return 0;
> > }
> > 
> > -static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > +static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > 
> > {
> > 	/* NIY */
> > +	return 0;
> > }
> > 
> > static void execlist_exec_queue_resume(struct xe_exec_queue *q)
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 373447758a60..56e7a340696e 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -1301,6 +1301,16 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
> > 	kfree(msg);
> > }
> > 
> > +static void __suspend_fence_signal(struct xe_exec_queue *q)
> > +{
> > +	if (!q->guc->suspend_pending)
> > +		return;
> > +
> > +	q->guc->suspend_pending = false;
> > +	smp_wmb();
> > +	wake_up(&q->guc->suspend_wait);
> > +}
> > +
> > static void suspend_fence_signal(struct xe_exec_queue *q)
> > {
> > 	struct xe_guc *guc = exec_queue_to_guc(q);
> > @@ -1310,9 +1320,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
> > 		  guc_read_stopped(guc));
> > 	xe_assert(xe, q->guc->suspend_pending);
> > 
> > -	q->guc->suspend_pending = false;
> > -	smp_wmb();
> > -	wake_up(&q->guc->suspend_wait);
> > +	__suspend_fence_signal(q);
> > }
> > 
> > static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> > @@ -1465,6 +1473,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
> > {
> > 	trace_xe_exec_queue_kill(q);
> > 	set_exec_queue_killed(q);
> > +	__suspend_fence_signal(q);
> > 	xe_guc_exec_queue_trigger_cleanup(q);
> > }
> > 
> > @@ -1561,12 +1570,26 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> > 	return 0;
> > }
> > 
> > -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > {
> > 	struct xe_guc *guc = exec_queue_to_guc(q);
> > +	int ret;
> > +
> > +	ret = wait_event_timeout(q->guc->suspend_wait,
> > +				 !q->guc->suspend_pending ||
> > +				 exec_queue_killed(q) ||
> > +				 guc_read_stopped(guc),
> > +				 HZ * 5);
> 
> Do we need exec_queue_killed(q) here, as we are already checking
> for '!q->guc->suspend_pending'?
> 

Probably not? There might be a goofy race where suspend_pending is set
after the exec queue is killed, though; I'd have to really think about
this. For safety I'd rather keep it as is.
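
Roughly the interleaving I have in mind (a hand-wavy sketch, not a
verified trace; all names are from this patch):

    CPU0 (suspend path)                 CPU1 (kill path)
    -------------------                 ----------------
                                        guc_exec_queue_kill()
                                          set_exec_queue_killed(q)
                                          __suspend_fence_signal(q)
                                            /* suspend_pending still false,
                                               returns early */
    guc_exec_queue_suspend()
      q->guc->suspend_pending = true
    guc_exec_queue_suspend_wait()
      wait_event_timeout(...)

Nothing clears suspend_pending again after that, so without the
exec_queue_killed(q) check the waiter would sit out the full 5 second
timeout and return -ETIME for a queue that is already dead. No idea if
this window is actually reachable, hence keeping the check for safety.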

Matt

> Other than that, the change looks fine to me.
> 
> Niranjana
> 
> > 
> > -	wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
> > -		   guc_read_stopped(guc));
> > +	if (!ret) {
> > +		xe_gt_warn(guc_to_gt(guc),
> > +			   "Suspend fence, guc_id=%d, failed to respond",
> > +			   q->guc->id);
> > +		/* XXX: Trigger GT reset? */
> > +		return -ETIME;
> > +	}
> > +
> > +	return 0;
> > }
> > 
> > static void guc_exec_queue_resume(struct xe_exec_queue *q)
> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > index e8b8ae5c6485..8356d9798206 100644
> > --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > @@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
> > 	struct xe_preempt_fence *pfence =
> > 		container_of(w, typeof(*pfence), preempt_work);
> > 	struct xe_exec_queue *q = pfence->q;
> > +	int err = 0;
> > 
> > 	if (pfence->error)
> > 		dma_fence_set_error(&pfence->base, pfence->error);
> > +	else if (!q->ops->reset_status(q))
> > +		err = q->ops->suspend_wait(q);
> > 	else
> > -		q->ops->suspend_wait(q);
> > +		dma_fence_set_error(&pfence->base, -ENOENT);
> > +
> > +	if (err) {
> > +		dma_fence_set_error(&pfence->base, err);
> > +
> > +		down_write(&q->vm->lock);
> > +		xe_vm_kill(q->vm, false);
> > +		up_write(&q->vm->lock);
> > +	}
> > +
> > 
> > 	dma_fence_signal(&pfence->base);
> > 	/*
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 5b166fa03684..6b8ff13f0aff 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
> > 
> > #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
> > 
> > -static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > +/**
> > + * xe_vm_kill() - VM Kill
> > + * @vm: The VM.
> > + * @unlocked: Flag indicating the VM's dma-resv is not held
> > + *
> > + * Kill the VM by setting the banned flag, marking it unavailable for use.
> > + * If in preempt fence mode, also kill all exec queues attached to the VM.
> > + */
> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > {
> > 	struct xe_exec_queue *q;
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index b481608b12f1..c864dba35e1d 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
> > 	return drm_gpuvm_resv(&vm->gpuvm);
> > }
> > 
> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked);
> > +
> > /**
> >  * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
> >  * @vm: The vm
> > -- 
> > 2.34.1
> > 
