* [PATCH v2] drm/xe: Add timeout to preempt fences
@ 2024-06-25 5:50 Matthew Brost
0 siblings, 0 replies; 5+ messages in thread
From: Matthew Brost @ 2024-06-25 5:50 UTC (permalink / raw)
To: intel-xe
To adhere to the dma-fencing rule that fences must signal within a
reasonable amount of time, add a 5 second timeout to preempt fences. If
this timeout occurs, kill the associated VM as this is fatal to the VM.
v2:
- Add comment for smp_wmb (Checkpatch)
- Fix kernel doc typo (Inspection)
- Add comment for killed check (Niranjana)
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
drivers/gpu/drm/xe/xe_execlist.c | 3 +-
drivers/gpu/drm/xe/xe_guc_submit.c | 41 ++++++++++++++++++++----
drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++-
drivers/gpu/drm/xe/xe_vm.c | 10 +++++-
drivers/gpu/drm/xe/xe_vm.h | 2 ++
6 files changed, 65 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..1e51c978db7a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an exec queue to suspend executing, should be
- * call after suspend.
+ * called after suspend. In the dma-fencing path this must return within a
+ * reasonable amount of time. A non-zero return shall indicate an error
+ * waiting for suspend.
*/
- void (*suspend_wait)(struct xe_exec_queue *q);
+ int (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..7502e3486eaf 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 373447758a60..9df97ee94fca 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1301,6 +1301,17 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
kfree(msg);
}
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+ if (!q->guc->suspend_pending)
+ return;
+
+ q->guc->suspend_pending = false;
+ smp_wmb(); /* Ensure suspend_pending change is visible */
+
+ wake_up(&q->guc->suspend_wait);
+}
+
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1310,9 +1321,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
- q->guc->suspend_pending = false;
- smp_wmb();
- wake_up(&q->guc->suspend_wait);
+ __suspend_fence_signal(q);
}
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1465,6 +1474,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
trace_xe_exec_queue_kill(q);
set_exec_queue_killed(q);
+ __suspend_fence_signal(q);
xe_guc_exec_queue_trigger_cleanup(q);
}
@@ -1561,12 +1571,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ /*
+ * Likely don't need to check exec_queue_killed() as we clear
+ * suspend_pending upon kill, but to be paranoid about races in which
+ * suspend_pending is set after kill, also check killed here.
+ */
+ ret = wait_event_timeout(q->guc->suspend_wait,
+ !q->guc->suspend_pending ||
+ exec_queue_killed(q) ||
+ guc_read_stopped(guc),
+ HZ * 5);
- wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
- guc_read_stopped(guc));
+ if (!ret) {
+ xe_gt_warn(guc_to_gt(guc),
+ "Suspend fence, guc_id=%d, failed to respond",
+ q->guc->id);
+ /* XXX: Trigger GT reset? */
+ return -ETIME;
+ }
+
+ return 0;
}
static void guc_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index e8b8ae5c6485..8356d9798206 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
struct xe_preempt_fence *pfence =
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
+ int err = 0;
if (pfence->error)
dma_fence_set_error(&pfence->base, pfence->error);
+ else if (!q->ops->reset_status(q))
+ err = q->ops->suspend_wait(q);
else
- q->ops->suspend_wait(q);
+ dma_fence_set_error(&pfence->base, -ENOENT);
+
+ if (err) {
+ dma_fence_set_error(&pfence->base, err);
+
+ down_write(&q->vm->lock);
+ xe_vm_kill(q->vm, false);
+ up_write(&q->vm->lock);
+ }
+
dma_fence_signal(&pfence->base);
/*
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..e7c15b7877b1 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+/**
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicates the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer available
+ * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
+ */
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b481608b12f1..c864dba35e1d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
--
2.34.1
* [PATCH v2] drm/xe: Add timeout to preempt fences
@ 2024-06-25 5:51 Matthew Brost
2024-06-25 13:03 ` Matthew Auld
0 siblings, 1 reply; 5+ messages in thread
From: Matthew Brost @ 2024-06-25 5:51 UTC (permalink / raw)
To: intel-xe
To adhere to the dma-fencing rule that fences must signal within a
reasonable amount of time, add a 5 second timeout to preempt fences. If
this timeout occurs, kill the associated VM as this is fatal to the VM.
v2:
- Add comment for smp_wmb (Checkpatch)
- Fix kernel doc typo (Inspection)
- Add comment for killed check (Niranjana)
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
drivers/gpu/drm/xe/xe_execlist.c | 3 +-
drivers/gpu/drm/xe/xe_guc_submit.c | 41 ++++++++++++++++++++----
drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++-
drivers/gpu/drm/xe/xe_vm.c | 10 +++++-
drivers/gpu/drm/xe/xe_vm.h | 2 ++
6 files changed, 65 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..1e51c978db7a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an exec queue to suspend executing, should be
- * call after suspend.
+ * called after suspend. In the dma-fencing path this must return within a
+ * reasonable amount of time. A non-zero return shall indicate an error
+ * waiting for suspend.
*/
- void (*suspend_wait)(struct xe_exec_queue *q);
+ int (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..7502e3486eaf 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 373447758a60..9df97ee94fca 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1301,6 +1301,17 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
kfree(msg);
}
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+ if (!q->guc->suspend_pending)
+ return;
+
+ q->guc->suspend_pending = false;
+ smp_wmb(); /* Ensure suspend_pending change is visible */
+
+ wake_up(&q->guc->suspend_wait);
+}
+
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1310,9 +1321,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
- q->guc->suspend_pending = false;
- smp_wmb();
- wake_up(&q->guc->suspend_wait);
+ __suspend_fence_signal(q);
}
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1465,6 +1474,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
trace_xe_exec_queue_kill(q);
set_exec_queue_killed(q);
+ __suspend_fence_signal(q);
xe_guc_exec_queue_trigger_cleanup(q);
}
@@ -1561,12 +1571,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ /*
+ * Likely don't need to check exec_queue_killed() as we clear
+ * suspend_pending upon kill, but to be paranoid about races in which
+ * suspend_pending is set after kill, also check killed here.
+ */
+ ret = wait_event_timeout(q->guc->suspend_wait,
+ !q->guc->suspend_pending ||
+ exec_queue_killed(q) ||
+ guc_read_stopped(guc),
+ HZ * 5);
- wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
- guc_read_stopped(guc));
+ if (!ret) {
+ xe_gt_warn(guc_to_gt(guc),
+ "Suspend fence, guc_id=%d, failed to respond",
+ q->guc->id);
+ /* XXX: Trigger GT reset? */
+ return -ETIME;
+ }
+
+ return 0;
}
static void guc_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index e8b8ae5c6485..8356d9798206 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
struct xe_preempt_fence *pfence =
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
+ int err = 0;
if (pfence->error)
dma_fence_set_error(&pfence->base, pfence->error);
+ else if (!q->ops->reset_status(q))
+ err = q->ops->suspend_wait(q);
else
- q->ops->suspend_wait(q);
+ dma_fence_set_error(&pfence->base, -ENOENT);
+
+ if (err) {
+ dma_fence_set_error(&pfence->base, err);
+
+ down_write(&q->vm->lock);
+ xe_vm_kill(q->vm, false);
+ up_write(&q->vm->lock);
+ }
+
dma_fence_signal(&pfence->base);
/*
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..e7c15b7877b1 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+/**
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicates the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer available
+ * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
+ */
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b481608b12f1..c864dba35e1d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
--
2.34.1
* Re: [PATCH v2] drm/xe: Add timeout to preempt fences
2024-06-25 5:51 Matthew Brost
@ 2024-06-25 13:03 ` Matthew Auld
2024-06-25 15:46 ` Matthew Brost
0 siblings, 1 reply; 5+ messages in thread
From: Matthew Auld @ 2024-06-25 13:03 UTC (permalink / raw)
To: Matthew Brost, intel-xe
Hi,
On 25/06/2024 06:51, Matthew Brost wrote:
> To adhere to dma fencing rules that fences must signal within a
> reasonable amount of time, add a 5 second timeout to preempt fences. If
> this timeout occurs, kill the associated VM as this fatal to the VM.
>
> v2:
> - Add comment for smp_wmb (Checkpatch)
> - Fix kernel doc typo (Inspection)
> - Add comment for killed check (Niranjana)
>
> Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
> drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
> drivers/gpu/drm/xe/xe_execlist.c | 3 +-
> drivers/gpu/drm/xe/xe_guc_submit.c | 41 ++++++++++++++++++++----
> drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++-
> drivers/gpu/drm/xe/xe_vm.c | 10 +++++-
> drivers/gpu/drm/xe/xe_vm.h | 2 ++
> 6 files changed, 65 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> index 201588ec33c3..1e51c978db7a 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> @@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
> int (*suspend)(struct xe_exec_queue *q);
> /**
> * @suspend_wait: Wait for an exec queue to suspend executing, should be
> - * call after suspend.
> + * call after suspend. In dma-fencing path thus must return within a
> + * reasonable amount of time. A non-zero return shall indicate an error
> + * waiting for suspend.
> */
> - void (*suspend_wait)(struct xe_exec_queue *q);
> + int (*suspend_wait)(struct xe_exec_queue *q);
> /**
> * @resume: Resume exec queue execution, exec queue must be in a suspended
> * state and dma fence returned from most recent suspend call must be
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index db906117db6d..7502e3486eaf 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> return 0;
> }
>
> -static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> +static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
>
> {
> /* NIY */
> + return 0;
> }
>
> static void execlist_exec_queue_resume(struct xe_exec_queue *q)
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 373447758a60..9df97ee94fca 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -1301,6 +1301,17 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
> kfree(msg);
> }
>
> +static void __suspend_fence_signal(struct xe_exec_queue *q)
> +{
> + if (!q->guc->suspend_pending)
> + return;
> +
> + q->guc->suspend_pending = false;
> + smp_wmb(); /* Ensure suspend_pending change is visible */
I guess it was already like that, but where is the matching smp_rmb()?
If adding smp_wmb(), there should usually be a matching barrier on the
reader side.
If this is just a simple wake_up() / wait_event() pattern with a single
dependent store/load vs wait/wakeup, then I don't think we need an explicit
barrier; it should be handled already by the API, IIRC.
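Roughly the pattern I mean, as an untested sketch (this is not code from
the patch, just the bare wake_up()/wait_event() idiom):

#include <linux/wait.h>

static bool pending;
static DECLARE_WAIT_QUEUE_HEAD(pending_wq);

static void signal_side(void)
{
        /* plain store; wake_up() orders it against the waiter's check */
        pending = false;
        wake_up(&pending_wq);
}

static void wait_side(void)
{
        /* wait_event() re-checks the condition with the required ordering */
        wait_event(pending_wq, !pending);
}

i.e. a single flag only ever checked through the wait_event()/wake_up()
pair, where the API is already meant to give you the store vs. load
ordering for the condition, so the extra smp_wmb() doesn't buy anything.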
> +
> + wake_up(&q->guc->suspend_wait);
> +}
> +
> static void suspend_fence_signal(struct xe_exec_queue *q)
> {
> struct xe_guc *guc = exec_queue_to_guc(q);
> @@ -1310,9 +1321,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
> guc_read_stopped(guc));
> xe_assert(xe, q->guc->suspend_pending);
>
> - q->guc->suspend_pending = false;
> - smp_wmb();
> - wake_up(&q->guc->suspend_wait);
> + __suspend_fence_signal(q);
> }
>
> static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> @@ -1465,6 +1474,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
> {
> trace_xe_exec_queue_kill(q);
> set_exec_queue_killed(q);
> + __suspend_fence_signal(q);
> xe_guc_exec_queue_trigger_cleanup(q);
> }
>
> @@ -1561,12 +1571,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> return 0;
> }
>
> -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> {
> struct xe_guc *guc = exec_queue_to_guc(q);
> + int ret;
> +
> + /*
> + * Likely don't need to check exec_queue_killed() as we clear
> + * suspend_pending upon kill but to be paranoid but races in which
> + * suspend_pending is set after kill also check kill here.
> + */
> + ret = wait_event_timeout(q->guc->suspend_wait,
> + !q->guc->suspend_pending ||
> + exec_queue_killed(q) ||
> + guc_read_stopped(guc),
> + HZ * 5);
>
> - wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
> - guc_read_stopped(guc));
> + if (!ret) {
> + xe_gt_warn(guc_to_gt(guc),
> + "Suspend fence, guc_id=%d, failed to respond",
> + q->guc->id);
> + /* XXX: Trigger GT reset? */
> + return -ETIME;
> + }
> +
> + return 0;
> }
>
> static void guc_exec_queue_resume(struct xe_exec_queue *q)
> diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
> index e8b8ae5c6485..8356d9798206 100644
> --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
> +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
> @@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
> struct xe_preempt_fence *pfence =
> container_of(w, typeof(*pfence), preempt_work);
> struct xe_exec_queue *q = pfence->q;
> + int err = 0;
>
> if (pfence->error)
> dma_fence_set_error(&pfence->base, pfence->error);
> + else if (!q->ops->reset_status(q))
> + err = q->ops->suspend_wait(q);
> else
> - q->ops->suspend_wait(q);
> + dma_fence_set_error(&pfence->base, -ENOENT);
> +
> + if (err) {
> + dma_fence_set_error(&pfence->base, err);
> +
> + down_write(&q->vm->lock);
> + xe_vm_kill(q->vm, false);
> + up_write(&q->vm->lock);
I think grabbing vm->lock will deadlock here, right? Calling vm_kill
might also be scary? lockdep will not see it unless we have some way of
triggering the error path here. For reference:
3cd1585e57908b6efcd967465ef7685f40b2a294
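Hand-wavy sketch of the inversion I'm worried about, assuming something
(e.g. the rebind worker) can hold vm->lock while waiting for this preempt
fence to signal:

        /* context A: holds vm->lock, waits on the preempt fence */
        down_write(&vm->lock);
        ...
        dma_fence_wait(&pfence->base, false);   /* needs the work item below */

        /* context B: preempt_fence_work_func() */
        err = q->ops->suspend_wait(q);          /* times out */
        down_write(&q->vm->lock);               /* blocked behind context A */

so the signalling path ends up taking a lock that is held by someone
waiting on the very fence it is supposed to signal.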
> + }
> +
>
> dma_fence_signal(&pfence->base);
> /*
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 5b166fa03684..e7c15b7877b1 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
>
> #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
>
> -static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> +/**
> + * xe_vm_kill() - VM Kill
> + * @vm: The VM.
> + * @unlocked: Flag indicates the VM's dma-resv is not held
> + *
> + * Kill the VM by setting banned flag indicated VM is no longer available for
> + * use. If in preempt fence mode, also kill all exec queue attached to the VM.
> + */
> +void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> {
> struct xe_exec_queue *q;
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index b481608b12f1..c864dba35e1d 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
> return drm_gpuvm_resv(&vm->gpuvm);
> }
>
> +void xe_vm_kill(struct xe_vm *vm, bool unlocked);
> +
> /**
> * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
> * @vm: The vm
* Re: [PATCH v2] drm/xe: Add timeout to preempt fences
2024-06-25 13:03 ` Matthew Auld
@ 2024-06-25 15:46 ` Matthew Brost
2024-06-25 16:01 ` Matthew Brost
0 siblings, 1 reply; 5+ messages in thread
From: Matthew Brost @ 2024-06-25 15:46 UTC (permalink / raw)
To: Matthew Auld; +Cc: intel-xe
On Tue, Jun 25, 2024 at 02:03:38PM +0100, Matthew Auld wrote:
> Hi,
>
> On 25/06/2024 06:51, Matthew Brost wrote:
> > To adhere to dma fencing rules that fences must signal within a
> > reasonable amount of time, add a 5 second timeout to preempt fences. If
> > this timeout occurs, kill the associated VM as this fatal to the VM.
> >
> > v2:
> > - Add comment for smp_wmb (Checkpatch)
> > - Fix kernel doc typo (Inspection)
> > - Add comment for killed check (Niranjana)
> >
> > Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
> > drivers/gpu/drm/xe/xe_execlist.c | 3 +-
> > drivers/gpu/drm/xe/xe_guc_submit.c | 41 ++++++++++++++++++++----
> > drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++-
> > drivers/gpu/drm/xe/xe_vm.c | 10 +++++-
> > drivers/gpu/drm/xe/xe_vm.h | 2 ++
> > 6 files changed, 65 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > index 201588ec33c3..1e51c978db7a 100644
> > --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > @@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
> > int (*suspend)(struct xe_exec_queue *q);
> > /**
> > * @suspend_wait: Wait for an exec queue to suspend executing, should be
> > - * call after suspend.
> > + * call after suspend. In dma-fencing path thus must return within a
> > + * reasonable amount of time. A non-zero return shall indicate an error
> > + * waiting for suspend.
> > */
> > - void (*suspend_wait)(struct xe_exec_queue *q);
> > + int (*suspend_wait)(struct xe_exec_queue *q);
> > /**
> > * @resume: Resume exec queue execution, exec queue must be in a suspended
> > * state and dma fence returned from most recent suspend call must be
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > index db906117db6d..7502e3486eaf 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > @@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> > return 0;
> > }
> > -static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > +static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > {
> > /* NIY */
> > + return 0;
> > }
> > static void execlist_exec_queue_resume(struct xe_exec_queue *q)
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 373447758a60..9df97ee94fca 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -1301,6 +1301,17 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
> > kfree(msg);
> > }
> > +static void __suspend_fence_signal(struct xe_exec_queue *q)
> > +{
> > + if (!q->guc->suspend_pending)
> > + return;
> > +
> > + q->guc->suspend_pending = false;
> > + smp_wmb(); /* Ensure suspend_pending change is visible */
>
> I guess it was already like that, but where is the matching smp_rmb()? If
> adding smp_wmb() there should usually always be a barrier on the reader
> side.
>
> If this is just simple wake_up() / wait_event() pattern with single
> dependant store/load vs wait/wakeup then I don't think we need explicit
> barrier, it should be handled already by the api IIRC.
>
Yea, I knew some smp_* barrier usage was wrong. Let me drop this.
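So just something like this in the respin (sketch):

static void __suspend_fence_signal(struct xe_exec_queue *q)
{
        if (!q->guc->suspend_pending)
                return;

        q->guc->suspend_pending = false;
        wake_up(&q->guc->suspend_wait);
}

i.e. rely on the wait_event() / wake_up() pair for the ordering of the
suspend_pending store rather than an explicit barrier.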
> > +
> > + wake_up(&q->guc->suspend_wait);
> > +}
> > +
> > static void suspend_fence_signal(struct xe_exec_queue *q)
> > {
> > struct xe_guc *guc = exec_queue_to_guc(q);
> > @@ -1310,9 +1321,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
> > guc_read_stopped(guc));
> > xe_assert(xe, q->guc->suspend_pending);
> > - q->guc->suspend_pending = false;
> > - smp_wmb();
> > - wake_up(&q->guc->suspend_wait);
> > + __suspend_fence_signal(q);
> > }
> > static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> > @@ -1465,6 +1474,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
> > {
> > trace_xe_exec_queue_kill(q);
> > set_exec_queue_killed(q);
> > + __suspend_fence_signal(q);
> > xe_guc_exec_queue_trigger_cleanup(q);
> > }
> > @@ -1561,12 +1571,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> > return 0;
> > }
> > -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > {
> > struct xe_guc *guc = exec_queue_to_guc(q);
> > + int ret;
> > +
> > + /*
> > + * Likely don't need to check exec_queue_killed() as we clear
> > + * suspend_pending upon kill but to be paranoid but races in which
> > + * suspend_pending is set after kill also check kill here.
> > + */
> > + ret = wait_event_timeout(q->guc->suspend_wait,
> > + !q->guc->suspend_pending ||
> > + exec_queue_killed(q) ||
> > + guc_read_stopped(guc),
> > + HZ * 5);
> > - wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
> > - guc_read_stopped(guc));
> > + if (!ret) {
> > + xe_gt_warn(guc_to_gt(guc),
> > + "Suspend fence, guc_id=%d, failed to respond",
> > + q->guc->id);
> > + /* XXX: Trigger GT reset? */
> > + return -ETIME;
> > + }
> > +
> > + return 0;
> > }
> > static void guc_exec_queue_resume(struct xe_exec_queue *q)
> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > index e8b8ae5c6485..8356d9798206 100644
> > --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > @@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
> > struct xe_preempt_fence *pfence =
> > container_of(w, typeof(*pfence), preempt_work);
> > struct xe_exec_queue *q = pfence->q;
> > + int err = 0;
> > if (pfence->error)
> > dma_fence_set_error(&pfence->base, pfence->error);
> > + else if (!q->ops->reset_status(q))
> > + err = q->ops->suspend_wait(q);
> > else
> > - q->ops->suspend_wait(q);
> > + dma_fence_set_error(&pfence->base, -ENOENT);
> > +
> > + if (err) {
> > + dma_fence_set_error(&pfence->base, err);
> > +
> > + down_write(&q->vm->lock);
> > + xe_vm_kill(q->vm, false);
> > + up_write(&q->vm->lock);
>
> I think grabbing vm->lock will deadlock here, right? Calling vm_kill might
> also be scary? lockdep will not see it unless we have some way of triggering
> the error path here. For reference: 3cd1585e57908b6efcd967465ef7685f40b2a294
>
Yea I think you are right. I was thinking we could grab this here as I
thought having a dedicated ordered work queue here allowed the vm->lock
to be safely taken, but that thinking is wrong. Hmm, I need to rethink
this design. I might be able to refactor xe_vm_kill to not require the
vm->lock... Let me play around with this.
Matt
> > + }
> > +
> > dma_fence_signal(&pfence->base);
> > /*
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 5b166fa03684..e7c15b7877b1 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
> > #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
> > -static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > +/**
> > + * xe_vm_kill() - VM Kill
> > + * @vm: The VM.
> > + * @unlocked: Flag indicates the VM's dma-resv is not held
> > + *
> > + * Kill the VM by setting banned flag indicated VM is no longer available for
> > + * use. If in preempt fence mode, also kill all exec queue attached to the VM.
> > + */
> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > {
> > struct xe_exec_queue *q;
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index b481608b12f1..c864dba35e1d 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
> > return drm_gpuvm_resv(&vm->gpuvm);
> > }
> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked);
> > +
> > /**
> > * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
> > * @vm: The vm
* Re: [PATCH v2] drm/xe: Add timeout to preempt fences
2024-06-25 15:46 ` Matthew Brost
@ 2024-06-25 16:01 ` Matthew Brost
0 siblings, 0 replies; 5+ messages in thread
From: Matthew Brost @ 2024-06-25 16:01 UTC (permalink / raw)
To: Matthew Auld; +Cc: intel-xe
On Tue, Jun 25, 2024 at 03:46:17PM +0000, Matthew Brost wrote:
> On Tue, Jun 25, 2024 at 02:03:38PM +0100, Matthew Auld wrote:
> > Hi,
> >
> > On 25/06/2024 06:51, Matthew Brost wrote:
> > > To adhere to dma fencing rules that fences must signal within a
> > > reasonable amount of time, add a 5 second timeout to preempt fences. If
> > > this timeout occurs, kill the associated VM as this fatal to the VM.
> > >
> > > v2:
> > > - Add comment for smp_wmb (Checkpatch)
> > > - Fix kernel doc typo (Inspection)
> > > - Add comment for killed check (Niranjana)
> > >
> > > Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > > ---
> > > drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
> > > drivers/gpu/drm/xe/xe_execlist.c | 3 +-
> > > drivers/gpu/drm/xe/xe_guc_submit.c | 41 ++++++++++++++++++++----
> > > drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++-
> > > drivers/gpu/drm/xe/xe_vm.c | 10 +++++-
> > > drivers/gpu/drm/xe/xe_vm.h | 2 ++
> > > 6 files changed, 65 insertions(+), 11 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > > index 201588ec33c3..1e51c978db7a 100644
> > > --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > > +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > > @@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
> > > int (*suspend)(struct xe_exec_queue *q);
> > > /**
> > > * @suspend_wait: Wait for an exec queue to suspend executing, should be
> > > - * call after suspend.
> > > + * call after suspend. In dma-fencing path thus must return within a
> > > + * reasonable amount of time. A non-zero return shall indicate an error
> > > + * waiting for suspend.
> > > */
> > > - void (*suspend_wait)(struct xe_exec_queue *q);
> > > + int (*suspend_wait)(struct xe_exec_queue *q);
> > > /**
> > > * @resume: Resume exec queue execution, exec queue must be in a suspended
> > > * state and dma fence returned from most recent suspend call must be
> > > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > > index db906117db6d..7502e3486eaf 100644
> > > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > > @@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> > > return 0;
> > > }
> > > -static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > > +static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > > {
> > > /* NIY */
> > > + return 0;
> > > }
> > > static void execlist_exec_queue_resume(struct xe_exec_queue *q)
> > > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > > index 373447758a60..9df97ee94fca 100644
> > > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > > @@ -1301,6 +1301,17 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
> > > kfree(msg);
> > > }
> > > +static void __suspend_fence_signal(struct xe_exec_queue *q)
> > > +{
> > > + if (!q->guc->suspend_pending)
> > > + return;
> > > +
> > > + q->guc->suspend_pending = false;
> > > + smp_wmb(); /* Ensure suspend_pending change is visible */
> >
> > I guess it was already like that, but where is the matching smp_rmb()? If
> > adding smp_wmb() there should usually always be a barrier on the reader
> > side.
> >
> > If this is just simple wake_up() / wait_event() pattern with single
> > dependant store/load vs wait/wakeup then I don't think we need explicit
> > barrier, it should be handled already by the api IIRC.
> >
>
> Yea, I knew some smp_* barrier usage was wrong. Let me drop this.
>
> > > +
> > > + wake_up(&q->guc->suspend_wait);
> > > +}
> > > +
> > > static void suspend_fence_signal(struct xe_exec_queue *q)
> > > {
> > > struct xe_guc *guc = exec_queue_to_guc(q);
> > > @@ -1310,9 +1321,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
> > > guc_read_stopped(guc));
> > > xe_assert(xe, q->guc->suspend_pending);
> > > - q->guc->suspend_pending = false;
> > > - smp_wmb();
> > > - wake_up(&q->guc->suspend_wait);
> > > + __suspend_fence_signal(q);
> > > }
> > > static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> > > @@ -1465,6 +1474,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
> > > {
> > > trace_xe_exec_queue_kill(q);
> > > set_exec_queue_killed(q);
> > > + __suspend_fence_signal(q);
> > > xe_guc_exec_queue_trigger_cleanup(q);
> > > }
> > > @@ -1561,12 +1571,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> > > return 0;
> > > }
> > > -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > > +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > > {
> > > struct xe_guc *guc = exec_queue_to_guc(q);
> > > + int ret;
> > > +
> > > + /*
> > > + * Likely don't need to check exec_queue_killed() as we clear
> > > + * suspend_pending upon kill but to be paranoid but races in which
> > > + * suspend_pending is set after kill also check kill here.
> > > + */
> > > + ret = wait_event_timeout(q->guc->suspend_wait,
> > > + !q->guc->suspend_pending ||
> > > + exec_queue_killed(q) ||
> > > + guc_read_stopped(guc),
> > > + HZ * 5);
> > > - wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
> > > - guc_read_stopped(guc));
> > > + if (!ret) {
> > > + xe_gt_warn(guc_to_gt(guc),
> > > + "Suspend fence, guc_id=%d, failed to respond",
> > > + q->guc->id);
> > > + /* XXX: Trigger GT reset? */
> > > + return -ETIME;
> > > + }
> > > +
> > > + return 0;
> > > }
> > > static void guc_exec_queue_resume(struct xe_exec_queue *q)
> > > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > > index e8b8ae5c6485..8356d9798206 100644
> > > --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
> > > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > > @@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
> > > struct xe_preempt_fence *pfence =
> > > container_of(w, typeof(*pfence), preempt_work);
> > > struct xe_exec_queue *q = pfence->q;
> > > + int err = 0;
> > > if (pfence->error)
> > > dma_fence_set_error(&pfence->base, pfence->error);
> > > + else if (!q->ops->reset_status(q))
> > > + err = q->ops->suspend_wait(q);
> > > else
> > > - q->ops->suspend_wait(q);
> > > + dma_fence_set_error(&pfence->base, -ENOENT);
> > > +
> > > + if (err) {
> > > + dma_fence_set_error(&pfence->base, err);
> > > +
> > > + down_write(&q->vm->lock);
> > > + xe_vm_kill(q->vm, false);
> > > + up_write(&q->vm->lock);
> >
> > I think grabbing vm->lock will deadlock here, right? Calling vm_kill might
> > also be scary? lockdep will not see it unless we have some way of triggering
> > the error path here. For reference: 3cd1585e57908b6efcd967465ef7685f40b2a294
> >
>
> Yea I think you are right. I was thinking we could grab this here as I
> thought having a dedicated ordered work queue here allowed the vm->lock
> to be safely taken about that thinking is wrong. Hmm, I need to rethink
> this design. I might be able to refactor xe_vm_kill to not require the
> vm->lock... Let me play around with this.
>
Scratch what I said wrt dropping the vm->lock requirement for kill. I'll
defer the kill to the preempt rebind worker via checking the dma-fence error
state, set a per-VM flag which indicates suspend_wait should be skipped, and
document that preempt fences must use an ordered wq (at least within a
single VM).
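Rough sketch of that direction (the per-VM flag and helper names below are
placeholders, untested):

        /* preempt_fence_work_func(): never takes vm->lock */
        if (pfence->error)
                dma_fence_set_error(&pfence->base, pfence->error);
        else if (q->vm->preempt.skip_suspend_wait)      /* placeholder flag */
                dma_fence_set_error(&pfence->base, -ETIME);
        else if (!q->ops->reset_status(q)) {
                err = q->ops->suspend_wait(q);
                if (err) {
                        dma_fence_set_error(&pfence->base, err);
                        q->vm->preempt.skip_suspend_wait = true;
                }
        } else {
                dma_fence_set_error(&pfence->base, -ENOENT);
        }
        dma_fence_signal(&pfence->base);

        /* preempt rebind worker: already holds vm->lock, checks fence errors */
        if (preempt_fences_have_error(vm))      /* placeholder helper */
                xe_vm_kill(vm, false);

The ordered wq (per VM) is what makes checking skip_suspend_wait without
extra locking safe: a timed-out fence's worker sets it before the next
fence's work item runs.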
Matt
> Matt
>
> > > + }
> > > +
> > > dma_fence_signal(&pfence->base);
> > > /*
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index 5b166fa03684..e7c15b7877b1 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
> > > #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
> > > -static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > > +/**
> > > + * xe_vm_kill() - VM Kill
> > > + * @vm: The VM.
> > > + * @unlocked: Flag indicates the VM's dma-resv is not held
> > > + *
> > > + * Kill the VM by setting banned flag indicated VM is no longer available for
> > > + * use. If in preempt fence mode, also kill all exec queue attached to the VM.
> > > + */
> > > +void xe_vm_kill(struct xe_vm *vm, bool unlocked)
> > > {
> > > struct xe_exec_queue *q;
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > > index b481608b12f1..c864dba35e1d 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.h
> > > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > > @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
> > > return drm_gpuvm_resv(&vm->gpuvm);
> > > }
> > > +void xe_vm_kill(struct xe_vm *vm, bool unlocked);
> > > +
> > > /**
> > > * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
> > > * @vm: The vm
end of thread, other threads:[~2024-06-25 16:02 UTC | newest]
Thread overview: 5+ messages:
2024-06-25 5:50 [PATCH v2] drm/xe: Add timeout to preempt fences Matthew Brost
-- strict thread matches above, loose matches on Subject: below --
2024-06-25 5:51 Matthew Brost
2024-06-25 13:03 ` Matthew Auld
2024-06-25 15:46 ` Matthew Brost
2024-06-25 16:01 ` Matthew Brost