Intel-XE Archive on lore.kernel.org
* [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue
@ 2023-08-25 13:15 Francois Dugast
  0 siblings, 0 replies; 4+ messages in thread
From: Francois Dugast @ 2023-08-25 13:15 UTC (permalink / raw)
  To: intel-xe; +Cc: Francois Dugast

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
 drivers/gpu/drm/xe/xe_guc_submit.c | 62 +++++++++++++++---------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index c6a9e17d6889..b2414f183a2c 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -53,18 +53,18 @@ exec_queue_to_guc(struct xe_exec_queue *q)
 }
 
 /*
- * Helpers for engine state, using an atomic as some of the bits can transition
- * as the same time (e.g. a suspend can be happning at the same time as schedule
- * engine done being processed).
+ * Helpers for exec queue state, using an atomic as some of the bits can transition
+ * as the same time (e.g. a suspend can be happening at the same time as schedule
+ * exec queue done being processed).
  */
-#define EXEC_QUEUE_STATE_REGISTERED		(1 << 0)
-#define ENGINE_STATE_ENABLED		(1 << 1)
-#define EXEC_QUEUE_STATE_PENDING_ENABLE	(1 << 2)
+#define EXEC_QUEUE_STATE_REGISTERED			(1 << 0)
+#define EXEC_QUEUE_STATE_ENABLED			(1 << 1)
+#define EXEC_QUEUE_STATE_PENDING_ENABLE		(1 << 2)
 #define EXEC_QUEUE_STATE_PENDING_DISABLE	(1 << 3)
-#define EXEC_QUEUE_STATE_DESTROYED		(1 << 4)
-#define ENGINE_STATE_SUSPENDED		(1 << 5)
-#define EXEC_QUEUE_STATE_RESET		(1 << 6)
-#define ENGINE_STATE_KILLED		(1 << 7)
+#define EXEC_QUEUE_STATE_DESTROYED			(1 << 4)
+#define EXEC_QUEUE_STATE_SUSPENDED			(1 << 5)
+#define EXEC_QUEUE_STATE_RESET				(1 << 6)
+#define EXEC_QUEUE_STATE_KILLED				(1 << 7)
 
 static bool exec_queue_registered(struct xe_exec_queue *q)
 {
@@ -83,17 +83,17 @@ static void clear_exec_queue_registered(struct xe_exec_queue *q)
 
 static bool exec_queue_enabled(struct xe_exec_queue *q)
 {
-	return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
+	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
 }
 
 static void set_exec_queue_enabled(struct xe_exec_queue *q)
 {
-	atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
+	atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
 }
 
 static void clear_exec_queue_enabled(struct xe_exec_queue *q)
 {
-	atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
+	atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
 }
 
 static bool exec_queue_pending_enable(struct xe_exec_queue *q)
@@ -148,17 +148,17 @@ static void set_exec_queue_banned(struct xe_exec_queue *q)
 
 static bool exec_queue_suspended(struct xe_exec_queue *q)
 {
-	return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED;
+	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
 }
 
 static void set_exec_queue_suspended(struct xe_exec_queue *q)
 {
-	atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state);
+	atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
 }
 
 static void clear_exec_queue_suspended(struct xe_exec_queue *q)
 {
-	atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state);
+	atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
 }
 
 static bool exec_queue_reset(struct xe_exec_queue *q)
@@ -173,12 +173,12 @@ static void set_exec_queue_reset(struct xe_exec_queue *q)
 
 static bool exec_queue_killed(struct xe_exec_queue *q)
 {
-	return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED;
+	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
 }
 
 static void set_exec_queue_killed(struct xe_exec_queue *q)
 {
-	atomic_or(ENGINE_STATE_KILLED, &q->guc->state);
+	atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
 }
 
 static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
@@ -252,7 +252,7 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 
 	/*
 	 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
-	 * worse case user gets -ENOMEM on engine create and has to try again.
+	 * worse case user gets -ENOMEM on exec queue create and has to try again.
 	 *
 	 * FIXME: Have caller pre-alloc or post-alloc /w GFP_KERNEL to prevent
 	 * failure.
@@ -386,9 +386,9 @@ static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue
 	xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
 			field_, val_)
 
-static void __register_mlrc_engine(struct xe_guc *guc,
-				   struct xe_exec_queue *q,
-				   struct guc_ctxt_registration_info *info)
+static void __register_mlrc_exec_queue(struct xe_guc *guc,
+				       struct xe_exec_queue *q,
+				       struct guc_ctxt_registration_info *info)
 {
 #define MAX_MLRC_REG_SIZE      (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
 	u32 action[MAX_MLRC_REG_SIZE];
@@ -424,8 +424,8 @@ static void __register_mlrc_engine(struct xe_guc *guc,
 	xe_guc_ct_send(&guc->ct, action, len, 0, 0);
 }
 
-static void __register_engine(struct xe_guc *guc,
-			      struct guc_ctxt_registration_info *info)
+static void __register_exec_queue(struct xe_guc *guc,
+				  struct guc_ctxt_registration_info *info)
 {
 	u32 action[] = {
 		XE_GUC_ACTION_REGISTER_CONTEXT,
@@ -445,7 +445,7 @@ static void __register_engine(struct xe_guc *guc,
 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
 }
 
-static void register_engine(struct xe_exec_queue *q)
+static void register_exec_queue(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
@@ -484,8 +484,8 @@ static void register_engine(struct xe_exec_queue *q)
 
 	/*
 	 * We must keep a reference for LR engines if engine is registered with
-	 * the GuC as jobs signal immediately and can't destroy an engine if the
-	 * GuC has a reference to it.
+	 * the GuC as jobs signal immediately and can't destroy an exec queue if
+	 * the GuC has a reference to it.
 	 */
 	if (xe_exec_queue_is_lr(q))
 		xe_exec_queue_get(q);
@@ -493,9 +493,9 @@ static void register_engine(struct xe_exec_queue *q)
 	set_exec_queue_registered(q);
 	trace_xe_exec_queue_register(q);
 	if (xe_exec_queue_is_parallel(q))
-		__register_mlrc_engine(guc, q, &info);
+		__register_mlrc_exec_queue(guc, q, &info);
 	else
-		__register_engine(guc, &info);
+		__register_exec_queue(guc, &info);
 	init_policies(guc, q);
 }
 
@@ -662,7 +662,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
 
 	if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
 		if (!exec_queue_registered(q))
-			register_engine(q);
+			register_exec_queue(q);
 		if (!lr)	/* LR jobs are emitted in the exec IOCTL */
 			q->ring_ops->emit_job(job);
 		submit_exec_queue(q);
@@ -1347,7 +1347,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 		set_exec_queue_suspended(q);
 		suspend_fence_signal(q);
 	}
-	atomic_and(EXEC_QUEUE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
+	atomic_and(EXEC_QUEUE_STATE_DESTROYED | EXEC_QUEUE_STATE_SUSPENDED,
 		   &q->guc->state);
 	q->guc->resume_time = 0;
 	trace_xe_exec_queue_stop(q);
-- 
2.34.1



* [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue
@ 2023-08-29  6:56 Niranjana Vishwanathapura
  2023-08-29  7:16 ` [Intel-xe] ✗ CI.Patch_applied: failure for fixup! drm/xe: Rename engine to exec_queue (rev2) Patchwork
  2023-08-29  8:03 ` [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue Francois Dugast
  0 siblings, 2 replies; 4+ messages in thread
From: Niranjana Vishwanathapura @ 2023-08-29  6:56 UTC (permalink / raw)
  To: intel-xe; +Cc: francois.dugast, thomas.hellstrom

Rename vm's preempt queue list name from vm->preempt.engines
to vm->preempt.queues as it is a list of exec queues.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/xe/xe_exec_queue.c       |  2 +-
 drivers/gpu/drm/xe/xe_exec_queue_types.h |  4 +--
 drivers/gpu/drm/xe/xe_vm.c               | 32 ++++++++++++------------
 drivers/gpu/drm/xe/xe_vm_types.h         | 10 ++++----
 4 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 25216ef93781..66420d31e7ae 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -812,7 +812,7 @@ static void exec_queue_kill_compute(struct xe_exec_queue *q)
 
 	down_write(&q->vm->lock);
 	list_del(&q->compute.link);
-	--q->vm->preempt.num_engines;
+	--q->vm->preempt.num_queues;
 	if (q->compute.pfence) {
 		dma_fence_enable_sw_signaling(q->compute.pfence);
 		dma_fence_put(q->compute.pfence);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 4a9a46d8a759..347d28442701 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -126,7 +126,7 @@ struct xe_exec_queue {
 		u32 preempt_timeout_us;
 	} sched_props;
 
-	/** @compute: compute engine state */
+	/** @compute: compute exec queue state */
 	struct {
 		/** @pfence: preemption fence */
 		struct dma_fence *pfence;
@@ -134,7 +134,7 @@ struct xe_exec_queue {
 		u64 context;
 		/** @seqno: preemption fence seqno */
 		u32 seqno;
-		/** @link: link into VM's list of engines */
+		/** @link: link into VM's list of exec queues */
 		struct list_head link;
 		/** @lock: preemption fences lock */
 		spinlock_t lock;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 15bff0783ec9..6d556acc30d3 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -172,7 +172,7 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
 	lockdep_assert_held(&vm->lock);
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
+	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
 		if (!q->compute.pfence ||
 		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 						   &q->compute.pfence->flags))) {
@@ -197,10 +197,10 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
 	lockdep_assert_held(&vm->lock);
 	xe_vm_assert_held(vm);
 
-	if (*count >= vm->preempt.num_engines)
+	if (*count >= vm->preempt.num_queues)
 		return 0;
 
-	for (; *count < vm->preempt.num_engines; ++(*count)) {
+	for (; *count < vm->preempt.num_queues; ++(*count)) {
 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
 
 		if (IS_ERR(pfence))
@@ -218,7 +218,7 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
+	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
 		if (q->compute.pfence) {
 			long timeout = dma_fence_wait(q->compute.pfence, false);
 
@@ -237,7 +237,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
 	struct xe_exec_queue *q;
 
 	xe_vm_assert_held(vm);
-	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
+	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
 		if (!xe_exec_queue_is_idle(q))
 			return false;
 	}
@@ -250,7 +250,7 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
 	struct list_head *link;
 	struct xe_exec_queue *q;
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
+	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
 		struct dma_fence *fence;
 
 		link = list->next;
@@ -270,11 +270,11 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 	struct ww_acquire_ctx ww;
 	int err;
 
-	err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
+	err = xe_bo_lock(bo, &ww, vm->preempt.num_queues, true);
 	if (err)
 		return err;
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link)
+	list_for_each_entry(q, &vm->preempt.queues, compute.link)
 		if (q->compute.pfence) {
 			dma_resv_add_fence(bo->ttm.base.resv,
 					   q->compute.pfence,
@@ -311,7 +311,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
 	lockdep_assert_held(&vm->lock);
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
+	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
 		q->ops->resume(q);
 
 		dma_resv_add_fence(&vm->resv, q->compute.pfence,
@@ -346,8 +346,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 		goto out_unlock;
 	}
 
-	list_add(&q->compute.link, &vm->preempt.engines);
-	++vm->preempt.num_engines;
+	list_add(&q->compute.link, &vm->preempt.queues);
+	++vm->preempt.num_queues;
 	q->compute.pfence = pfence;
 
 	down_read(&vm->userptr.notifier_lock);
@@ -528,7 +528,7 @@ static void xe_vm_kill(struct xe_vm *vm)
 	vm->flags |= XE_VM_FLAG_BANNED;
 	trace_xe_vm_kill(vm);
 
-	list_for_each_entry(q, &vm->preempt.engines, compute.link)
+	list_for_each_entry(q, &vm->preempt.queues, compute.link)
 		q->ops->kill(q);
 	xe_vm_unlock(vm, &ww);
 
@@ -586,7 +586,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
 	}
 
 	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
-				  false, vm->preempt.num_engines);
+				  false, vm->preempt.num_queues);
 	if (err)
 		goto out_unlock_outer;
 
@@ -1229,7 +1229,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 
 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
 
-	INIT_LIST_HEAD(&vm->preempt.engines);
+	INIT_LIST_HEAD(&vm->preempt.queues);
 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
 
 	for_each_tile(tile, xe, id)
@@ -1416,7 +1416,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	struct drm_gpuva *gpuva, *next;
 	u8 id;
 
-	XE_WARN_ON(vm->preempt.num_engines);
+	XE_WARN_ON(vm->preempt.num_queues);
 
 	xe_vm_close(vm);
 	flush_async_ops(vm);
@@ -2083,7 +2083,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
 	vm = xa_load(&xef->vm.xa, args->vm_id);
 	if (XE_IOCTL_DBG(xe, !vm))
 		err = -ENOENT;
-	else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
+	else if (XE_IOCTL_DBG(xe, vm->preempt.num_queues))
 		err = -EBUSY;
 	else
 		xa_erase(&xef->vm.xa, args->vm_id);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 3681a5ff588b..9d55cec14165 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -285,10 +285,10 @@ struct xe_vm {
 		 * an engine again
 		 */
 		s64 min_run_period_ms;
-		/** @engines: list of engines attached to this VM */
-		struct list_head engines;
-		/** @num_engines: number user engines attached to this VM */
-		int num_engines;
+		/** @queues: list of exec queues attached to this VM */
+		struct list_head queues;
+		/** @num_queues: number exec queues attached to this VM */
+		int num_queues;
 		/**
 		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
 		 * due to no work available. Protected by the vm resv.
@@ -393,7 +393,7 @@ struct xe_vma_op {
 	 * operations is processed
 	 */
 	struct drm_gpuva_ops *ops;
-	/** @engine: engine for this operation */
+	/** @q: exec queue for this operation */
 	struct xe_exec_queue *q;
 	/**
 	 * @syncs: syncs for this operation, only used on first and last
-- 
2.21.0.rc0.32.g243a4c7e27



* [Intel-xe] ✗ CI.Patch_applied: failure for fixup! drm/xe: Rename engine to exec_queue (rev2)
  2023-08-29  6:56 [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue Niranjana Vishwanathapura
@ 2023-08-29  7:16 ` Patchwork
  2023-08-29  8:03 ` [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue Francois Dugast
  1 sibling, 0 replies; 4+ messages in thread
From: Patchwork @ 2023-08-29  7:16 UTC (permalink / raw)
  To: Francois Dugast; +Cc: intel-xe

== Series Details ==

Series: fixup! drm/xe: Rename engine to exec_queue (rev2)
URL   : https://patchwork.freedesktop.org/series/122916/
State : failure

== Summary ==

=== Applying kernel patches on branch 'drm-xe-next' with base: ===
Base commit: d8c08057a drm/xe: Add patch version on guc firmware init
=== git am output follows ===
Applying: fixup! drm/xe: Rename engine to exec_queue




* Re: [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue
  2023-08-29  6:56 [Intel-xe] [PATCH] fixup! drm/xe: Rename engine to exec_queue Niranjana Vishwanathapura
  2023-08-29  7:16 ` [Intel-xe] ✗ CI.Patch_applied: failure for fixup! drm/xe: Rename engine to exec_queue (rev2) Patchwork
@ 2023-08-29  8:03 ` Francois Dugast
  1 sibling, 0 replies; 4+ messages in thread
From: Francois Dugast @ 2023-08-29  8:03 UTC (permalink / raw)
  To: Niranjana Vishwanathapura; +Cc: thomas.hellstrom, intel-xe

On Mon, Aug 28, 2023 at 11:56:14PM -0700, Niranjana Vishwanathapura wrote:
> Rename vm's preempt queue list name from vm->preempt.engines
> to vm->preempt.queues as it is a list of exec queues.
> 
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_exec_queue.c       |  2 +-
>  drivers/gpu/drm/xe/xe_exec_queue_types.h |  4 +--
>  drivers/gpu/drm/xe/xe_vm.c               | 32 ++++++++++++------------
>  drivers/gpu/drm/xe/xe_vm_types.h         | 10 ++++----
>  4 files changed, 24 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 25216ef93781..66420d31e7ae 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -812,7 +812,7 @@ static void exec_queue_kill_compute(struct xe_exec_queue *q)
>  
>  	down_write(&q->vm->lock);
>  	list_del(&q->compute.link);
> -	--q->vm->preempt.num_engines;
> +	--q->vm->preempt.num_queues;
>  	if (q->compute.pfence) {
>  		dma_fence_enable_sw_signaling(q->compute.pfence);
>  		dma_fence_put(q->compute.pfence);
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> index 4a9a46d8a759..347d28442701 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> @@ -126,7 +126,7 @@ struct xe_exec_queue {
>  		u32 preempt_timeout_us;
>  	} sched_props;
>  
> -	/** @compute: compute engine state */
> +	/** @compute: compute exec queue state */
>  	struct {
>  		/** @pfence: preemption fence */
>  		struct dma_fence *pfence;
> @@ -134,7 +134,7 @@ struct xe_exec_queue {
>  		u64 context;
>  		/** @seqno: preemption fence seqno */
>  		u32 seqno;
> -		/** @link: link into VM's list of engines */
> +		/** @link: link into VM's list of exec queues */
>  		struct list_head link;
>  		/** @lock: preemption fences lock */
>  		spinlock_t lock;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 15bff0783ec9..6d556acc30d3 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -172,7 +172,7 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
>  	lockdep_assert_held(&vm->lock);
>  	xe_vm_assert_held(vm);
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
>  		if (!q->compute.pfence ||
>  		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
>  						   &q->compute.pfence->flags))) {
> @@ -197,10 +197,10 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
>  	lockdep_assert_held(&vm->lock);
>  	xe_vm_assert_held(vm);
>  
> -	if (*count >= vm->preempt.num_engines)
> +	if (*count >= vm->preempt.num_queues)
>  		return 0;
>  
> -	for (; *count < vm->preempt.num_engines; ++(*count)) {
> +	for (; *count < vm->preempt.num_queues; ++(*count)) {
>  		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
>  
>  		if (IS_ERR(pfence))
> @@ -218,7 +218,7 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
>  
>  	xe_vm_assert_held(vm);
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
>  		if (q->compute.pfence) {
>  			long timeout = dma_fence_wait(q->compute.pfence, false);
>  
> @@ -237,7 +237,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
>  	struct xe_exec_queue *q;
>  
>  	xe_vm_assert_held(vm);
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
>  		if (!xe_exec_queue_is_idle(q))
>  			return false;
>  	}
> @@ -250,7 +250,7 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
>  	struct list_head *link;
>  	struct xe_exec_queue *q;
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
>  		struct dma_fence *fence;
>  
>  		link = list->next;
> @@ -270,11 +270,11 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
>  	struct ww_acquire_ctx ww;
>  	int err;
>  
> -	err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
> +	err = xe_bo_lock(bo, &ww, vm->preempt.num_queues, true);
>  	if (err)
>  		return err;
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link)
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link)
>  		if (q->compute.pfence) {
>  			dma_resv_add_fence(bo->ttm.base.resv,
>  					   q->compute.pfence,
> @@ -311,7 +311,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
>  	lockdep_assert_held(&vm->lock);
>  	xe_vm_assert_held(vm);
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link) {
>  		q->ops->resume(q);
>  
>  		dma_resv_add_fence(&vm->resv, q->compute.pfence,
> @@ -346,8 +346,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
>  		goto out_unlock;
>  	}
>  
> -	list_add(&q->compute.link, &vm->preempt.engines);
> -	++vm->preempt.num_engines;
> +	list_add(&q->compute.link, &vm->preempt.queues);
> +	++vm->preempt.num_queues;
>  	q->compute.pfence = pfence;
>  
>  	down_read(&vm->userptr.notifier_lock);
> @@ -528,7 +528,7 @@ static void xe_vm_kill(struct xe_vm *vm)
>  	vm->flags |= XE_VM_FLAG_BANNED;
>  	trace_xe_vm_kill(vm);
>  
> -	list_for_each_entry(q, &vm->preempt.engines, compute.link)
> +	list_for_each_entry(q, &vm->preempt.queues, compute.link)
>  		q->ops->kill(q);
>  	xe_vm_unlock(vm, &ww);
>  
> @@ -586,7 +586,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
>  	}
>  
>  	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
> -				  false, vm->preempt.num_engines);
> +				  false, vm->preempt.num_queues);
>  	if (err)
>  		goto out_unlock_outer;
>  
> @@ -1229,7 +1229,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
>  
>  	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
>  
> -	INIT_LIST_HEAD(&vm->preempt.engines);
> +	INIT_LIST_HEAD(&vm->preempt.queues);
>  	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
>  
>  	for_each_tile(tile, xe, id)
> @@ -1416,7 +1416,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>  	struct drm_gpuva *gpuva, *next;
>  	u8 id;
>  
> -	XE_WARN_ON(vm->preempt.num_engines);
> +	XE_WARN_ON(vm->preempt.num_queues);
>  
>  	xe_vm_close(vm);
>  	flush_async_ops(vm);
> @@ -2083,7 +2083,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
>  	vm = xa_load(&xef->vm.xa, args->vm_id);
>  	if (XE_IOCTL_DBG(xe, !vm))
>  		err = -ENOENT;
> -	else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
> +	else if (XE_IOCTL_DBG(xe, vm->preempt.num_queues))
>  		err = -EBUSY;
>  	else
>  		xa_erase(&xef->vm.xa, args->vm_id);
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 3681a5ff588b..9d55cec14165 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -285,10 +285,10 @@ struct xe_vm {
>  		 * an engine again
>  		 */
>  		s64 min_run_period_ms;
> -		/** @engines: list of engines attached to this VM */
> -		struct list_head engines;
> -		/** @num_engines: number user engines attached to this VM */
> -		int num_engines;
> +		/** @queues: list of exec queues attached to this VM */
> +		struct list_head queues;

To be consistent with the rest of the renaming and to avoid ambiguity, we should
stick with exec_queue, so maybe:

    struct list_head exec_queues;

> +		/** @num_queues: number exec queues attached to this VM */
> +		int num_queues;

Same here with:

    int num_exec_queues;

Francois

>  		/**
>  		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
>  		 * due to no work available. Protected by the vm resv.
> @@ -393,7 +393,7 @@ struct xe_vma_op {
>  	 * operations is processed
>  	 */
>  	struct drm_gpuva_ops *ops;
> -	/** @engine: engine for this operation */
> +	/** @q: exec queue for this operation */
>  	struct xe_exec_queue *q;
>  	/**
>  	 * @syncs: syncs for this operation, only used on first and last
> -- 
> 2.21.0.rc0.32.g243a4c7e27
> 

