Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: Shuicheng Lin <shuicheng.lin@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <stuart.summers@intel.com>
Subject: Re: [PATCH v3] drm/xe: Limit number of jobs per exec queue
Date: Mon, 27 Oct 2025 14:47:03 -0700	[thread overview]
Message-ID: <aP/oV1mz1aDjJiVC@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <20251027202118.3339905-2-shuicheng.lin@intel.com>

On Mon, Oct 27, 2025 at 08:21:19PM +0000, Shuicheng Lin wrote:
> Add a limit to the number of jobs that can be queued in a single
> exec queue to avoid potential resource exhaustion.
> 
> A new field `job_cnt` is introduced in `struct xe_exec_queue` to
> track the number of active DRM jobs, along with a maximum limit
> `XE_MAX_JOB_COUNT_PER_EXEC_QUEUE` set to 1000.
> 
> If the job count exceeds this threshold, `xe_exec_ioctl()` now
> returns `-EAGAIN` to signal that the caller should retry later.
> 
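For userspace the contract here is simply "resubmit when the exec ioctl
returns -EAGAIN". A minimal, untested sketch of what that could look like,
assuming the uapi header is available as <drm/xe_drm.h> and using a raw
ioctl() (drmIoctl() would swallow -EAGAIN by retrying internally); the
100us backoff is purely illustrative:

#include <errno.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/xe_drm.h>

static int xe_exec_with_retry(int fd, struct drm_xe_exec *exec)
{
	int ret;

	for (;;) {
		ret = ioctl(fd, DRM_IOCTL_XE_EXEC, exec);
		if (ret == -1 && errno == EAGAIN) {
			/* The exec queue already holds the maximum number
			 * of jobs; back off briefly so in-flight jobs can
			 * retire, then resubmit. */
			usleep(100);
			continue;
		}
		if (ret == -1 && errno == EINTR)
			continue;
		return ret;
	}
}
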
> A trace event is added to track when the limit is reached:
> "xe_exec_queue_reach_max_job_count: dev=0000:03:00.0, job count
> exceeded the maximum limit (1000) per exec queue. engine_class=0x3,
> logical_mask=0x1, guc_id=2"
> 
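For anyone wanting to catch this in practice, the event should be reachable
through tracefs once the patch lands. A small, hypothetical helper, assuming
tracefs is mounted at /sys/kernel/tracing and the event sits under the "xe"
trace system (per TRACE_SYSTEM in xe_trace.h):

#include <fcntl.h>
#include <unistd.h>

static int enable_max_job_count_event(void)
{
	int fd = open("/sys/kernel/tracing/events/xe/"
		      "xe_exec_queue_reach_max_job_count/enable", O_WRONLY);

	if (fd < 0)
		return -1;
	/* Writing "1" arms the event; hits then show up in
	 * /sys/kernel/tracing/trace. */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
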
> v3: Add an assert in xe_exec_queue_destroy() that q->job_cnt is zero. (Matt)
> v2 (Matt):
>  - Add a trace log for when the limit is hit.
>  - Change max count from 0x1000 to 1000.
>  - Use atomic_t for job_cnt.
> 
> Suggested-by: Matthew Brost <matthew.brost@intel.com>

Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_exec.c             |  7 +++++++
>  drivers/gpu/drm/xe/xe_exec_queue.c       |  2 ++
>  drivers/gpu/drm/xe/xe_exec_queue_types.h |  5 +++++
>  drivers/gpu/drm/xe/xe_sched_job.c        |  2 ++
>  drivers/gpu/drm/xe/xe_trace.h            | 23 +++++++++++++++++++++++
>  5 files changed, 39 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
> index 521467d976f7..f4d79b4b9396 100644
> --- a/drivers/gpu/drm/xe/xe_exec.c
> +++ b/drivers/gpu/drm/xe/xe_exec.c
> @@ -21,6 +21,7 @@
>  #include "xe_sched_job.h"
>  #include "xe_sync.h"
>  #include "xe_svm.h"
> +#include "xe_trace.h"
>  #include "xe_vm.h"
>  
>  /**
> @@ -154,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		goto err_exec_queue;
>  	}
>  
> +	if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
> +		trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
> +		err = -EAGAIN;
> +		goto err_exec_queue;
> +	}
> +
>  	if (args->num_syncs) {
>  		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
>  		if (!syncs) {
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 90cbc95f8e2e..1b57d7c2cc94 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -377,6 +377,8 @@ void xe_exec_queue_destroy(struct kref *ref)
>  	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
>  	struct xe_exec_queue *eq, *next;
>  
> +	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
> +
>  	if (xe_exec_queue_uses_pxp(q))
>  		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
>  
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> index 282505fa1377..c8807268ec6c 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> @@ -162,6 +162,11 @@ struct xe_exec_queue {
>  	const struct xe_ring_ops *ring_ops;
>  	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
>  	struct drm_sched_entity *entity;
> +
> +#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE	1000
> +	/** @job_cnt: number of drm jobs in this exec queue */
> +	atomic_t job_cnt;
> +
>  	/**
>  	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed
>  	 * Protected by @vm's resv. Unused if @vm == NULL.
> diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> index 6ae4cc6a3802..f1ba9c19e218 100644
> --- a/drivers/gpu/drm/xe/xe_sched_job.c
> +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> @@ -146,6 +146,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
>  	for (i = 0; i < width; ++i)
>  		job->ptrs[i].batch_addr = batch_addr[i];
>  
> +	atomic_inc(&q->job_cnt);
>  	xe_pm_runtime_get_noresume(job_to_xe(job));
>  	trace_xe_sched_job_create(job);
>  	return job;
> @@ -177,6 +178,7 @@ void xe_sched_job_destroy(struct kref *ref)
>  	dma_fence_put(job->fence);
>  	drm_sched_job_cleanup(&job->drm);
>  	job_free(job);
> +	atomic_dec(&q->job_cnt);
>  	xe_exec_queue_put(q);
>  	xe_pm_runtime_put(xe);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
> index 314f42fcbcbd..79a97b086cb2 100644
> --- a/drivers/gpu/drm/xe/xe_trace.h
> +++ b/drivers/gpu/drm/xe/xe_trace.h
> @@ -441,6 +441,29 @@ TRACE_EVENT(xe_eu_stall_data_read,
>  		      __entry->read_size, __entry->total_size)
>  );
>  
> +TRACE_EVENT(xe_exec_queue_reach_max_job_count,
> +	    TP_PROTO(struct xe_exec_queue *q, int max_cnt),
> +	    TP_ARGS(q, max_cnt),
> +
> +	    TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
> +			     __field(enum xe_engine_class, class)
> +			     __field(u32, logical_mask)
> +			     __field(u16, guc_id)
> +			     __field(int, max_cnt)
> +			     ),
> +
> +	    TP_fast_assign(__assign_str(dev);
> +			   __entry->class = q->class;
> +			   __entry->logical_mask = q->logical_mask;
> +			   __entry->guc_id = q->guc->id;
> +			   __entry->max_cnt = max_cnt;
> +			   ),
> +
> +	    TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
> +		      __get_str(dev), __entry->max_cnt,
> +		      __entry->class, __entry->logical_mask, __entry->guc_id)
> +);
> +
>  #endif
>  
>  /* This part must be outside protection */
> -- 
> 2.49.0
> 


Thread overview: 19+ messages
2025-10-22 18:10 [PATCH] drm/xe: Limit number of jobs per exec queue Shuicheng Lin
2025-10-22 18:48 ` Matthew Brost
2025-10-23 15:51   ` Lin, Shuicheng
2025-10-23 19:05     ` Matthew Brost
2025-10-23  1:04 ` ✓ CI.KUnit: success for " Patchwork
2025-10-23  1:43 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-10-23  8:26 ` ✗ Xe.CI.Full: " Patchwork
2025-10-25 18:10 ` [PATCH v2] " Shuicheng Lin
2025-10-25 18:20 ` ✓ CI.KUnit: success for drm/xe: Limit number of jobs per exec queue (rev2) Patchwork
2025-10-25 18:59 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-25 20:09 ` ✗ Xe.CI.Full: failure " Patchwork
2025-10-27 16:33 ` [PATCH] drm/xe: Limit number of jobs per exec queue Matthew Brost
2025-10-27 19:53   ` Matthew Brost
2025-10-27 20:21 ` [PATCH v3] " Shuicheng Lin
2025-10-27 21:47   ` Matthew Brost [this message]
2025-10-29  1:48   ` Matthew Brost
2025-10-27 21:14 ` ✓ CI.KUnit: success for drm/xe: Limit number of jobs per exec queue (rev3) Patchwork
2025-10-27 21:52 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-28  4:39 ` ✓ Xe.CI.Full: " Patchwork
