Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: Matthew Brost <matthew.brost@intel.com>, intel-xe@lists.freedesktop.org
Subject: Re: [PATCH v5 5/6] drm/xe: Disallow input fences on zero batch execs and zero binds
Date: Mon, 03 Nov 2025 16:22:20 +0100	[thread overview]
Message-ID: <6a696d4bbe0f96c38be2e0baa248df1340ae69cb.camel@linux.intel.com> (raw)
In-Reply-To: <8e6f759a4c430bc88eb2982ebddde4fc695f003c.camel@linux.intel.com>

On Mon, 2025-11-03 at 16:21 +0100, Thomas Hellström wrote:
> On Wed, 2025-10-29 at 13:57 -0700, Matthew Brost wrote:
> > Prevent input fences from being installed on zero batch execs or
> > zero
> > binds, which were originally added to support queue idling in Mesa
> > via
> > output fences. Although input fence support was introduced for
> > interface
> > consistency, it leads to incorrect behavior due to chained
> > composite
> > fences, which are disallowed.
> > 
> > Avoid the complexity of fixing this by removing support, as input
> > fences
> > for these cases are not used in practice.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> 
> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Oh, please mention that it's a uapi change by using the "drm/xe/uapi:" subject prefix.

/Thomas



> 
> > ---
> >  drivers/gpu/drm/xe/xe_sync.c | 101 +++++++++++++----------
> > 
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
> > index df7ca349398b..ff74528ca0c6 100644
> > --- a/drivers/gpu/drm/xe/xe_sync.c
> > +++ b/drivers/gpu/drm/xe/xe_sync.c
> > @@ -301,84 +301,55 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
> >  
> >  	lockdep_assert_held(&vm->lock);
> >  
> > -	/* Count in-fences */
> > -	for (i = 0; i < num_sync; ++i) {
> > -		if (sync[i].fence) {
> > -			++num_fence;
> > -			fence = sync[i].fence;
> > -		}
> > -	}
> > -
> > -	/* Easy case... */
> > -	if (!num_fence) {
> > -		if (q->flags & EXEC_QUEUE_FLAG_VM) {
> > -			struct xe_exec_queue *__q;
> > -			struct xe_tile *tile;
> > -			u8 id;
> > -
> > -			for_each_tile(tile, vm->xe, id)
> > -				num_fence += (1 + XE_MAX_GT_PER_TILE);
> > -
> > -			fences = kmalloc_array(num_fence, sizeof(*fences),
> > -					       GFP_KERNEL);
> > -			if (!fences)
> > -				return ERR_PTR(-ENOMEM);
> > -
> > +	/* Reject in fences */
> > +	for (i = 0; i < num_sync; ++i)
> > +		if (sync[i].fence)
> > +			return ERR_PTR(-EOPNOTSUPP);
> > +
> > +	if (q->flags & EXEC_QUEUE_FLAG_VM) {
> > +		struct xe_exec_queue *__q;
> > +		struct xe_tile *tile;
> > +		u8 id;
> > +
> > +		for_each_tile(tile, vm->xe, id)
> > +			num_fence += (1 + XE_MAX_GT_PER_TILE);
> > +
> > +		fences = kmalloc_array(num_fence, sizeof(*fences),
> > +				       GFP_KERNEL);
> > +		if (!fences)
> > +			return ERR_PTR(-ENOMEM);
> > +
> > +		fences[current_fence++] =
> > +			xe_exec_queue_last_fence_get(q, vm);
> > +		for_each_tlb_inval(i)
> > +			fences[current_fence++] =
> > +				xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
> > +		list_for_each_entry(__q, &q->multi_gt_list,
> > +				    multi_gt_link) {
> >  			fences[current_fence++] =
> > -				xe_exec_queue_last_fence_get(q, vm);
> > +				xe_exec_queue_last_fence_get(__q, vm);
> >  			for_each_tlb_inval(i)
> >  				fences[current_fence++] =
> > -					xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
> > -			list_for_each_entry(__q, &q->multi_gt_list,
> > -					    multi_gt_link) {
> > -				fences[current_fence++] =
> > -					xe_exec_queue_last_fence_get(__q, vm);
> > -				for_each_tlb_inval(i)
> > -					fences[current_fence++] =
> > -						xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i);
> > -			}
> > -
> > -			xe_assert(vm->xe, current_fence == num_fence);
> > -			cf = dma_fence_array_create(num_fence, fences,
> > -						    dma_fence_context_alloc(1),
> > -						    1, false);
> > -			if (!cf)
> > -				goto err_out;
> > -
> > -			return &cf->base;
> > +					xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i);
> >  		}
> >  
> > -		fence = xe_exec_queue_last_fence_get(q, vm);
> > -		return fence;
> > -	}
> > +		xe_assert(vm->xe, current_fence == num_fence);
> > +		cf = dma_fence_array_create(num_fence, fences,
> > +					    dma_fence_context_alloc(1),
> > +					    1, false);
> > +		if (!cf)
> > +			goto err_out;
> >  
> > -	/*
> > -	 * Create composite fence - FIXME - the below code doesn't work. This is
> > -	 * unused in Mesa so we are ok for the moment. Perhaps we just disable
> > -	 * this entire code path if number of in fences != 0.
> > -	 */
> > -	fences = kmalloc_array(num_fence + 1, sizeof(*fences), GFP_KERNEL);
> > -	if (!fences)
> > -		return ERR_PTR(-ENOMEM);
> > -	for (i = 0; i < num_sync; ++i) {
> > -		if (sync[i].fence) {
> > -			dma_fence_get(sync[i].fence);
> > -			fences[current_fence++] = sync[i].fence;
> > -		}
> > +		return &cf->base;
> >  	}
> > -	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
> > -	cf = dma_fence_array_create(num_fence, fences,
> > -				    dma_fence_context_alloc(1), 1, false);
> > -	if (!cf)
> > -		goto err_out;
> >  
> > -	return &cf->base;
> > +	fence = xe_exec_queue_last_fence_get(q, vm);
> > +	return fence;
> >  
> >  err_out:
> >  	while (current_fence)
> >  		dma_fence_put(fences[--current_fence]);
> >  	kfree(fences);
> > -	kfree(cf);
> >  
> >  	return ERR_PTR(-ENOMEM);
> >  }
> 


  reply	other threads:[~2025-11-03 15:22 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-10-29 20:57 [PATCH v5 0/6] Fix serialization on burst of unbinds - v2 Matthew Brost
2025-10-29 20:57 ` [PATCH v5 1/6] drm/xe: Enforce correct user fence signaling order using drm_syncobjs Matthew Brost
2025-10-30  7:58   ` Thomas Hellström
2025-10-30 12:54     ` Matthew Brost
2025-10-29 20:57 ` [PATCH v5 2/6] drm/xe: Attach last fence to TLB invalidation job queues Matthew Brost
2025-10-30  8:24   ` Thomas Hellström
2025-10-29 20:57 ` [PATCH v5 3/6] drm/xe: Decouple bind queue last fence from TLB invalidations Matthew Brost
2025-10-30  9:52   ` Thomas Hellström
2025-10-29 20:57 ` [PATCH v5 4/6] drm/xe: Skip TLB invalidation waits in page fault binds Matthew Brost
2025-11-03 15:19   ` Thomas Hellström
2025-10-29 20:57 ` [PATCH v5 5/6] drm/xe: Disallow input fences on zero batch execs and zero binds Matthew Brost
2025-11-03 15:21   ` Thomas Hellström
2025-11-03 15:22     ` Thomas Hellström [this message]
2025-10-29 20:57 ` [PATCH v5 6/6] drm/xe: Remove last fence dependency check from binds Matthew Brost
2025-10-30  8:43   ` Thomas Hellström
2025-11-03 15:24   ` Thomas Hellström

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=6a696d4bbe0f96c38be2e0baa248df1340ae69cb.camel@linux.intel.com \
    --to=thomas.hellstrom@linux.intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox