Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "K V P, Satyanarayana" <satyanarayana.k.v.p@intel.com>
To: Matthew Brost <matthew.brost@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	Michal Wajdeczko <michal.wajdeczko@intel.com>,
	Matthew Auld <matthew.auld@intel.com>
Subject: Re: [PATCH v4 1/3] drm/xe/migrate: Atomicize CCS copy command setup
Date: Wed, 8 Oct 2025 15:20:36 +0530	[thread overview]
Message-ID: <b71ff0c5-6e30-4a84-a894-1b574d10c3be@intel.com> (raw)
In-Reply-To: <aOQfeQ5YYcOVmafh@lstrano-desk.jf.intel.com>



On 07-10-2025 01:28, Matthew Brost wrote:
> On Mon, Oct 06, 2025 at 08:54:45PM +0530, Satyanarayana K V P wrote:
>> The CCS copy command is a 5-dword sequence. If the vCPU halts during
>> save/restore while this sequence is being programmed, partial writes may
>> trigger page faults when saving IGPU CCS metadata. Use the VMOVDQU
>> instruction to write the sequence atomically.
>>
>> Since VMOVDQU operates on 256-bit chunks, update EMIT_COPY_CCS_DW to emit
>> 8 dwords instead of 5 dwords.
>>
>> Update emit_flush_invalidate() to use VMOVDQU operating with 128-bit
>> chunks.
>>
>> Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
>> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
>> Cc: Matthew Brost <matthew.brost@intel.com>
>> Cc: Matthew Auld <matthew.auld@intel.com>
>>
>> ---
>> V3 -> V4:
>> - Fixed review comments. (Wajdeczko)
>> - Fix issues reported by patchworks.
>>
>> V2 -> V3:
>> - Added support for 128 bit and 256 bit instructions with memcpy_vmovdqu
>> - Updated emit_flush_invalidate() to use vmovdqu instruction.
>>
>> V1 -> V2:
>> - Use memcpy_vmovdqu only for x86 arch and for VF. Else use memcpy
>>    (Auld, Matthew)
>> - Fix issues reported by patchworks.
>> ---
>>   drivers/gpu/drm/xe/xe_migrate.c | 92 +++++++++++++++++++++++++--------
>>   1 file changed, 71 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
>> index c39c3b423d05..b960fdcecd88 100644
>> --- a/drivers/gpu/drm/xe/xe_migrate.c
>> +++ b/drivers/gpu/drm/xe/xe_migrate.c
>> @@ -5,7 +5,9 @@
>>   
>>   #include "xe_migrate.h"
>>   
>> +#include <asm/fpu/api.h>
>>   #include <linux/bitfield.h>
>> +#include <linux/cpufeature.h>
>>   #include <linux/sizes.h>
>>   
>>   #include <drm/drm_managed.h>
>> @@ -644,18 +646,50 @@ static void emit_pte(struct xe_migrate *m,
>>   	}
>>   }
>>   
>> -#define EMIT_COPY_CCS_DW 5
>> +static void memcpy_vmovdqu(void *dst, const void *src, u32 size)
>> +{
>> +	kernel_fpu_begin();
>> +
>> +#ifdef CONFIG_X86
>> +	if (size == SZ_128) {
>> +		asm("vmovdqu (%0), %%xmm0\n"
>> +		    "vmovups %%xmm0,   (%1)\n"
>> +		    :: "r" (src), "r" (dst) : "memory");
>> +	} else if (size == SZ_256) {
>> +		asm("vmovdqu (%0), %%ymm0\n"
>> +		    "vmovups %%ymm0,   (%1)\n"
>> +		    :: "r" (src), "r" (dst) : "memory");
>> +	}
>> +#endif
>> +	kernel_fpu_end();
> 
> I think you can hide this entire function by #ifdef CONFIG_X86.
Kept the body of the function under #ifdef. Otherwise we may get a 
compilation error when CONFIG_X86 is not defined.>
>> +}
>> +
>> +static void emit_atomic(struct xe_gt *gt, void *dst, const void *src, u32 size)
>> +{
>> +	u32 instr_size = size * BITS_PER_BYTE;
>> +
>> +	xe_assert(gt_to_xe(gt), !(instr_size != SZ_128 && instr_size != SZ_256));
> 
> I think it is slightly more clear to write it like this:
> 
> xe_assert(gt_to_xe(gt), instr_size == SZ_128 || instr_size == SZ_256);
> 
> I suspect Michal would insist on xe_gt_assert here too.
Since CCS save/restore is per device, xe_assert() should hold good.>
>> +
>> +	if (IS_SRIOV_VF(gt_to_xe(gt)) && static_cpu_has(X86_FEATURE_AVX))
> 
> Should this be VF CCS initialized check rather than generic VF check?
Fixed in new revision.>
>> +		memcpy_vmovdqu(dst, src, instr_size);
>> +	else
>> +		memcpy(dst, src, size);
>> +}
>> +
>> +#define EMIT_COPY_CCS_DW 8
>>   static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
>>   			  u64 dst_ofs, bool dst_is_indirect,
>>   			  u64 src_ofs, bool src_is_indirect,
>>   			  u32 size)
>>   {
>> +	u32 dw[EMIT_COPY_CCS_DW] = {MI_NOOP};
>>   	struct xe_device *xe = gt_to_xe(gt);
>>   	u32 *cs = bb->cs + bb->len;
>>   	u32 num_ccs_blks;
>>   	u32 num_pages;
>>   	u32 ccs_copy_size;
>>   	u32 mocs;
>> +	u32 i = 0;
>>   
>>   	if (GRAPHICS_VERx100(xe) >= 2000) {
>>   		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
>> @@ -673,15 +707,23 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
>>   		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
>>   	}
>>   
>> -	*cs++ = XY_CTRL_SURF_COPY_BLT |
>> -		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
>> -		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
>> -		ccs_copy_size;
>> -	*cs++ = lower_32_bits(src_ofs);
>> -	*cs++ = upper_32_bits(src_ofs) | mocs;
>> -	*cs++ = lower_32_bits(dst_ofs);
>> -	*cs++ = upper_32_bits(dst_ofs) | mocs;
>> +	dw[i++] = XY_CTRL_SURF_COPY_BLT |
>> +		  (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
>> +		  (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
>> +		  ccs_copy_size;
>> +	dw[i++] = lower_32_bits(src_ofs);
>> +	dw[i++] = upper_32_bits(src_ofs) | mocs;
>> +	dw[i++] = lower_32_bits(dst_ofs);
>> +	dw[i++] = upper_32_bits(dst_ofs) | mocs;
>>   
>> +	/*
>> +	 * The CCS copy command is a 5-dword sequence. If the vCPU halts during
>> +	 * save/restore while this sequence is being issued, partial writes may trigger
>> +	 * page faults when saving iGPU CCS metadata. Use the VMOVDQU instruction to
>> +	 * write the sequence atomically.
>> +	 */
>> +	emit_atomic(gt, cs, dw, sizeof(u32) * EMIT_COPY_CCS_DW);
> 
> sizeof(dw) to check this consistent with below or change below to match
> the logic here.
Fixed in new revision.>
>> +	cs += EMIT_COPY_CCS_DW;
>>   	bb->len = cs - bb->cs;
>>   }
>>   
>> @@ -993,18 +1035,26 @@ static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
>>   	return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
>>   }
>>   
>> -static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
>> +/*
>> + * The MI_FLUSH_DW command is a 4-dword sequence. If the vCPU halts during
>> + * save/restore while this sequence is being issued, partial writes may
>> + * trigger page faults when saving iGPU CCS metadata. Use
>> + * emit_atomic() to write the sequence atomically.
>> + */
>> +static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i, u32 flags)
> 
> s/dw/cs ?
Fixed in new revision.>
>>   {
>>   	u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
>> +	u32 tmp_dw[SZ_4] = {MI_NOOP}, j = 0;
> 
> #define EMIT_FLUSH_INVALIDATE_DW 4 ?
> 
> s/tmp_dw/cs ?
Fixed in new revision.>
>> +
>> +	tmp_dw[j++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
>> +		      MI_FLUSH_IMM_DW | flags;
>> +	tmp_dw[j++] = lower_32_bits(addr);
>> +	tmp_dw[j++] = upper_32_bits(addr);
>> +	tmp_dw[j++] = MI_NOOP;
>>   
>> -	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
>> -		  MI_FLUSH_IMM_DW | flags;
>> -	dw[i++] = lower_32_bits(addr);
>> -	dw[i++] = upper_32_bits(addr);
>> -	dw[i++] = MI_NOOP;
>> -	dw[i++] = MI_NOOP;
>> +	emit_atomic(q->gt, &dw[i], tmp_dw, sizeof(tmp_dw));
>>   
>> -	return i;
>> +	return i + j;
>>   }
>>   
>>   /**
>> @@ -1049,7 +1099,7 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
>>   	/* Calculate Batch buffer size */
>>   	batch_size = 0;
>>   	while (size) {
>> -		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
>> +		batch_size += 8; /* Flush + ggtt addr + 1 NOP */
>>   		u64 ccs_ofs, ccs_size;
>>   		u32 ccs_pt;
>>   
>> @@ -1090,7 +1140,7 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
>>   	 * sizes here again before copy command is emitted.
>>   	 */
>>   	while (size) {
>> -		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
>> +		batch_size += 8; /* Flush + ggtt addr + 1 NOP */
> 
> EMIT_FLUSH_INVALIDATE_DW * 2 ?
> 
>>   		u32 flush_flags = 0;
>>   		u64 ccs_ofs, ccs_size;
>>   		u32 ccs_pt;
>> @@ -1113,11 +1163,11 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
>>   
>>   		emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
>>   
>> -		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
>> +		bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
>>   		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
>>   						  src_L0_ofs, dst_is_pltt,
>>   						  src_L0, ccs_ofs, true);
>> -		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
>> +		bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
> 
> Side note: I don't think the second emit_flush_invalidate is actually
> necessary here. Removing it is probably out of scope for this series,
> but once this is merged and testing is stable, we can try removing it in
> a follow-up and see what happens.
> 
> Matt
> 
>>   
>>   		size -= src_L0;
>>   	}
>> -- 
>> 2.51.0
>>


  reply	other threads:[~2025-10-08  9:50 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-10-06 15:24 [PATCH v4 0/3] drm/xe/migrate: Atomicize CCS copy command setup Satyanarayana K V P
2025-10-06 15:24 ` [PATCH v4 1/3] " Satyanarayana K V P
2025-10-06 19:58   ` Matthew Brost
2025-10-08  9:50     ` K V P, Satyanarayana [this message]
2025-10-06 15:24 ` [PATCH v4 2/3] drm/xe/migrate: Make emit_pte() header write atomic Satyanarayana K V P
2025-10-06 19:42   ` Matthew Brost
2025-10-06 15:24 ` [PATCH v4 3/3] drm/xe/vf: Clear CCS read/write buffers in atomic way Satyanarayana K V P
2025-10-06 20:12   ` Matthew Brost
2025-10-08  1:40     ` Matthew Brost

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b71ff0c5-6e30-4a84-a894-1b574d10c3be@intel.com \
    --to=satyanarayana.k.v.p@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.auld@intel.com \
    --cc=matthew.brost@intel.com \
    --cc=michal.wajdeczko@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox