AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity
@ 2026-04-17 13:50 Philip Yang
  2026-04-17 14:04 ` Christian König
  0 siblings, 1 reply; 3+ messages in thread
From: Philip Yang @ 2026-04-17 13:50 UTC (permalink / raw)
  To: amd-gfx
  Cc: Felix.Kuehling, christian.koenig, Kent.Russell, Andrew.Martin,
	Philip Yang

Refactor the NUMA-aware MTYPE override for VM page table entries:

- Move the override_vm_pte_flags call from the centralized
  amdgpu_vm_pte_update_flags() into the individual CPU and SDMA update
  backends, enabling per-PTE MTYPE override including for scattered
  pages (pages_addr path).

- Move APU, IP version, and direct-mapped eligibility checks from
  runtime (gmc_v9_0_override_vm_pte_flags) to init time
  (gmc_v9_0_set_gmc_funcs), selecting between gmc_funcs structs with
  and without the override function pointer to avoid repeated runtime
  checks on every PTE update.

- Guard allow_override on whether gmc_funcs->override_vm_pte_flags is
  actually implemented.

- Move amdgpu_device_check_iommu_direct_map() earlier in device init
  so ram_is_direct_mapped is available when gmc_funcs are selected
  during IP early init.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  5 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  | 11 ++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c   |  9 ----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c       | 50 ++++++++++++---------
 7 files changed, 55 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bc7e96b58d3f..b139475f65cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3860,6 +3860,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * completed before the need for a different level is detected.
 	 */
 	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
+
+	amdgpu_device_check_iommu_direct_map(adev);
+
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
@@ -4117,8 +4120,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (px)
 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
-	amdgpu_device_check_iommu_direct_map(adev);
-
 	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
 	r = register_pm_notifier(&adev->pm_nb);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 63156289ae7f..853204b5bd73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1163,7 +1163,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.pages_addr = pages_addr;
 	params.unlocked = unlocked;
 	params.needs_flush = flush_tlb;
-	params.allow_override = allow_override;
+	params.allow_override = allow_override && adev->gmc.gmc_funcs->override_vm_pte_flags;
 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
 
 	amdgpu_vm_eviction_lock(vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f33ea7f8509b..326522917131 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -296,8 +296,8 @@ struct amdgpu_vm_update_params {
 	bool needs_flush;
 
 	/**
-	 * @allow_override: true for memory that is not uncached: allows MTYPE
-	 * to be overridden for NUMA local memory.
+	 * @allow_override: true for memory that is not uncached and gmc override function is
+	 * implemented to allow MTYPE to be overridden for NUMA local memory.
 	 */
 	bool allow_override;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index f078db3fef79..fa5d4ac2ef39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -88,12 +88,21 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
+	if (!p->pages_addr && p->allow_override)
+		amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
+
 	for (i = 0; i < count; i++) {
+		u64 oflags = flags;
+
 		value = p->pages_addr ?
 			amdgpu_vm_map_gart(p->pages_addr, addr) :
 			addr;
+
+		if (p->pages_addr && p->allow_override)
+			amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, value, &oflags);
+
 		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
-				       i, value, flags);
+				       i, value, oflags);
 		addr += incr;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 31a437ce9570..883cc275f354 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -707,15 +707,6 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
 	if (level == AMDGPU_VM_PTB)
 		amdgpu_vm_pte_update_noretry_flags(adev, &flags);
 
-	/* APUs mapping system memory may need different MTYPEs on different
-	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
-	 * to be on the same NUMA node.
-	 */
-	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
-	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
-	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
-		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
-
 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
 					 flags);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 36805dcfa159..37f0c0027075 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -257,6 +257,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 		}
 
 		if (!p->pages_addr) {
+			if (p->allow_override)
+				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
+
 			/* set page commands needed */
 			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
 						incr, flags);
@@ -275,8 +278,14 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 		p->num_dw_left -= nptes * 2;
 		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
 		for (i = 0; i < nptes; ++i, addr += incr) {
+			u64 oflags = flags;
+
 			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
-			pte[i] |= flags;
+
+			if (p->allow_override)
+				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, pte[i], &oflags);
+
+			pte[i] |= oflags;
 		}
 
 		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e7b78027002b..479611e269b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1204,21 +1204,6 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 {
 	int local_node, nid;
 
-	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
-	 * memory can use more efficient MTYPEs.
-	 */
-	if (!(adev->flags & AMD_IS_APU) ||
-	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
-		return;
-
-	/* Only direct-mapped memory allows us to determine the NUMA node from
-	 * the DMA address.
-	 */
-	if (!adev->ram_is_direct_mapped) {
-		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
-		return;
-	}
-
 	/* MTYPE_NC is the same default and can be overridden.
 	 * MTYPE_UC will be present if the memory is extended-coherent
 	 * and can also be overridden.
@@ -1231,11 +1216,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 		return;
 	}
 
-	/* FIXME: Only supported on native mode for now. For carve-out, the
-	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
-	 * memory partitions are not associated with different NUMA nodes.
-	 */
-	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
+	if (vm->mem_id >= 0) {
 		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
 	} else {
 		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
@@ -1328,6 +1309,19 @@ static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
+	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
+	.get_vm_pde = gmc_v9_0_get_vm_pde,
+	.get_vm_pte = gmc_v9_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
+	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
+	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
+	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
+};
+
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_override_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
@@ -1343,7 +1337,21 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 
 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes, local system
+	 * memory can use more efficient MTYPEs.
+	 *
+	 * APUs mapping system memory may need different MTYPEs on different
+	 * NUMA nodes.
+	 *
+	 * Only direct-mapped memory allows us to determine the NUMA node from
+	 * the DMA address.
+	 */
+	if ((adev->gmc.is_app_apu && num_possible_nodes() > 1) &&
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+	    adev->ram_is_direct_mapped)
+		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_override_funcs;
+	else
+		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 
 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
-- 
2.50.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity
  2026-04-17 13:50 [PATCH] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity Philip Yang
@ 2026-04-17 14:04 ` Christian König
  2026-04-17 18:40   ` Philip Yang
  0 siblings, 1 reply; 3+ messages in thread
From: Christian König @ 2026-04-17 14:04 UTC (permalink / raw)
  To: Philip Yang, amd-gfx; +Cc: Felix.Kuehling, Kent.Russell, Andrew.Martin

On 4/17/26 15:50, Philip Yang wrote:
> Refactor the NUMA-aware MTYPE override for VM page table entries:
> 
> - Move the override_vm_pte_flags call from the centralized
>   amdgpu_vm_pte_update_flags() into the individual CPU and SDMA update
>   backends, enabling per-PTE MTYPE override including for scattered
>   pages (pages_addr path).
> 
> - Move APU, IP version, and direct-mapped eligibility checks from
>   runtime (gmc_v9_0_override_vm_pte_flags) to init time
>   (gmc_v9_0_set_gmc_funcs), selecting between gmc_funcs structs with
>   and without the override function pointer to avoid repeated runtime
>   checks on every PTE update.
> 
> - Guard allow_override on whether gmc_funcs->override_vm_pte_flags is
>   actually implemented.
> 
> - Move amdgpu_device_check_iommu_direct_map() earlier in device init
>   so ram_is_direct_mapped is available when gmc_funcs are selected
>   during IP early init.
> 
> Signed-off-by: Philip Yang <Philip.Yang@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  5 ++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      |  4 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  | 11 ++++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c   |  9 ----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++-
>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c       | 50 ++++++++++++---------
>  7 files changed, 55 insertions(+), 37 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index bc7e96b58d3f..b139475f65cb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3860,6 +3860,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	 * completed before the need for a different level is detected.
>  	 */
>  	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);

> +
> +	amdgpu_device_check_iommu_direct_map(adev);
> +
>  	/* early init functions */
>  	r = amdgpu_device_ip_early_init(adev);
>  	if (r)
> @@ -4117,8 +4120,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	if (px)
>  		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
>  
> -	amdgpu_device_check_iommu_direct_map(adev);
> -

That should probably be a separate patch.

>  	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
>  	r = register_pm_notifier(&adev->pm_nb);
>  	if (r)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 63156289ae7f..853204b5bd73 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -1163,7 +1163,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>  	params.pages_addr = pages_addr;
>  	params.unlocked = unlocked;
>  	params.needs_flush = flush_tlb;
> -	params.allow_override = allow_override;
> +	params.allow_override = allow_override && adev->gmc.gmc_funcs->override_vm_pte_flags;

Prerequisite check first please!

And we should probably rename params.allow_override as well. Maybe something like override_pte_flags, similar to the name of the callback?

>  	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
>  
>  	amdgpu_vm_eviction_lock(vm);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index f33ea7f8509b..326522917131 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -296,8 +296,8 @@ struct amdgpu_vm_update_params {
>  	bool needs_flush;
>  
>  	/**
> -	 * @allow_override: true for memory that is not uncached: allows MTYPE
> -	 * to be overridden for NUMA local memory.
> +	 * @allow_override: true for memory that is not uncached and gmc override function is
> +	 * implemented to allow MTYPE to be overridden for NUMA local memory.
>  	 */
>  	bool allow_override;
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> index f078db3fef79..fa5d4ac2ef39 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
> @@ -88,12 +88,21 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
>  
>  	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
>  
> +	if (!p->pages_addr && p->allow_override)
> +		amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
> +
>  	for (i = 0; i < count; i++) {
> +		u64 oflags = flags;
> +
>  		value = p->pages_addr ?
>  			amdgpu_vm_map_gart(p->pages_addr, addr) :
>  			addr;
> +
> +		if (p->pages_addr && p->allow_override)
> +			amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, value, &oflags);
> +
>  		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
> -				       i, value, flags);
> +				       i, value, oflags);
>  		addr += incr;
>  	}
>  	return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> index 31a437ce9570..883cc275f354 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
> @@ -707,15 +707,6 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
>  	if (level == AMDGPU_VM_PTB)
>  		amdgpu_vm_pte_update_noretry_flags(adev, &flags);
>  
> -	/* APUs mapping system memory may need different MTYPEs on different
> -	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
> -	 * to be on the same NUMA node.
> -	 */
> -	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
> -	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
> -	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
> -		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
> -
>  	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
>  					 flags);
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> index 36805dcfa159..37f0c0027075 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
> @@ -257,6 +257,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>  		}
>  
>  		if (!p->pages_addr) {
> +			if (p->allow_override)
> +				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
> +
>  			/* set page commands needed */
>  			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
>  						incr, flags);
> @@ -275,8 +278,14 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>  		p->num_dw_left -= nptes * 2;
>  		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
>  		for (i = 0; i < nptes; ++i, addr += incr) {
> +			u64 oflags = flags;
> +
>  			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
> -			pte[i] |= flags;
> +
> +			if (p->allow_override)
> +				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, pte[i], &oflags);
> +
> +			pte[i] |= oflags;
>  		}
>  
>  		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index e7b78027002b..479611e269b8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -1204,21 +1204,6 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
>  {
>  	int local_node, nid;
>  
> -	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
> -	 * memory can use more efficient MTYPEs.
> -	 */
> -	if (!(adev->flags & AMD_IS_APU) ||
> -	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
> -		return;
> -
> -	/* Only direct-mapped memory allows us to determine the NUMA node from
> -	 * the DMA address.
> -	 */
> -	if (!adev->ram_is_direct_mapped) {
> -		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
> -		return;
> -	}
> -
>  	/* MTYPE_NC is the same default and can be overridden.
>  	 * MTYPE_UC will be present if the memory is extended-coherent
>  	 * and can also be overridden.
> @@ -1231,11 +1216,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
>  		return;
>  	}
>  
> -	/* FIXME: Only supported on native mode for now. For carve-out, the
> -	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
> -	 * memory partitions are not associated with different NUMA nodes.
> -	 */
> -	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
> +	if (vm->mem_id >= 0) {
>  		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
>  	} else {
>  		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
> @@ -1328,6 +1309,19 @@ static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
>  }
>  
>  static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
> +	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
> +	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
> +	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
> +	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
> +	.get_vm_pde = gmc_v9_0_get_vm_pde,
> +	.get_vm_pte = gmc_v9_0_get_vm_pte,
> +	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
> +	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
> +	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
> +	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
> +};
> +
> +static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_override_funcs = {
>  	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
>  	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
>  	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
> @@ -1343,7 +1337,21 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
>  
>  static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
>  {
> -	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
> +	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes, local system
> +	 * memory can use more efficient MTYPEs.
> +	 *
> +	 * APUs mapping system memory may need different MTYPEs on different
> +	 * NUMA nodes.
> +	 *
> +	 * Only direct-mapped memory allows us to determine the NUMA node from
> +	 * the DMA address.
> +	 */
> +	if ((adev->gmc.is_app_apu && num_possible_nodes() > 1) &&
> +	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
> +	    adev->ram_is_direct_mapped)
> +		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_override_funcs;
> +	else
> +		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;

I think a flag in adev->gmc would probably do as well, no need to duplicate the call table.

Regards,
Christian.

>  }
>  
>  static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity
  2026-04-17 14:04 ` Christian König
@ 2026-04-17 18:40   ` Philip Yang
  0 siblings, 0 replies; 3+ messages in thread
From: Philip Yang @ 2026-04-17 18:40 UTC (permalink / raw)
  To: Christian König, Philip Yang, amd-gfx
  Cc: Felix.Kuehling, Kent.Russell, Andrew.Martin

[-- Attachment #1: Type: text/plain, Size: 11247 bytes --]



On 2026-04-17 10:04, Christian König wrote:
> On 4/17/26 15:50, Philip Yang wrote:
>> Refactor the NUMA-aware MTYPE override for VM page table entries:
>>
>> - Move the override_vm_pte_flags call from the centralized
>>    amdgpu_vm_pte_update_flags() into the individual CPU and SDMA update
>>    backends, enabling per-PTE MTYPE override including for scattered
>>    pages (pages_addr path).
>>
>> - Move APU, IP version, and direct-mapped eligibility checks from
>>    runtime (gmc_v9_0_override_vm_pte_flags) to init time
>>    (gmc_v9_0_set_gmc_funcs), selecting between gmc_funcs structs with
>>    and without the override function pointer to avoid repeated runtime
>>    checks on every PTE update.
>>
>> - Guard allow_override on whether gmc_funcs->override_vm_pte_flags is
>>    actually implemented.
>>
>> - Move amdgpu_device_check_iommu_direct_map() earlier in device init
>>    so ram_is_direct_mapped is available when gmc_funcs are selected
>>    during IP early init.
>>
>> Signed-off-by: Philip Yang<Philip.Yang@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  5 ++-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c      |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h      |  4 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  | 11 ++++-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c   |  9 ----
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++-
>>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c       | 50 ++++++++++++---------
>>   7 files changed, 55 insertions(+), 37 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index bc7e96b58d3f..b139475f65cb 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -3860,6 +3860,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>   	 * completed before the need for a different level is detected.
>>   	 */
>>   	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
>> +
>> +	amdgpu_device_check_iommu_direct_map(adev);
>> +
>>   	/* early init functions */
>>   	r = amdgpu_device_ip_early_init(adev);
>>   	if (r)
>> @@ -4117,8 +4120,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>   	if (px)
>>   		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
>>   
>> -	amdgpu_device_check_iommu_direct_map(adev);
>> -
> That should probably be a separate patch.
ok
>
>>   	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
>>   	r = register_pm_notifier(&adev->pm_nb);
>>   	if (r)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 63156289ae7f..853204b5bd73 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -1163,7 +1163,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>>   	params.pages_addr = pages_addr;
>>   	params.unlocked = unlocked;
>>   	params.needs_flush = flush_tlb;
>> -	params.allow_override = allow_override;
>> +	params.allow_override = allow_override && adev->gmc.gmc_funcs->override_vm_pte_flags;
> Prerequisite check first please!
adev->gmc.gmc_funcs->override_vm_pte_flags is not NULL only if all 
prerequisites are true.
>
> And we should probably rename params.allow_override as well. Maybe something like override_pte_flags similar to the name of the callback?
Will rename it to params->override_pte_flags; the allow_override parameter of 
amdgpu_vm_update_range itself will remain unchanged.
>
>>   	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
>>   
>>   	amdgpu_vm_eviction_lock(vm);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> index f33ea7f8509b..326522917131 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
>> @@ -296,8 +296,8 @@ struct amdgpu_vm_update_params {
>>   	bool needs_flush;
>>   
>>   	/**
>> -	 * @allow_override: true for memory that is not uncached: allows MTYPE
>> -	 * to be overridden for NUMA local memory.
>> +	 * @allow_override: true for memory that is not uncached and gmc override function is
>> +	 * implemented to allow MTYPE to be overridden for NUMA local memory.
>>   	 */
>>   	bool allow_override;
>>   
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
>> index f078db3fef79..fa5d4ac2ef39 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
>> @@ -88,12 +88,21 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
>>   
>>   	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
>>   
>> +	if (!p->pages_addr && p->allow_override)
>> +		amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
>> +
>>   	for (i = 0; i < count; i++) {
>> +		u64 oflags = flags;
>> +
>>   		value = p->pages_addr ?
>>   			amdgpu_vm_map_gart(p->pages_addr, addr) :
>>   			addr;
>> +
>> +		if (p->pages_addr && p->allow_override)
>> +			amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, value, &oflags);
>> +
>>   		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
>> -				       i, value, flags);
>> +				       i, value, oflags);
>>   		addr += incr;
>>   	}
>>   	return 0;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> index 31a437ce9570..883cc275f354 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
>> @@ -707,15 +707,6 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
>>   	if (level == AMDGPU_VM_PTB)
>>   		amdgpu_vm_pte_update_noretry_flags(adev, &flags);
>>   
>> -	/* APUs mapping system memory may need different MTYPEs on different
>> -	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
>> -	 * to be on the same NUMA node.
>> -	 */
>> -	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
>> -	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
>> -	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
>> -		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
>> -
>>   	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
>>   					 flags);
>>   }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>> index 36805dcfa159..37f0c0027075 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
>> @@ -257,6 +257,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>>   		}
>>   
>>   		if (!p->pages_addr) {
>> +			if (p->allow_override)
>> +				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
>> +
>>   			/* set page commands needed */
>>   			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
>>   						incr, flags);
>> @@ -275,8 +278,14 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
>>   		p->num_dw_left -= nptes * 2;
>>   		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
>>   		for (i = 0; i < nptes; ++i, addr += incr) {
>> +			u64 oflags = flags;
>> +
>>   			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
>> -			pte[i] |= flags;
>> +
>> +			if (p->allow_override)
>> +				amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, pte[i], &oflags);
>> +
>> +			pte[i] |= oflags;
>>   		}
>>   
>>   		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> index e7b78027002b..479611e269b8 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> @@ -1204,21 +1204,6 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
>>   {
>>   	int local_node, nid;
>>   
>> -	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
>> -	 * memory can use more efficient MTYPEs.
>> -	 */
>> -	if (!(adev->flags & AMD_IS_APU) ||
>> -	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
>> -		return;
>> -
>> -	/* Only direct-mapped memory allows us to determine the NUMA node from
>> -	 * the DMA address.
>> -	 */
>> -	if (!adev->ram_is_direct_mapped) {
>> -		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
>> -		return;
>> -	}
>> -
>>   	/* MTYPE_NC is the same default and can be overridden.
>>   	 * MTYPE_UC will be present if the memory is extended-coherent
>>   	 * and can also be overridden.
>> @@ -1231,11 +1216,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
>>   		return;
>>   	}
>>   
>> -	/* FIXME: Only supported on native mode for now. For carve-out, the
>> -	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
>> -	 * memory partitions are not associated with different NUMA nodes.
>> -	 */
>> -	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
>> +	if (vm->mem_id >= 0) {
>>   		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
>>   	} else {
>>   		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
>> @@ -1328,6 +1309,19 @@ static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
>>   }
>>   
>>   static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
>> +	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
>> +	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
>> +	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
>> +	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
>> +	.get_vm_pde = gmc_v9_0_get_vm_pde,
>> +	.get_vm_pte = gmc_v9_0_get_vm_pte,
>> +	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
>> +	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
>> +	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
>> +	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
>> +};
>> +
>> +static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_override_funcs = {
>>   	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
>>   	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
>>   	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
>> @@ -1343,7 +1337,21 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
>>   
>>   static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
>>   {
>> -	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
>> +	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes, local system
>> +	 * memory can use more efficient MTYPEs.
>> +	 *
>> +	 * APUs mapping system memory may need different MTYPEs on different
>> +	 * NUMA nodes.
>> +	 *
>> +	 * Only direct-mapped memory allows us to determine the NUMA node from
>> +	 * the DMA address.
>> +	 */
>> +	if ((adev->gmc.is_app_apu && num_possible_nodes() > 1) &&
>> +	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
>> +	    adev->ram_is_direct_mapped)
>> +		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_override_funcs;
>> +	else
>> +		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
> I think a flag in adev->gmc would probably do as well, no need to duplicate the call table.
Will remove the duplicated const call table and add a flag 
adev->gmc.override_pte instead.

Regards,
Philip
>
> Regards,
> Christian.
>
>>   }
>>   
>>   static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)

[-- Attachment #2: Type: text/html, Size: 12769 bytes --]

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-04-17 18:40 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-17 13:50 [PATCH] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity Philip Yang
2026-04-17 14:04 ` Christian König
2026-04-17 18:40   ` Philip Yang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox