public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: "Christian König" <christian.koenig@amd.com>
To: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>,
	Alex Deucher <alexander.deucher@amd.com>,
	David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>
Cc: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 14/20] drm/amdgpu: introduce amdgpu_sdma_set_vm_pte_scheds
Date: Mon, 17 Nov 2025 10:30:02 +0100	[thread overview]
Message-ID: <b339a6bb-ecd1-4e86-a861-cc2b78f153be@amd.com> (raw)
In-Reply-To: <20251113160632.5889-15-pierre-eric.pelloux-prayer@amd.com>

On 11/13/25 17:05, Pierre-Eric Pelloux-Prayer wrote:
> All sdma versions used the same logic, so add a helper and move the
> common code to a single place.
> 
> ---
> v2: pass amdgpu_vm_pte_funcs as well
> ---
> 
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  2 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 17 +++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/cik_sdma.c    |  9 +--------
>  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c   |  9 +--------
>  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   |  9 +--------
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c   | 13 +------------
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 13 +------------
>  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c   | 12 ++----------
>  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c   | 12 ++----------
>  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c   |  9 +--------
>  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c   |  9 +--------
>  drivers/gpu/drm/amd/amdgpu/si_dma.c      |  9 +--------
>  12 files changed, 31 insertions(+), 92 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 50079209c472..3fab3dc9f3e4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1613,6 +1613,8 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
>  bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
>  ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
>  ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
> +void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
> +				   const struct amdgpu_vm_pte_funcs *vm_pte_funcs);
>  
>  /* atpx handler */
>  #if defined(CONFIG_VGA_SWITCHEROO)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index aaa44199e9f4..3d29c3642d1a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -3210,3 +3210,20 @@ void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
>  		task_info->process_name, task_info->tgid,
>  		task_info->task.comm, task_info->task.pid);
>  }
> +
> +void amdgpu_sdma_set_vm_pte_scheds(struct amdgpu_device *adev,
> +				   const struct amdgpu_vm_pte_funcs *vm_pte_funcs)
> +{
> +	struct drm_gpu_scheduler *sched;
> +	int i;
> +
> +	for (i = 0; i < adev->sdma.num_instances; i++) {
> +		if (adev->sdma.has_page_queue)
> +			sched = &adev->sdma.instance[i].page.sched;
> +		else
> +			sched = &adev->sdma.instance[i].ring.sched;
> +		adev->vm_manager.vm_pte_scheds[i] = sched;
> +	}
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_funcs = vm_pte_funcs;
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> index 9e8715b4739d..5fe162f52c92 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> @@ -1347,14 +1347,7 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
>  
>  static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			&adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &cik_sdma_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version cik_sdma_ip_block =
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> index 92ce580647cd..63636643db3d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> @@ -1242,14 +1242,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
>  
>  static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {

I think we can also get rid of all those sdma_v*_set_vm_pte_funcs functions, which are now just single-line calls.

Regards,
Christian.

> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			&adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v2_4_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> index 1c076bd1cf73..0153626b5df2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> @@ -1684,14 +1684,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
>  
>  static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			 &adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v3_0_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index f38004e6064e..96a67b30854c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -2625,18 +2625,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
>  
>  static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	struct drm_gpu_scheduler *sched;
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		if (adev->sdma.has_page_queue)
> -			sched = &adev->sdma.instance[i].page.sched;
> -		else
> -			sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_scheds[i] = sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_0_vm_pte_funcs);
>  }
>  
>  static void sdma_v4_0_get_ras_error_count(uint32_t value,
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> index 36b1ca73c2ed..04dc8a8f4d66 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> @@ -2326,18 +2326,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
>  
>  static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	struct drm_gpu_scheduler *sched;
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		if (adev->sdma.has_page_queue)
> -			sched = &adev->sdma.instance[i].page.sched;
> -		else
> -			sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_scheds[i] = sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_4_2_vm_pte_funcs);
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> index 7dc67a22a7a0..19c717f2c602 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> @@ -2081,16 +2081,8 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
>  
>  static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	if (adev->vm_manager.vm_pte_funcs == NULL) {
> -		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++) {
> -			adev->vm_manager.vm_pte_scheds[i] =
> -				&adev->sdma.instance[i].ring.sched;
> -		}
> -		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -	}
> +	if (adev->vm_manager.vm_pte_funcs == NULL)
> +		amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_0_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> index 3bd44c24f692..7a07b8f4e86d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> @@ -2091,16 +2091,8 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
>  
>  static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	if (adev->vm_manager.vm_pte_funcs == NULL) {
> -		adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
> -		for (i = 0; i < adev->sdma.num_instances; i++) {
> -			adev->vm_manager.vm_pte_scheds[i] =
> -				&adev->sdma.instance[i].ring.sched;
> -		}
> -		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> -	}
> +	if (adev->vm_manager.vm_pte_funcs == NULL)
> +		amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_2_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> index db6e41967f12..8f8228c7adee 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> @@ -1897,14 +1897,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
>  
>  static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			&adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v6_0_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> index 326ecc8d37d2..cf412d8fb0ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
> @@ -1839,14 +1839,7 @@ static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
>  
>  static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &sdma_v7_0_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			&adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v7_0_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version sdma_v7_0_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> index 7f18e4875287..863e00086c30 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> @@ -840,14 +840,7 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
>  
>  static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
>  {
> -	unsigned i;
> -
> -	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
> -	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		adev->vm_manager.vm_pte_scheds[i] =
> -			&adev->sdma.instance[i].ring.sched;
> -	}
> -	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
> +	amdgpu_sdma_set_vm_pte_scheds(adev, &si_dma_vm_pte_funcs);
>  }
>  
>  const struct amdgpu_ip_block_version si_dma_ip_block =


  reply	other threads:[~2025-11-17  9:30 UTC|newest]

Thread overview: 53+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-11-13 16:05 [PATCH v2 00/20] drm/amdgpu: use all SDMA instances for TTM clears and moves Pierre-Eric Pelloux-Prayer
2025-11-13 16:05 ` [PATCH v2 01/20] drm/amdgpu: give each kernel job a unique id Pierre-Eric Pelloux-Prayer
2025-11-14 12:26   ` Christian König
2025-11-14 14:36     ` Pierre-Eric Pelloux-Prayer
2025-11-14 14:57       ` Christian König
2025-11-13 16:05 ` [PATCH v2 02/20] drm/ttm: rework pipelined eviction fence handling Pierre-Eric Pelloux-Prayer
2025-11-14 12:47   ` Christian König
2025-11-18 15:00   ` Thomas Hellström
2025-11-19 14:57     ` Christian König
2025-11-13 16:05 ` [PATCH v2 03/20] drm/amdgpu: remove direct_submit arg from amdgpu_copy_buffer Pierre-Eric Pelloux-Prayer
2025-11-14 12:48   ` Christian König
2025-11-13 16:05 ` [PATCH v2 04/20] drm/amdgpu: introduce amdgpu_ttm_buffer_entity Pierre-Eric Pelloux-Prayer
2025-11-14 12:57   ` Christian König
2025-11-14 20:18     ` Felix Kuehling
2025-11-13 16:05 ` [PATCH v2 05/20] drm/amdgpu: pass the entity to use to ttm functions Pierre-Eric Pelloux-Prayer
2025-11-14 13:07   ` Christian König
2025-11-14 14:41     ` Pierre-Eric Pelloux-Prayer
2025-11-17  9:41       ` Pierre-Eric Pelloux-Prayer
2025-11-14 20:20   ` Felix Kuehling
2025-11-13 16:05 ` [PATCH v2 06/20] drm/amdgpu: statically assign gart windows to ttm entities Pierre-Eric Pelloux-Prayer
2025-11-14 15:15   ` Christian König
2025-11-14 20:24   ` Felix Kuehling
2025-11-19  9:55     ` Pierre-Eric Pelloux-Prayer
2025-11-13 16:05 ` [PATCH v2 07/20] drm/amdgpu: allocate multiple clear entities Pierre-Eric Pelloux-Prayer
2025-11-17  8:41   ` Christian König
2025-11-13 16:05 ` [PATCH v2 08/20] drm/amdgpu: allocate multiple move entities Pierre-Eric Pelloux-Prayer
2025-11-14 20:57   ` Felix Kuehling
2025-11-13 16:05 ` [PATCH v2 09/20] drm/amdgpu: pass optional dependency to amdgpu_fill_buffer Pierre-Eric Pelloux-Prayer
2025-11-17  8:43   ` Christian König
2025-11-13 16:05 ` [PATCH v2 10/20] drm/admgpu: handle resv dependencies in amdgpu_ttm_map_buffer Pierre-Eric Pelloux-Prayer
2025-11-17  8:44   ` Christian König
2025-11-19  8:28     ` Pierre-Eric Pelloux-Prayer
2025-11-13 16:05 ` [PATCH v2 11/20] drm/amdgpu: round robin through clear_entities in amdgpu_fill_buffer Pierre-Eric Pelloux-Prayer
2025-11-17  8:47   ` Christian König
2025-11-13 16:05 ` [PATCH v2 12/20] drm/amdgpu: use TTM_NUM_MOVE_FENCES when reserving fences Pierre-Eric Pelloux-Prayer
2025-11-14 20:57   ` Felix Kuehling
2025-11-17  9:07   ` Christian König
2025-11-13 16:05 ` [PATCH v2 13/20] drm/amdgpu: use multiple entities in amdgpu_move_blit Pierre-Eric Pelloux-Prayer
2025-11-17  9:12   ` Christian König
2025-11-13 16:05 ` [PATCH v2 14/20] drm/amdgpu: introduce amdgpu_sdma_set_vm_pte_scheds Pierre-Eric Pelloux-Prayer
2025-11-17  9:30   ` Christian König [this message]
2025-11-13 16:05 ` [PATCH v2 15/20] drm/amdgpu: pass all the sdma scheds to amdgpu_mman Pierre-Eric Pelloux-Prayer
2025-11-14 21:23   ` Felix Kuehling
2025-11-17  9:46   ` Christian König
2025-11-19  9:34     ` Pierre-Eric Pelloux-Prayer
2025-11-19 10:49       ` Christian König
2025-11-13 16:05 ` [PATCH v2 16/20] drm/amdgpu: give ttm entities access to all the sdma scheds Pierre-Eric Pelloux-Prayer
2025-11-17  9:54   ` Christian König
2025-11-13 16:05 ` [PATCH v2 17/20] drm/amdgpu: get rid of amdgpu_ttm_clear_buffer Pierre-Eric Pelloux-Prayer
2025-11-13 16:05 ` [PATCH v2 18/20] drm/amdgpu: rename amdgpu_fill_buffer as amdgpu_ttm_clear_buffer Pierre-Eric Pelloux-Prayer
2025-11-17  9:56   ` Christian König
2025-11-13 16:06 ` [PATCH v2 19/20] drm/amdgpu: use larger gart window when possible Pierre-Eric Pelloux-Prayer
2025-11-13 16:06 ` [PATCH v2 20/20] drm/amdgpu: double AMDGPU_GTT_MAX_TRANSFER_SIZE Pierre-Eric Pelloux-Prayer

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b339a6bb-ecd1-4e86-a861-cc2b78f153be@amd.com \
    --to=christian.koenig@amd.com \
    --cc=airlied@gmail.com \
    --cc=alexander.deucher@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pierre-eric.pelloux-prayer@amd.com \
    --cc=simona@ffwll.ch \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox