From: "Christian König" <christian.koenig@amd.com>
To: "Pan, Xinhui" <Xinhui.Pan@amd.com>,
"amd-gfx@lists.freedesktop.org" <amd-gfx@lists.freedesktop.org>
Cc: "Deucher, Alexander" <Alexander.Deucher@amd.com>
Subject: Re: 回复: [PATCH 4/4] drm/amdgpu: VCN avoid memory allocation during IB test
Date: Fri, 10 Sep 2021 09:55:42 +0200 [thread overview]
Message-ID: <05062a68-581b-6fa5-bbee-3a7286d8309a@amd.com> (raw)
In-Reply-To: <DM4PR12MB51658D52B6B2F20D69749DE587D69@DM4PR12MB5165.namprd12.prod.outlook.com>
Try that plugin here https://github.com/vivien/vim-linux-coding-style
I've been using it for years and it's really helpful.
Christian.
Am 10.09.21 um 09:53 schrieb Pan, Xinhui:
> [AMD Official Use Only]
>
> I am using vim with
> set tabstop=8
> set shiftwidth=8
> set softtabstop=8
>
> ________________________________________
> 发件人: Koenig, Christian <Christian.Koenig@amd.com>
> 发送时间: 2021年9月10日 14:33
> 收件人: Pan, Xinhui; amd-gfx@lists.freedesktop.org
> 抄送: Deucher, Alexander
> 主题: Re: [PATCH 4/4] drm/amdgpu: VCN avoid memory allocation during IB test
>
>
>
> Am 10.09.21 um 02:38 schrieb xinhui pan:
>> alloc extra msg from direct IB pool.
>>
>> Signed-off-by: xinhui pan <xinhui.pan@amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 99 +++++++++++--------------
>> 1 file changed, 45 insertions(+), 54 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> index 561296a85b43..b60d5f01fdae 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
>> @@ -541,15 +541,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
>> }
>>
>> static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> - struct dma_fence **fence)
>> + struct amdgpu_ib *ib_msg,
>> + struct dma_fence **fence)
> The parameter indentation here and at a few other places doesn't look
> correct to me, what editor are you using BTW?
>
> Apart from that the patch is Reviewed-by: Christian König
> <christian.koenig@amd.com>.
>
> Regards,
> Christian.
>
>> {
>> struct amdgpu_device *adev = ring->adev;
>> struct dma_fence *f = NULL;
>> struct amdgpu_job *job;
>> struct amdgpu_ib *ib;
>> - uint64_t addr;
>> - void *msg = NULL;
>> + uint64_t addr = ib_msg->gpu_addr;
>> int i, r;
>>
>> r = amdgpu_job_alloc_with_ib(adev, 64,
>> @@ -558,8 +557,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> - msg = amdgpu_bo_kptr(bo);
>> ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
>> ib->ptr[1] = addr;
>> ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
>> @@ -576,9 +573,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
>> + amdgpu_ib_free(adev, ib_msg, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -588,27 +583,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
>>
>> err_free:
>> amdgpu_job_free(job);
>> -
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
>> + amdgpu_ib_free(adev, ib_msg, f);
>> return r;
>> }
>>
>> static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_ib *ib)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> - *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> + memset(ib, 0, sizeof(*ib));
>> + r = amdgpu_ib_get(adev, NULL, PAGE_SIZE,
>> + AMDGPU_IB_POOL_DIRECT,
>> + ib);
>> if (r)
>> return r;
>>
>> + msg = ib->ptr;
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000038);
>> msg[2] = cpu_to_le32(0x00000001);
>> @@ -630,19 +624,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>> }
>>
>> static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo **bo)
>> + struct amdgpu_ib *ib)
>> {
>> struct amdgpu_device *adev = ring->adev;
>> uint32_t *msg;
>> int r, i;
>>
>> - *bo = NULL;
>> - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - bo, NULL, (void **)&msg);
>> + memset(ib, 0, sizeof(*ib));
>> + r = amdgpu_ib_get(adev, NULL, PAGE_SIZE,
>> + AMDGPU_IB_POOL_DIRECT,
>> + ib);
>> if (r)
>> return r;
>>
>> + msg = ib->ptr;
>> msg[0] = cpu_to_le32(0x00000028);
>> msg[1] = cpu_to_le32(0x00000018);
>> msg[2] = cpu_to_le32(0x00000000);
>> @@ -658,21 +653,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>> int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_ib ib;
>> long r;
>>
>> - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
>> if (r)
>> goto error;
>>
>> - r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
>> + r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
>> if (r)
>> goto error;
>> - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
>> + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
>> if (r)
>> goto error;
>>
>> - r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
>> + r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
>> if (r)
>> goto error;
>>
>> @@ -688,8 +683,8 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> }
>>
>> static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> - struct amdgpu_bo *bo,
>> - struct dma_fence **fence)
>> + struct amdgpu_ib *ib_msg,
>> + struct dma_fence **fence)
>> {
>> struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
>> const unsigned int ib_size_dw = 64;
>> @@ -697,7 +692,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> struct dma_fence *f = NULL;
>> struct amdgpu_job *job;
>> struct amdgpu_ib *ib;
>> - uint64_t addr;
>> + uint64_t addr = ib_msg->gpu_addr;
>> int i, r;
>>
>> r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
>> @@ -706,7 +701,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> goto err;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> ib->length_dw = 0;
>>
>> ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
>> @@ -726,9 +720,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>> if (r)
>> goto err_free;
>>
>> - amdgpu_bo_fence(bo, f, false);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_ib_free(adev, ib_msg, f);
>>
>> if (fence)
>> *fence = dma_fence_get(f);
>> @@ -738,31 +730,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>>
>> err_free:
>> amdgpu_job_free(job);
>> -
>> err:
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_unref(&bo);
>> + amdgpu_ib_free(adev, ib_msg, f);
>> return r;
>> }
>>
>> int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo;
>> + struct amdgpu_ib ib;
>> long r;
>>
>> - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
>> + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
>> if (r)
>> goto error;
>>
>> - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
>> + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
>> if (r)
>> goto error;
>> - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
>> + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
>> if (r)
>> goto error;
>>
>> - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
>> + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
>> if (r)
>> goto error;
>>
>> @@ -809,7 +799,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
>> }
>>
>> static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_ib *ib_msg,
>> struct dma_fence **fence)
>> {
>> const unsigned ib_size_dw = 16;
>> @@ -825,7 +815,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>> return r;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = ib_msg->gpu_addr;
>>
>> ib->length_dw = 0;
>> ib->ptr[ib->length_dw++] = 0x00000018;
>> @@ -863,7 +853,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>> }
>>
>> static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
>> - struct amdgpu_bo *bo,
>> + struct amdgpu_ib *ib_msg,
>> struct dma_fence **fence)
>> {
>> const unsigned ib_size_dw = 16;
>> @@ -879,7 +869,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>> return r;
>>
>> ib = &job->ibs[0];
>> - addr = amdgpu_bo_gpu_offset(bo);
>> + addr = ib_msg->gpu_addr;
>>
>> ib->length_dw = 0;
>> ib->ptr[ib->length_dw++] = 0x00000018;
>> @@ -918,21 +908,23 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>>
>> int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> {
>> + struct amdgpu_device *adev = ring->adev;
>> struct dma_fence *fence = NULL;
>> - struct amdgpu_bo *bo = NULL;
>> + struct amdgpu_ib ib;
>> long r;
>>
>> - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
>> - AMDGPU_GEM_DOMAIN_VRAM,
>> - &bo, NULL, NULL);
>> + memset(&ib, 0, sizeof(ib));
>> + r = amdgpu_ib_get(adev, NULL, 128 << 10,
>> + AMDGPU_IB_POOL_DIRECT,
>> + &ib);
>> if (r)
>> return r;
>>
>> - r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
>> + r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
>> if (r)
>> goto error;
>>
>> - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
>> + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
>> if (r)
>> goto error;
>>
>> @@ -943,9 +935,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
>> r = 0;
>>
>> error:
>> + amdgpu_ib_free(adev, &ib, fence);
>> dma_fence_put(fence);
>> - amdgpu_bo_unreserve(bo);
>> - amdgpu_bo_free_kernel(&bo, NULL, NULL);
>>
>> return r;
>> }
next prev parent reply other threads:[~2021-09-10 7:55 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-09-10 0:38 [PATCH 1/4] drm/amdgpu: Increase direct IB pool size xinhui pan
2021-09-10 0:38 ` [PATCH 2/4] drm/amdgpu: UVD avoid memory allocation during IB test xinhui pan
2021-09-10 6:24 ` Christian König
2021-09-10 8:18 ` 回复: " Pan, Xinhui
2021-09-10 8:53 ` Christian König
2021-09-10 9:42 ` 回复: " Pan, Xinhui
2021-09-10 10:02 ` Christian König
2021-09-10 10:10 ` 回复: " Pan, Xinhui
2021-09-10 11:10 ` Christian König
2021-09-10 11:48 ` 回复: " Pan, Xinhui
2021-09-10 12:23 ` Christian König
2021-09-10 0:38 ` [PATCH 3/4] drm/amdgpu: VCE " xinhui pan
2021-09-10 6:29 ` Christian König
2021-09-10 0:38 ` [PATCH 4/4] drm/amdgpu: VCN " xinhui pan
2021-09-10 6:33 ` Christian König
2021-09-10 7:53 ` 回复: " Pan, Xinhui
2021-09-10 7:55 ` Christian König [this message]
2021-09-10 6:15 ` [PATCH 1/4] drm/amdgpu: Increase direct IB pool size Christian König
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=05062a68-581b-6fa5-bbee-3a7286d8309a@amd.com \
--to=christian.koenig@amd.com \
--cc=Alexander.Deucher@amd.com \
--cc=Xinhui.Pan@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox