From: Huang Rui <ray.huang-5C7GfCeVMHo@public.gmane.org>
To: "Christian König"
<ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>,
"Monk Liu" <monk.liu-5C7GfCeVMHo@public.gmane.org>,
"Frank Min" <frank.min-5C7GfCeVMHo@public.gmane.org>
Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH 2/8] drm/amdgpu: fix sdma v4 startup under SRIOV
Date: Tue, 9 Oct 2018 17:17:09 +0800 [thread overview]
Message-ID: <20181009091708.GB8763@hr-amur2> (raw)
In-Reply-To: <20181008133521.3237-2-christian.koenig-5C7GfCeVMHo@public.gmane.org>
On Mon, Oct 08, 2018 at 03:35:15PM +0200, Christian König wrote:
> Under SRIOV we were enabling the ring buffer before it was initialized.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 234 ++++++++++++++++-----------------
> 1 file changed, 116 insertions(+), 118 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index c20d413f277c..5ecf6c9252c4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -673,13 +673,14 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
> * sdma_v4_0_gfx_resume - setup and start the async dma engines
> *
> * @adev: amdgpu_device pointer
> + * @i: instance to resume
> *
> * Set up the gfx DMA ring buffers and enable them (VEGA10).
> * Returns 0 for success, error for failure.
> */
> -static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
> +static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
> {
> - struct amdgpu_ring *ring;
> + struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
> u32 rb_cntl, ib_cntl, wptr_poll_cntl;
> u32 rb_bufsz;
> u32 wb_offset;
> @@ -687,129 +688,108 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
> u32 doorbell_offset;
> u32 temp;
> u64 wptr_gpu_addr;
> - int i, r;
>
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - ring = &adev->sdma.instance[i].ring;
> - wb_offset = (ring->rptr_offs * 4);
> + wb_offset = (ring->rptr_offs * 4);
>
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
>
> - /* Set ring buffer size in dwords */
> - rb_bufsz = order_base_2(ring->ring_size / 4);
> - rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
> + /* Set ring buffer size in dwords */
> + rb_bufsz = order_base_2(ring->ring_size / 4);
> + rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
> #ifdef __BIG_ENDIAN
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
> - RPTR_WRITEBACK_SWAP_ENABLE, 1);
> + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
> + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
> + RPTR_WRITEBACK_SWAP_ENABLE, 1);
> #endif
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
>
> - /* Initialize the ring buffer's read and write pointers */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
> + /* Initialize the ring buffer's read and write pointers */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
>
> - /* set the wb address whether it's enabled or not */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
> - upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
> - lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
> + /* set the wb address whether it's enabled or not */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
> + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
> + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
>
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
> + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
>
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
>
> - ring->wptr = 0;
> + ring->wptr = 0;
>
> - /* before programing wptr to a less value, need set minor_ptr_update first */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
> + /* before programing wptr to a less value, need set minor_ptr_update first */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
>
> - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
> - }
> + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
> + }
>
> - doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
> - doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
> + doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
> + doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
>
> - if (ring->use_doorbell) {
> - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
> - doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
> - OFFSET, ring->doorbell_index);
> - } else {
> - doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
> - }
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
> - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> - ring->doorbell_index);
> -
> - if (amdgpu_sriov_vf(adev))
> - sdma_v4_0_ring_set_wptr(ring);
> -
> - /* set minor_ptr_update to 0 after wptr programed */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
> -
> - /* set utc l1 enable flag always to 1 */
> - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
> - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
> -
> - if (!amdgpu_sriov_vf(adev)) {
> - /* unhalt engine */
> - temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
> - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
> - }
> + if (ring->use_doorbell) {
> + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
> + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
> + OFFSET, ring->doorbell_index);
> + } else {
> + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
> + }
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
> + adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> + ring->doorbell_index);
>
> - /* setup the wptr shadow polling */
> - wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
> - lower_32_bits(wptr_gpu_addr));
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
> - upper_32_bits(wptr_gpu_addr));
> - wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
> - if (amdgpu_sriov_vf(adev))
> - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
> - else
> - wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
> + if (amdgpu_sriov_vf(adev))
> + sdma_v4_0_ring_set_wptr(ring);
>
> - /* enable DMA RB */
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
> + /* set minor_ptr_update to 0 after wptr programed */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
>
> - ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
> - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
> -#ifdef __BIG_ENDIAN
> - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
> -#endif
> - /* enable DMA IBs */
> - WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
> + /* set utc l1 enable flag always to 1 */
> + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
> + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
>
> - ring->ready = true;
> + if (!amdgpu_sriov_vf(adev)) {
> + /* unhalt engine */
> + temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
> + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
> + }
>
> - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
> - sdma_v4_0_ctx_switch_enable(adev, true);
> - sdma_v4_0_enable(adev, true);
> - }
> + /* setup the wptr shadow polling */
> + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
> + lower_32_bits(wptr_gpu_addr));
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
> + upper_32_bits(wptr_gpu_addr));
> + wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
> + if (amdgpu_sriov_vf(adev))
> + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
> + else
> + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
>
> - r = amdgpu_ring_test_ring(ring);
> - if (r) {
> - ring->ready = false;
> - return r;
> - }
> + /* enable DMA RB */
> + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
>
> - if (adev->mman.buffer_funcs_ring == ring)
> - amdgpu_ttm_set_buffer_funcs_status(adev, true);
> -
> - }
> + ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
> + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
> +#ifdef __BIG_ENDIAN
> + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
> +#endif
> + /* enable DMA IBs */
> + WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
>
> - return 0;
> + ring->ready = true;
> }
>
> static void
> @@ -941,33 +921,51 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
> */
> static int sdma_v4_0_start(struct amdgpu_device *adev)
> {
> - int r = 0;
> + struct amdgpu_ring *ring;
> + int i, r;
>
> if (amdgpu_sriov_vf(adev)) {
> sdma_v4_0_ctx_switch_enable(adev, false);
> sdma_v4_0_enable(adev, false);
> + } else {
>
> - /* set RB registers */
> - r = sdma_v4_0_gfx_resume(adev);
> - return r;
> + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
> + r = sdma_v4_0_load_microcode(adev);
> + if (r)
> + return r;
> + }
> +
> + /* unhalt the MEs */
> + sdma_v4_0_enable(adev, true);
> + /* enable sdma ring preemption */
> + sdma_v4_0_ctx_switch_enable(adev, true);
> }
>
> - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
> - r = sdma_v4_0_load_microcode(adev);
> + /* start the gfx rings and rlc compute queues */
> + for (i = 0; i < adev->sdma.num_instances; i++)
> + sdma_v4_0_gfx_resume(adev, i);
> +
> + if (amdgpu_sriov_vf(adev)) {
> + sdma_v4_0_ctx_switch_enable(adev, true);
> + sdma_v4_0_enable(adev, true);
> + } else {
> + r = sdma_v4_0_rlc_resume(adev);
> if (r)
> return r;
> }
+ Monk, Frank,
I probably cannot judge here; under SRIOV, I saw you disable ctx switch
before. Do you have any concerns if we enable it here?
Otherwise, it looks good to me. Christian, may we know which kinds of jobs will
use the sdma page queue (ring)? You know, we only had the sdma gfx queue (ring) before.
Thanks,
Ray
>
> - /* unhalt the MEs */
> - sdma_v4_0_enable(adev, true);
> - /* enable sdma ring preemption */
> - sdma_v4_0_ctx_switch_enable(adev, true);
> + for (i = 0; i < adev->sdma.num_instances; i++) {
> + ring = &adev->sdma.instance[i].ring;
>
> - /* start the gfx rings and rlc compute queues */
> - r = sdma_v4_0_gfx_resume(adev);
> - if (r)
> - return r;
> - r = sdma_v4_0_rlc_resume(adev);
> + r = amdgpu_ring_test_ring(ring);
> + if (r) {
> + ring->ready = false;
> + return r;
> + }
> +
> + if (adev->mman.buffer_funcs_ring == ring)
> + amdgpu_ttm_set_buffer_funcs_status(adev, true);
> + }
>
> return r;
> }
> --
> 2.14.1
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
next prev parent reply other threads:[~2018-10-09 9:17 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-08 13:35 [PATCH 1/8] drm/amdgpu: fix incorrect use of amdgpu_irq_add_id in si_dma.c Christian König
[not found] ` <20181008133521.3237-1-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-08 13:35 ` [PATCH 2/8] drm/amdgpu: fix sdma v4 startup under SRIOV Christian König
[not found] ` <20181008133521.3237-2-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:17 ` Huang Rui [this message]
2018-10-09 10:56 ` Christian König
[not found] ` <5ae6a2fe-80d6-858e-dcd2-2d44ab0b76ce-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-10-09 11:45 ` Liu, Monk
[not found] ` <CY4PR1201MB024521C2A0EA4BAE7272EA6584E70-1iTaO6aE1DBfNQakwlCMTGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2018-10-09 13:03 ` Koenig, Christian
[not found] ` <dbab4a65-d9ec-8ac7-75bb-86033de043f5-5C7GfCeVMHo@public.gmane.org>
2018-10-10 6:53 ` Liu, Monk
[not found] ` <CY4PR1201MB0245F26FFD7EE7558A7401B984E00-1iTaO6aE1DBfNQakwlCMTGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2018-10-10 7:24 ` Ma, Sigil
[not found] ` <CY4PR12MB1351B3D0E5E5A75BFAF7F0D487E00-rpdhrqHFk04aRV2spazHLQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2018-10-10 7:52 ` Liu, Monk
[not found] ` <CY4PR1201MB024507BB222336DFA92304C784E00-1iTaO6aE1DBfNQakwlCMTGrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2018-10-12 14:27 ` Koenig, Christian
[not found] ` <8d7c9d2e-6d4d-34d3-d8dc-102e253610f2-5C7GfCeVMHo@public.gmane.org>
2018-10-16 12:34 ` Christian König
[not found] ` <2512cfee-a603-75c4-bf10-9ae0b4b8c5c7-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-10-16 14:42 ` Huang Rui
2018-10-08 13:35 ` [PATCH 3/8] drm/amdgpu: add basics for SDMA page queue support Christian König
[not found] ` <20181008133521.3237-3-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:31 ` Huang Rui
2018-10-08 13:35 ` [PATCH 4/8] drm/amdgpu: remove non gfx specific handling from sdma_v4_0_gfx_resume Christian König
[not found] ` <20181008133521.3237-4-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:34 ` Huang Rui
2018-10-08 13:35 ` [PATCH 5/8] drm/amdgpu: remove SRIOV " Christian König
[not found] ` <20181008133521.3237-5-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:35 ` Huang Rui
2018-10-08 13:35 ` [PATCH 6/8] drm/amdgpu: add some [WR]REG32_SDMA macros to sdma_v4_0.c Christian König
[not found] ` <20181008133521.3237-6-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:36 ` Huang Rui
2018-10-08 13:35 ` [PATCH 7/8] drm/amdgpu: activate paging queue on SDMA v4 Christian König
[not found] ` <20181008133521.3237-7-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:40 ` Huang Rui
2018-10-08 13:35 ` [PATCH 8/8] drm/amdgpu: use paging queue for VM page table updates Christian König
[not found] ` <20181008133521.3237-8-christian.koenig-5C7GfCeVMHo@public.gmane.org>
2018-10-09 9:43 ` Huang Rui
2018-10-09 8:37 ` [PATCH 1/8] drm/amdgpu: fix incorrect use of amdgpu_irq_add_id in si_dma.c Huang Rui
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181009091708.GB8763@hr-amur2 \
--to=ray.huang-5c7gfcevmho@public.gmane.org \
--cc=amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
--cc=ckoenig.leichtzumerken-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org \
--cc=frank.min-5C7GfCeVMHo@public.gmane.org \
--cc=monk.liu-5C7GfCeVMHo@public.gmane.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox