* [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence
@ 2021-12-15 6:35 Huang Rui
2021-12-15 10:57 ` Christian König
2021-12-15 13:30 ` kernel test robot
0 siblings, 2 replies; 4+ messages in thread
From: Huang Rui @ 2021-12-15 6:35 UTC (permalink / raw)
To: dri-devel, Christian König, Daniel Vetter, Sumit Semwal
Cc: amd-gfx, linux-media, Alex Deucher, Monk Liu, Andrey Grodzovsky,
Huang Rui
The job embedded fence doesn't initialize the flags at
dma_fence_init(). Then we will go down the wrong path in the
amdgpu_fence_get_timeline_name callback and trigger a null pointer panic
once we enable the trace event here. So introduce a new amdgpu_fence
object to indicate the job embedded fence.
[ 156.131790] BUG: kernel NULL pointer dereference, address: 00000000000002a0
[ 156.131804] #PF: supervisor read access in kernel mode
[ 156.131811] #PF: error_code(0x0000) - not-present page
[ 156.131817] PGD 0 P4D 0
[ 156.131824] Oops: 0000 [#1] PREEMPT SMP PTI
[ 156.131832] CPU: 6 PID: 1404 Comm: sdma0 Tainted: G OE 5.16.0-rc1-custom #1
[ 156.131842] Hardware name: Gigabyte Technology Co., Ltd. Z170XP-SLI/Z170XP-SLI-CF, BIOS F20 11/04/2016
[ 156.131848] RIP: 0010:strlen+0x0/0x20
[ 156.131859] Code: 89 c0 c3 0f 1f 80 00 00 00 00 48 01 fe eb 0f 0f b6 07 38 d0 74 10 48 83 c7 01 84 c0 74 05 48 39 f7 75 ec 31 c0 c3 48 89 f8 c3 <80> 3f 00 74 10 48 89 f8 48 83 c0 01 80 38 00 75 f7 48 29 f8 c3 31
[ 156.131872] RSP: 0018:ffff9bd0018dbcf8 EFLAGS: 00010206
[ 156.131880] RAX: 00000000000002a0 RBX: ffff8d0305ef01b0 RCX: 000000000000000b
[ 156.131888] RDX: ffff8d03772ab924 RSI: ffff8d0305ef01b0 RDI: 00000000000002a0
[ 156.131895] RBP: ffff9bd0018dbd60 R08: ffff8d03002094d0 R09: 0000000000000000
[ 156.131901] R10: 000000000000005e R11: 0000000000000065 R12: ffff8d03002094d0
[ 156.131907] R13: 000000000000001f R14: 0000000000070018 R15: 0000000000000007
[ 156.131914] FS: 0000000000000000(0000) GS:ffff8d062ed80000(0000) knlGS:0000000000000000
[ 156.131923] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 156.131929] CR2: 00000000000002a0 CR3: 000000001120a005 CR4: 00000000003706e0
[ 156.131937] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 156.131942] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 156.131949] Call Trace:
[ 156.131953] <TASK>
[ 156.131957] ? trace_event_raw_event_dma_fence+0xcc/0x200
[ 156.131973] ? ring_buffer_unlock_commit+0x23/0x130
[ 156.131982] dma_fence_init+0x92/0xb0
[ 156.131993] amdgpu_fence_emit+0x10d/0x2b0 [amdgpu]
[ 156.132302] amdgpu_ib_schedule+0x2f9/0x580 [amdgpu]
[ 156.132586] amdgpu_job_run+0xed/0x220 [amdgpu]
Signed-off-by: Huang Rui <ray.huang@amd.com>
---
V1 -> V2: add another amdgpu_fence_ops which is for job-embedded fence.
V2 -> V3: use amdgpu_fence_driver_clear_job_fences to abstract the job fence
clearing operation.
---
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 126 ++++++++++++++-------
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 +-
3 files changed, 90 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5625f7736e37..fecf7a09e5a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4456,7 +4456,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
- int i, j, r = 0;
+ int i, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4478,15 +4478,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
/*clear job fence from fence drv to avoid force_completion
*leave NULL and vm flush fence in fence drv */
- for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
- struct dma_fence *old, **ptr;
+ amdgpu_fence_driver_clear_job_fences(ring);
- ptr = &ring->fence_drv.fences[j];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
- RCU_INIT_POINTER(*ptr, NULL);
- }
- }
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3b7e86ea7167..db41d16838b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops)
+ if (__f->base.ops == &amdgpu_fence_ops ||
+ __f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
}
seq = ++ring->fence_drv.sync_seq;
- if (job != NULL && job->job_run_counter) {
+ if (job && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
- }
-
- if (job != NULL) {
- /* mark this fence has a parent job */
- set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+ if (job)
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ else
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: ring whose job embedded fences are to be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+ int i;
+ struct dma_fence *old, **ptr;
+
+ for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+ ptr = &ring->fence_drv.fences[i];
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && old->ops == &amdgpu_job_fence_ops)
+ RCU_INIT_POINTER(*ptr, NULL);
+ }
+}
+
/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ return (const char *)to_amdgpu_fence(f)->ring->name;
+}
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
- return (const char *)ring->name;
+ return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ return true;
+}
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above, but it
+ * only handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
+ if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- /* free job if fence has a parent job */
- struct amdgpu_job *job;
-
- job = container_of(f, struct amdgpu_job, hw_fence);
- kfree(job);
- } else {
/* free fence_slab if it's separated fence*/
- struct amdgpu_fence *fence;
+ kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
- fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
- }
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+ /* free job if fence has a parent job */
+ kfree(container_of(f, struct amdgpu_job, hw_fence));
}
/**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above, but it
+ * only handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+ .get_driver_name = amdgpu_fence_get_driver_name,
+ .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+ .enable_signaling = amdgpu_job_fence_enable_signaling,
+ .release = amdgpu_job_fence_release,
+};
/*
* Fence debugfs
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4d380e79752c..fae7d185ad0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
-
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
--
2.25.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence
2021-12-15 6:35 [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence Huang Rui
@ 2021-12-15 10:57 ` Christian König
2021-12-15 13:30 ` kernel test robot
1 sibling, 0 replies; 4+ messages in thread
From: Christian König @ 2021-12-15 10:57 UTC (permalink / raw)
To: Huang Rui, dri-devel, Daniel Vetter, Sumit Semwal
Cc: amd-gfx, linux-media, Alex Deucher, Monk Liu, Andrey Grodzovsky
Am 15.12.21 um 07:35 schrieb Huang Rui:
> The job embedded fence donesn't initialize the flags at
> dma_fence_init(). Then we will go a wrong way in
> amdgpu_fence_get_timeline_name callback and trigger a null pointer panic
> once we enabled the trace event here. So introduce new amdgpu_fence
> object to indicate the job embedded fence.
>
> [ 156.131790] BUG: kernel NULL pointer dereference, address: 00000000000002a0
> [ 156.131804] #PF: supervisor read access in kernel mode
> [ 156.131811] #PF: error_code(0x0000) - not-present page
> [ 156.131817] PGD 0 P4D 0
> [ 156.131824] Oops: 0000 [#1] PREEMPT SMP PTI
> [ 156.131832] CPU: 6 PID: 1404 Comm: sdma0 Tainted: G OE 5.16.0-rc1-custom #1
> [ 156.131842] Hardware name: Gigabyte Technology Co., Ltd. Z170XP-SLI/Z170XP-SLI-CF, BIOS F20 11/04/2016
> [ 156.131848] RIP: 0010:strlen+0x0/0x20
> [ 156.131859] Code: 89 c0 c3 0f 1f 80 00 00 00 00 48 01 fe eb 0f 0f b6 07 38 d0 74 10 48 83 c7 01 84 c0 74 05 48 39 f7 75 ec 31 c0 c3 48 89 f8 c3 <80> 3f 00 74 10 48 89 f8 48 83 c0 01 80 38 00 75 f7 48 29 f8 c3 31
> [ 156.131872] RSP: 0018:ffff9bd0018dbcf8 EFLAGS: 00010206
> [ 156.131880] RAX: 00000000000002a0 RBX: ffff8d0305ef01b0 RCX: 000000000000000b
> [ 156.131888] RDX: ffff8d03772ab924 RSI: ffff8d0305ef01b0 RDI: 00000000000002a0
> [ 156.131895] RBP: ffff9bd0018dbd60 R08: ffff8d03002094d0 R09: 0000000000000000
> [ 156.131901] R10: 000000000000005e R11: 0000000000000065 R12: ffff8d03002094d0
> [ 156.131907] R13: 000000000000001f R14: 0000000000070018 R15: 0000000000000007
> [ 156.131914] FS: 0000000000000000(0000) GS:ffff8d062ed80000(0000) knlGS:0000000000000000
> [ 156.131923] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [ 156.131929] CR2: 00000000000002a0 CR3: 000000001120a005 CR4: 00000000003706e0
> [ 156.131937] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
> [ 156.131942] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
> [ 156.131949] Call Trace:
> [ 156.131953] <TASK>
> [ 156.131957] ? trace_event_raw_event_dma_fence+0xcc/0x200
> [ 156.131973] ? ring_buffer_unlock_commit+0x23/0x130
> [ 156.131982] dma_fence_init+0x92/0xb0
> [ 156.131993] amdgpu_fence_emit+0x10d/0x2b0 [amdgpu]
> [ 156.132302] amdgpu_ib_schedule+0x2f9/0x580 [amdgpu]
> [ 156.132586] amdgpu_job_run+0xed/0x220 [amdgpu]
>
> Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
> ---
>
> V1 -> V2: add another amdgpu_fence_ops which is for job-embedded fence.
> V2 -> V3: use amdgpu_fence_driver_clear_job_fences abstract the job fence
> clearing operation.
>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 126 ++++++++++++++-------
> drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 +-
> 3 files changed, 90 insertions(+), 51 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 5625f7736e37..fecf7a09e5a2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -4456,7 +4456,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
> int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
> struct amdgpu_reset_context *reset_context)
> {
> - int i, j, r = 0;
> + int i, r = 0;
> struct amdgpu_job *job = NULL;
> bool need_full_reset =
> test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
> @@ -4478,15 +4478,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
>
> /*clear job fence from fence drv to avoid force_completion
> *leave NULL and vm flush fence in fence drv */
> - for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
> - struct dma_fence *old, **ptr;
> + amdgpu_fence_driver_clear_job_fences(ring);
>
> - ptr = &ring->fence_drv.fences[j];
> - old = rcu_dereference_protected(*ptr, 1);
> - if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
> - RCU_INIT_POINTER(*ptr, NULL);
> - }
> - }
> /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
> amdgpu_fence_driver_force_completion(ring);
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 3b7e86ea7167..db41d16838b9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
> * Cast helper
> */
> static const struct dma_fence_ops amdgpu_fence_ops;
> +static const struct dma_fence_ops amdgpu_job_fence_ops;
> static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
> {
> struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
>
> - if (__f->base.ops == &amdgpu_fence_ops)
> + if (__f->base.ops == &amdgpu_fence_ops ||
> + __f->base.ops == &amdgpu_job_fence_ops)
> return __f;
>
> return NULL;
> @@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
> }
>
> seq = ++ring->fence_drv.sync_seq;
> - if (job != NULL && job->job_run_counter) {
> + if (job && job->job_run_counter) {
> /* reinit seq for resubmitted jobs */
> fence->seqno = seq;
> } else {
> - dma_fence_init(fence, &amdgpu_fence_ops,
> - &ring->fence_drv.lock,
> - adev->fence_context + ring->idx,
> - seq);
> - }
> -
> - if (job != NULL) {
> - /* mark this fence has a parent job */
> - set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
> + if (job)
> + dma_fence_init(fence, &amdgpu_job_fence_ops,
> + &ring->fence_drv.lock,
> + adev->fence_context + ring->idx, seq);
> + else
> + dma_fence_init(fence, &amdgpu_fence_ops,
> + &ring->fence_drv.lock,
> + adev->fence_context + ring->idx, seq);
> }
>
> amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
> @@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
> }
> }
>
> +/**
> + * amdgpu_fence_clear_job_fences - clear job embedded fences of ring
> + *
> + * @ring: fence of the ring to be cleared
> + *
> + */
> +void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
> +{
> + int i;
> + struct dma_fence *old, **ptr;
> +
> + for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
> + ptr = &ring->fence_drv.fences[i];
> + old = rcu_dereference_protected(*ptr, 1);
> + if (old && old->ops == &amdgpu_job_fence_ops)
> + RCU_INIT_POINTER(*ptr, NULL);
> + }
> +}
> +
> /**
> * amdgpu_fence_driver_force_completion - force signal latest fence of ring
> *
> @@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
>
> static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
> {
> - struct amdgpu_ring *ring;
> + return (const char *)to_amdgpu_fence(f)->ring->name;
> +}
>
> - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
> - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
> +static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
> +{
> + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
>
> - ring = to_amdgpu_ring(job->base.sched);
> - } else {
> - ring = to_amdgpu_fence(f)->ring;
> - }
> - return (const char *)ring->name;
> + return (const char *)to_amdgpu_ring(job->base.sched)->name;
> }
>
> /**
> @@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
> */
> static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
> {
> - struct amdgpu_ring *ring;
> + if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
> + amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
>
> - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
> - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
> + return true;
> +}
>
> - ring = to_amdgpu_ring(job->base.sched);
> - } else {
> - ring = to_amdgpu_fence(f)->ring;
> - }
> +/**
> + * amdgpu_job_fence_enable_signaling - enable signalling on job fence
> + * @f: fence
> + *
> + * This is the simliar function with amdgpu_fence_enable_signaling above, it
> + * only handles the job embedded fence.
> + */
> +static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
> +{
> + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
>
> - if (!timer_pending(&ring->fence_drv.fallback_timer))
> - amdgpu_fence_schedule_fallback(ring);
> + if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
> + amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
>
> return true;
> }
> @@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
> {
> struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
>
> - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
> - /* free job if fence has a parent job */
> - struct amdgpu_job *job;
> -
> - job = container_of(f, struct amdgpu_job, hw_fence);
> - kfree(job);
> - } else {
> /* free fence_slab if it's separated fence*/
> - struct amdgpu_fence *fence;
> + kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
> +}
>
> - fence = to_amdgpu_fence(f);
> - kmem_cache_free(amdgpu_fence_slab, fence);
> - }
> +/**
> + * amdgpu_job_fence_free - free up the job with embedded fence
> + *
> + * @rcu: RCU callback head
> + *
> + * Free up the job with embedded fence after the RCU grace period.
> + */
> +static void amdgpu_job_fence_free(struct rcu_head *rcu)
> +{
> + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
> +
> + /* free job if fence has a parent job */
> + kfree(container_of(f, struct amdgpu_job, hw_fence));
> }
>
> /**
> @@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
> call_rcu(&f->rcu, amdgpu_fence_free);
> }
>
> +/**
> + * amdgpu_job_fence_release - callback that job embedded fence can be freed
> + *
> + * @f: fence
> + *
> + * This is the simliar function with amdgpu_fence_release above, it
> + * only handles the job embedded fence.
> + */
> +static void amdgpu_job_fence_release(struct dma_fence *f)
> +{
> + call_rcu(&f->rcu, amdgpu_job_fence_free);
> +}
> +
> static const struct dma_fence_ops amdgpu_fence_ops = {
> .get_driver_name = amdgpu_fence_get_driver_name,
> .get_timeline_name = amdgpu_fence_get_timeline_name,
> @@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
> .release = amdgpu_fence_release,
> };
>
> +static const struct dma_fence_ops amdgpu_job_fence_ops = {
> + .get_driver_name = amdgpu_fence_get_driver_name,
> + .get_timeline_name = amdgpu_job_fence_get_timeline_name,
> + .enable_signaling = amdgpu_job_fence_enable_signaling,
> + .release = amdgpu_job_fence_release,
> +};
>
> /*
> * Fence debugfs
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> index 4d380e79752c..fae7d185ad0d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> @@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
> #define AMDGPU_FENCE_FLAG_INT (1 << 1)
> #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
>
> -/* fence flag bit to indicate the face is embedded in job*/
> -#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
> -
> #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
>
> #define AMDGPU_IB_POOL_SIZE (1024 * 1024)
> @@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
> struct dma_fence **fences;
> };
>
> +void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
> void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
>
> int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence
2021-12-15 6:35 [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence Huang Rui
2021-12-15 10:57 ` Christian König
@ 2021-12-15 13:30 ` kernel test robot
2021-12-16 4:34 ` Huang Rui
1 sibling, 1 reply; 4+ messages in thread
From: kernel test robot @ 2021-12-15 13:30 UTC (permalink / raw)
To: Huang Rui, dri-devel, Christian König, Daniel Vetter,
Sumit Semwal
Cc: kbuild-all, amd-gfx, linux-media, Alex Deucher, Monk Liu,
Andrey Grodzovsky, Huang Rui
Hi Huang,
I love your patch! Perhaps something to improve:
[auto build test WARNING on drm/drm-next]
[also build test WARNING on drm-intel/for-linux-next drm-tip/drm-tip v5.16-rc5 next-20211214]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Huang-Rui/drm-amdgpu-introduce-new-amdgpu_fence-object-to-indicate-the-job-embedded-fence/20211215-143731
base: git://anongit.freedesktop.org/drm/drm drm-next
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20211215/202112152115.sqAqnvG7-lkp@intel.com/config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/a47becf231b123760625c45242e89f5e5b5b4915
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Huang-Rui/drm-amdgpu-introduce-new-amdgpu_fence-object-to-indicate-the-job-embedded-fence/20211215-143731
git checkout a47becf231b123760625c45242e89f5e5b5b4915
# save the config file to linux build tree
mkdir build_dir
make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash drivers/gpu/drm/amd/amdgpu/
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c:631: warning: expecting prototype for amdgpu_fence_clear_job_fences(). Prototype was for amdgpu_fence_driver_clear_job_fences() instead
vim +631 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
623
624 /**
625 * amdgpu_fence_clear_job_fences - clear job embedded fences of ring
626 *
627 * @ring: fence of the ring to be cleared
628 *
629 */
630 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
> 631 {
632 int i;
633 struct dma_fence *old, **ptr;
634
635 for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
636 ptr = &ring->fence_drv.fences[i];
637 old = rcu_dereference_protected(*ptr, 1);
638 if (old && old->ops == &amdgpu_job_fence_ops)
639 RCU_INIT_POINTER(*ptr, NULL);
640 }
641 }
642
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence
2021-12-15 13:30 ` kernel test robot
@ 2021-12-16 4:34 ` Huang Rui
0 siblings, 0 replies; 4+ messages in thread
From: Huang Rui @ 2021-12-16 4:34 UTC (permalink / raw)
To: kernel test robot
Cc: dri-devel@lists.freedesktop.org, Koenig, Christian, Daniel Vetter,
Sumit Semwal, kbuild-all@lists.01.org,
amd-gfx@lists.freedesktop.org, linux-media@vger.kernel.org,
Deucher, Alexander, Liu, Monk, Grodzovsky, Andrey
On Wed, Dec 15, 2021 at 09:30:31PM +0800, kernel test robot wrote:
> Hi Huang,
>
> I love your patch! Perhaps something to improve:
>
> [auto build test WARNING on drm/drm-next]
> [also build test WARNING on drm-intel/for-linux-next drm-tip/drm-tip v5.16-rc5 next-20211214]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting patch, we suggest to use '--base' as documented in
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgit-scm.com%2Fdocs%2Fgit-format-patch&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=GHmkohA1RYdTxSAsM%2BGd1QhX%2BOREjXw0xuALuROXd7I%3D&reserved=0]
>
> url: https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgithub.com%2F0day-ci%2Flinux%2Fcommits%2FHuang-Rui%2Fdrm-amdgpu-introduce-new-amdgpu_fence-object-to-indicate-the-job-embedded-fence%2F20211215-143731&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=LqI6BC9bWIb9iq9BKIpIZ8R%2FYB2m4x3OkytvPxundbw%3D&reserved=0
> base: git://anongit.freedesktop.org/drm/drm drm-next
> config: x86_64-allyesconfig (https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fdownload.01.org%2F0day-ci%2Farchive%2F20211215%2F202112152115.sqAqnvG7-lkp%40intel.com%2Fconfig&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=5fJoEJtN2294YWevl8ys2VTHWh1AgaYtDOKuvg3Qi6w%3D&reserved=0)
> compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
> reproduce (this is a W=1 build):
> # https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgithub.com%2F0day-ci%2Flinux%2Fcommit%2Fa47becf231b123760625c45242e89f5e5b5b4915&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=qbMYgGRhW%2BTUqKoJAodp5V2VNybzmekZLCinmUtQhho%3D&reserved=0
> git remote add linux-review https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgithub.com%2F0day-ci%2Flinux&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=eX6JF%2FjinJTZj9CzP4tvTA3Chd8NODf85oNlSdCpq14%3D&reserved=0
> git fetch --no-tags linux-review Huang-Rui/drm-amdgpu-introduce-new-amdgpu_fence-object-to-indicate-the-job-embedded-fence/20211215-143731
> git checkout a47becf231b123760625c45242e89f5e5b5b4915
> # save the config file to linux build tree
> mkdir build_dir
> make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash drivers/gpu/drm/amd/amdgpu/
>
> If you fix the issue, kindly add following tag as appropriate
> Reported-by: kernel test robot <lkp@intel.com>
>
> All warnings (new ones prefixed by >>):
>
> >> drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c:631: warning: expecting prototype for amdgpu_fence_clear_job_fences(). Prototype was for amdgpu_fence_driver_clear_job_fences() instead
>
Nice catch! Thank you. It's my typo and will fix this.
Thanks,
Ray
>
> vim +631 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
>
> 623
> 624 /**
> 625 * amdgpu_fence_clear_job_fences - clear job embedded fences of ring
> 626 *
> 627 * @ring: fence of the ring to be cleared
> 628 *
> 629 */
> 630 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
> > 631 {
> 632 int i;
> 633 struct dma_fence *old, **ptr;
> 634
> 635 for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
> 636 ptr = &ring->fence_drv.fences[i];
> 637 old = rcu_dereference_protected(*ptr, 1);
> 638 if (old && old->ops == &amdgpu_job_fence_ops)
> 639 RCU_INIT_POINTER(*ptr, NULL);
> 640 }
> 641 }
> 642
>
> ---
> 0-DAY CI Kernel Test Service, Intel Corporation
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.01.org%2Fhyperkitty%2Flist%2Fkbuild-all%40lists.01.org&data=04%7C01%7Cray.huang%40amd.com%7C57b8f278a2a14c8dab7908d9bfcf2649%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637751718697946000%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=KIv5jCARD1lYITkfLGINkMIF93OGw2MAf6yAAs64KXU%3D&reserved=0
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-12-16 4:34 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2021-12-15 6:35 [PATCH v3] drm/amdgpu: introduce new amdgpu_fence object to indicate the job embedded fence Huang Rui
2021-12-15 10:57 ` Christian König
2021-12-15 13:30 ` kernel test robot
2021-12-16 4:34 ` Huang Rui
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox