* [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages @ 2025-09-24 10:01 Sunil Khatri 2025-09-24 10:01 ` [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos Sunil Khatri 2025-09-24 16:57 ` [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Kuehling, Felix 0 siblings, 2 replies; 9+ messages in thread From: Sunil Khatri @ 2025-09-24 10:01 UTC (permalink / raw) To: Christian König, Felix Kuehling, Alex Deucher, amd-gfx; +Cc: Sunil Khatri update the amdgpu_ttm_tt_get_user_pages and all dependent function along with it callers to use a user allocated hmm_range buffer instead hmm layer allocates the buffer. This is a need to get hmm_range pointers easily accessible without accessing the bo and that is a requirement for the userqueue to lock the userptrs effectively. Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 ++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- 8 files changed, 38 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7c54fe6b0f5d..4babd37712fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (unlikely(!range)) { + ret = -ENOMEM; + goto unregister_out; + } + + ret = 
amdgpu_ttm_tt_get_user_pages(bo, range); if (ret) { + kfree(range); if (ret == -EAGAIN) pr_debug("Failed to get user pages, try again\n"); else @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } } + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); + if (unlikely(!mem->range)) + return -ENOMEM; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); if (ret) { + kfree(mem->range); + mem->range = NULL; pr_debug("Failed %d to get user pages\n", ret); /* Return -EFAULT bad address error as success. It will diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 744e6ff69814..31eea1c7dac3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { bool userpage_invalidated = false; struct amdgpu_bo *bo = e->bo; + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); + if (unlikely(!e->range)) + return -ENOMEM; + int i; - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) goto out_free_user_pages; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8524aa55e057..12f0597a3659 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, &range); - if (r) + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (unlikely(!range)) + return -ENOMEM; + r = amdgpu_ttm_tt_get_user_pages(bo, range); + if (r) { + kfree(range); goto release_object; - + } r = amdgpu_bo_reserve(bo, true); if (r) goto 
user_pages_done; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 2c6a6b858112..53d405a92a14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range) + struct hmm_range *hmm_range) { - struct hmm_range *hmm_range; unsigned long end; unsigned long timeout; unsigned long *pfns; int r = 0; - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); - if (unlikely(!hmm_range)) - return -ENOMEM; - pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); if (unlikely(!pfns)) { r = -ENOMEM; @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, hmm_range->start = start; hmm_range->hmm_pfns = pfns; - *phmm_range = hmm_range; - return 0; out_free_pfns: kvfree(pfns); out_free_range: - kfree(hmm_range); - if (r == -EBUSY) r = -EAGAIN; return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 953e1d06de20..c54e3c64251a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -34,7 +34,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range); + struct hmm_range *hmm_range); bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); #if defined(CONFIG_HMM_MIRROR) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 901e0c39a594..046ff2346dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { * memory and start HMM tracking CPU page table update * * Calling function 
must call amdgpu_ttm_tt_userptr_range_done() once and only - * once afterwards to stop HMM tracking + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure + * that range is a valid memory and it is freed too. */ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct hmm_range *range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, bool readonly; int r = 0; - /* Make sure get_user_pages_done() can cleanup gracefully */ - *range = NULL; - mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6ac94469ed40..a8379b925878 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range); + struct hmm_range *range); void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, struct hmm_range *range); bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, struct hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct hmm_range *range) { return -EPERM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 273f42e3afdd..9f0f14ea93e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } WRITE_ONCE(p->svms.faulting_task, current); + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, readonly, owner, - 
&hmm_range); + hmm_range); WRITE_ONCE(p->svms.faulting_task, NULL); - if (r) + if (r) { + kfree(hmm_range); pr_debug("failed %d to get svm range pages\n", r); + } } else { r = -EFAULT; } -- 2.34.1 ^ permalink raw reply related [flat|nested] 9+ messages in thread
* [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos 2025-09-24 10:01 [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Sunil Khatri @ 2025-09-24 10:01 ` Sunil Khatri 2025-09-24 12:09 ` Christian König 2025-09-24 16:57 ` [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Kuehling, Felix 1 sibling, 1 reply; 9+ messages in thread From: Sunil Khatri @ 2025-09-24 10:01 UTC (permalink / raw) To: Christian König, Felix Kuehling, Alex Deucher, amd-gfx; +Cc: Sunil Khatri In function amdgpu_cs_parser_bos, declare the variables in the beginning of the function and not during the initialization. Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 31eea1c7dac3..a9bdc368c981 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -854,6 +854,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_list_entry *e; struct drm_gem_object *obj; + struct amdgpu_bo *bo; + struct mm_struct *usermm; + bool userpage_invalidated; unsigned long index; unsigned int i; int r; @@ -882,14 +885,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, * amdgpu_ttm_backend_bind() to flush and invalidate new pages */ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - bool userpage_invalidated = false; - struct amdgpu_bo *bo = e->bo; + userpage_invalidated = false; + bo = e->bo; e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); if (unlikely(!e->range)) return -ENOMEM; - int i; - r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) goto out_free_user_pages; @@ -930,8 +931,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } 
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct mm_struct *usermm; - usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm); if (usermm && usermm != current->mm) { r = -EPERM; -- 2.34.1 ^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos 2025-09-24 10:01 ` [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos Sunil Khatri @ 2025-09-24 12:09 ` Christian König 0 siblings, 0 replies; 9+ messages in thread From: Christian König @ 2025-09-24 12:09 UTC (permalink / raw) To: Sunil Khatri, Felix Kuehling, Alex Deucher, amd-gfx On 24.09.25 12:01, Sunil Khatri wrote: > In function amdgpu_cs_parser_bos, declare the variables in the beginning > of the function and not during the initialization. > > Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> > --- > drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 +++++------ > 1 file changed, 5 insertions(+), 6 deletions(-) > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > index 31eea1c7dac3..a9bdc368c981 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > @@ -854,6 +854,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, > struct amdgpu_vm *vm = &fpriv->vm; > struct amdgpu_bo_list_entry *e; > struct drm_gem_object *obj; > + struct amdgpu_bo *bo; > + struct mm_struct *usermm; > + bool userpage_invalidated; > unsigned long index; > unsigned int i; > int r; > @@ -882,14 +885,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, > * amdgpu_ttm_backend_bind() to flush and invalidate new pages > */ > amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { > - bool userpage_invalidated = false; > - struct amdgpu_bo *bo = e->bo; > + userpage_invalidated = false; > + bo = e->bo; Those can stay as they are. (Ok there should be an empty line between the declaration of variables and code). > e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); > if (unlikely(!e->range)) > return -ENOMEM; > > - int i; > - But that here looks really fishy. 
It basically means that we have an "int i" declared in this block which shadows the "unsigned int i" declared. Please make a patch just removing this "int i" here and feel free to just add my rb and push to amd-staging-drm-next. I'm really wondering how the heck that happened? Thanks, Christian. > r = amdgpu_ttm_tt_get_user_pages(bo, e->range); > if (r) > goto out_free_user_pages; > @@ -930,8 +931,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, > } > > amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { > - struct mm_struct *usermm; > - > usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm); > if (usermm && usermm != current->mm) { > r = -EPERM; ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-09-24 10:01 [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Sunil Khatri 2025-09-24 10:01 ` [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos Sunil Khatri @ 2025-09-24 16:57 ` Kuehling, Felix 2025-09-26 10:53 ` Khatri, Sunil 1 sibling, 1 reply; 9+ messages in thread From: Kuehling, Felix @ 2025-09-24 16:57 UTC (permalink / raw) To: Sunil Khatri, Christian König, Alex Deucher, amd-gfx On 2025-09-24 06:01, Sunil Khatri wrote: > update the amdgpu_ttm_tt_get_user_pages and all dependent function > along with it callers to use a user allocated hmm_range buffer instead > hmm layer allocates the buffer. > > This is a need to get hmm_range pointers easily accessible > without accessing the bo and that is a requirement for the > userqueue to lock the userptrs effectively. > > Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> What's the reason for this change? In the current code, the hmm_range is allocated by amdgpu_hmm_range_get_pages and freed by amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. 
Regards, Felix > --- > drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 ++++++++++++++-- > drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- > drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- > drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- > drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- > drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- > drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- > drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- > 8 files changed, 38 insertions(+), 26 deletions(-) > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c > index 7c54fe6b0f5d..4babd37712fb 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c > @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, > return 0; > } > > - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); > + range = kzalloc(sizeof(*range), GFP_KERNEL); > + if (unlikely(!range)) { > + ret = -ENOMEM; > + goto unregister_out; > + } > + > + ret = amdgpu_ttm_tt_get_user_pages(bo, range); > if (ret) { > + kfree(range); > if (ret == -EAGAIN) > pr_debug("Failed to get user pages, try again\n"); > else > @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, > } > } > > + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); > + if (unlikely(!mem->range)) > + return -ENOMEM; > /* Get updated user pages */ > - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); > + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); > if (ret) { > + kfree(mem->range); > + mem->range = NULL; > pr_debug("Failed %d to get user pages\n", ret); > > /* Return -EFAULT bad address error as success. 
It will > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > index 744e6ff69814..31eea1c7dac3 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c > @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, > amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { > bool userpage_invalidated = false; > struct amdgpu_bo *bo = e->bo; > + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); > + if (unlikely(!e->range)) > + return -ENOMEM; > + > int i; > > - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); > + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); > if (r) > goto out_free_user_pages; > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c > index 8524aa55e057..12f0597a3659 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c > @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, > goto release_object; > > if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { > - r = amdgpu_ttm_tt_get_user_pages(bo, &range); > - if (r) > + range = kzalloc(sizeof(*range), GFP_KERNEL); > + if (unlikely(!range)) > + return -ENOMEM; > + r = amdgpu_ttm_tt_get_user_pages(bo, range); > + if (r) { > + kfree(range); > goto release_object; > - > + } > r = amdgpu_bo_reserve(bo, true); > if (r) > goto user_pages_done; > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c > index 2c6a6b858112..53d405a92a14 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c > @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) > int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, > uint64_t start, uint64_t npages, bool readonly, > void *owner, > - struct hmm_range **phmm_range) > + struct hmm_range *hmm_range) > { > - struct hmm_range *hmm_range; 
> unsigned long end; > unsigned long timeout; > unsigned long *pfns; > int r = 0; > > - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); > - if (unlikely(!hmm_range)) > - return -ENOMEM; > - > pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); > if (unlikely(!pfns)) { > r = -ENOMEM; > @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, > hmm_range->start = start; > hmm_range->hmm_pfns = pfns; > > - *phmm_range = hmm_range; > - > return 0; > > out_free_pfns: > kvfree(pfns); > out_free_range: > - kfree(hmm_range); > - > if (r == -EBUSY) > r = -EAGAIN; > return r; > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h > index 953e1d06de20..c54e3c64251a 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h > @@ -34,7 +34,7 @@ > int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, > uint64_t start, uint64_t npages, bool readonly, > void *owner, > - struct hmm_range **phmm_range); > + struct hmm_range *hmm_range); > bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); > > #if defined(CONFIG_HMM_MIRROR) > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c > index 901e0c39a594..046ff2346dab 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c > @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { > * memory and start HMM tracking CPU page table update > * > * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only > - * once afterwards to stop HMM tracking > + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure > + * that range is a valid memory and it is freed too. 
> */ > int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, > - struct hmm_range **range) > + struct hmm_range *range) > { > struct ttm_tt *ttm = bo->tbo.ttm; > struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); > @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, > bool readonly; > int r = 0; > > - /* Make sure get_user_pages_done() can cleanup gracefully */ > - *range = NULL; > - > mm = bo->notifier.mm; > if (unlikely(!mm)) { > DRM_DEBUG_DRIVER("BO is not registered?\n"); > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h > index 6ac94469ed40..a8379b925878 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h > @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); > > #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) > int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, > - struct hmm_range **range); > + struct hmm_range *range); > void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, > struct hmm_range *range); > bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, > struct hmm_range *range); > #else > static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, > - struct hmm_range **range) > + struct hmm_range *range) > { > return -EPERM; > } > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c > index 273f42e3afdd..9f0f14ea93e5 100644 > --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c > @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm, > } > > WRITE_ONCE(p->svms.faulting_task, current); > + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); > r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, > readonly, owner, > - &hmm_range); > + hmm_range); > WRITE_ONCE(p->svms.faulting_task, NULL); > - if (r) > + if (r) { > + kfree(hmm_range); > pr_debug("failed %d to 
get svm range pages\n", r); > + } > } else { > r = -EFAULT; > } ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-09-24 16:57 ` [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Kuehling, Felix @ 2025-09-26 10:53 ` Khatri, Sunil 2025-10-01 5:41 ` Kuehling, Felix 2025-10-03 20:23 ` Chen, Xiaogang 0 siblings, 2 replies; 9+ messages in thread From: Khatri, Sunil @ 2025-09-26 10:53 UTC (permalink / raw) To: Kuehling, Felix, Sunil Khatri, Christian König, Alex Deucher, amd-gfx On 9/24/2025 10:27 PM, Kuehling, Felix wrote: > On 2025-09-24 06:01, Sunil Khatri wrote: >> update the amdgpu_ttm_tt_get_user_pages and all dependent function >> along with it callers to use a user allocated hmm_range buffer instead >> hmm layer allocates the buffer. >> >> This is a need to get hmm_range pointers easily accessible >> without accessing the bo and that is a requirement for the >> userqueue to lock the userptrs effectively. >> >> Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> > > What's the reason for this change? In the current code, the hmm_range > is allocated by amdgpu_hmm_range_get_pages and freed by > amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. Sorry i missed your comment. For userqueues locking the userptr bos and making sure we have valid userptrs at the time of validation seems too complicated, so along with christian we decided to use hmm_range list instead and have reference to userptr bo and via hmm_range private field to be set to bo. Also i did made sure that wherever we are doing get pages and allocating range the freeing part is taken care of. Specially for freeing the memory is still done by amdgpu_hmm_range_get_pages_done only. Please share if anywhere i missed something. Also Christian brought out the point to have separate alloc/free for hmm_range which i am working on and will share soon. 
Regards Sunil Khatri > Regards, > Felix > > >> --- >> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 ++++++++++++++-- >> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- >> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- >> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- >> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- >> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- >> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- >> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- >> 8 files changed, 38 insertions(+), 26 deletions(-) >> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >> index 7c54fe6b0f5d..4babd37712fb 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >> @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem >> *mem, uint64_t user_addr, >> return 0; >> } >> - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); >> + range = kzalloc(sizeof(*range), GFP_KERNEL); >> + if (unlikely(!range)) { >> + ret = -ENOMEM; >> + goto unregister_out; >> + } >> + >> + ret = amdgpu_ttm_tt_get_user_pages(bo, range); >> if (ret) { >> + kfree(range); >> if (ret == -EAGAIN) >> pr_debug("Failed to get user pages, try again\n"); >> else >> @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct >> amdkfd_process_info *process_info, >> } >> } >> + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); >> + if (unlikely(!mem->range)) >> + return -ENOMEM; >> /* Get updated user pages */ >> - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); >> + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); >> if (ret) { >> + kfree(mem->range); >> + mem->range = NULL; >> pr_debug("Failed %d to get user pages\n", ret); >> /* Return -EFAULT bad address error as success. 
It will >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >> index 744e6ff69814..31eea1c7dac3 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >> @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct >> amdgpu_cs_parser *p, >> amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { >> bool userpage_invalidated = false; >> struct amdgpu_bo *bo = e->bo; >> + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); >> + if (unlikely(!e->range)) >> + return -ENOMEM; >> + >> int i; >> - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); >> + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); >> if (r) >> goto out_free_user_pages; >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >> index 8524aa55e057..12f0597a3659 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >> @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device >> *dev, void *data, >> goto release_object; >> if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { >> - r = amdgpu_ttm_tt_get_user_pages(bo, &range); >> - if (r) >> + range = kzalloc(sizeof(*range), GFP_KERNEL); >> + if (unlikely(!range)) >> + return -ENOMEM; >> + r = amdgpu_ttm_tt_get_user_pages(bo, range); >> + if (r) { >> + kfree(range); >> goto release_object; >> - >> + } >> r = amdgpu_bo_reserve(bo, true); >> if (r) >> goto user_pages_done; >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >> index 2c6a6b858112..53d405a92a14 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >> @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) >> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, >> uint64_t start, uint64_t npages, bool readonly, >> void *owner, >> - struct hmm_range **phmm_range) >> + struct 
hmm_range *hmm_range) >> { >> - struct hmm_range *hmm_range; >> unsigned long end; >> unsigned long timeout; >> unsigned long *pfns; >> int r = 0; >> - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >> - if (unlikely(!hmm_range)) >> - return -ENOMEM; >> - >> pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); >> if (unlikely(!pfns)) { >> r = -ENOMEM; >> @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct >> mmu_interval_notifier *notifier, >> hmm_range->start = start; >> hmm_range->hmm_pfns = pfns; >> - *phmm_range = hmm_range; >> - >> return 0; >> out_free_pfns: >> kvfree(pfns); >> out_free_range: >> - kfree(hmm_range); >> - >> if (r == -EBUSY) >> r = -EAGAIN; >> return r; >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >> index 953e1d06de20..c54e3c64251a 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >> @@ -34,7 +34,7 @@ >> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, >> uint64_t start, uint64_t npages, bool readonly, >> void *owner, >> - struct hmm_range **phmm_range); >> + struct hmm_range *hmm_range); >> bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); >> #if defined(CONFIG_HMM_MIRROR) >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >> index 901e0c39a594..046ff2346dab 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >> @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { >> * memory and start HMM tracking CPU page table update >> * >> * Calling function must call amdgpu_ttm_tt_userptr_range_done() >> once and only >> - * once afterwards to stop HMM tracking >> + * once afterwards to stop HMM tracking. Its the caller >> responsibility to ensure >> + * that range is a valid memory and it is freed too. 
>> */ >> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >> - struct hmm_range **range) >> + struct hmm_range *range) >> { >> struct ttm_tt *ttm = bo->tbo.ttm; >> struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); >> @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo >> *bo, >> bool readonly; >> int r = 0; >> - /* Make sure get_user_pages_done() can cleanup gracefully */ >> - *range = NULL; >> - >> mm = bo->notifier.mm; >> if (unlikely(!mm)) { >> DRM_DEBUG_DRIVER("BO is not registered?\n"); >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >> index 6ac94469ed40..a8379b925878 100644 >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >> @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct >> amdgpu_device *adev, uint32_t type); >> #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) >> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >> - struct hmm_range **range); >> + struct hmm_range *range); >> void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, >> struct hmm_range *range); >> bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, >> struct hmm_range *range); >> #else >> static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >> - struct hmm_range **range) >> + struct hmm_range *range) >> { >> return -EPERM; >> } >> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >> index 273f42e3afdd..9f0f14ea93e5 100644 >> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >> @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct >> mm_struct *mm, >> } >> WRITE_ONCE(p->svms.faulting_task, current); >> + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >> r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, >> npages, >> readonly, owner, >> - &hmm_range); >> + hmm_range); >> WRITE_ONCE(p->svms.faulting_task, NULL); >> - if 
(r) >> + if (r) { >> + kfree(hmm_range); >> pr_debug("failed %d to get svm range pages\n", r); >> + } >> } else { >> r = -EFAULT; >> } ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-09-26 10:53 ` Khatri, Sunil @ 2025-10-01 5:41 ` Kuehling, Felix 2025-10-01 7:32 ` Christian König 2025-10-03 20:23 ` Chen, Xiaogang 1 sibling, 1 reply; 9+ messages in thread From: Kuehling, Felix @ 2025-10-01 5:41 UTC (permalink / raw) To: Khatri, Sunil, Sunil Khatri, Christian König, Alex Deucher, amd-gfx On 2025-09-26 06:53, Khatri, Sunil wrote: > > On 9/24/2025 10:27 PM, Kuehling, Felix wrote: >> On 2025-09-24 06:01, Sunil Khatri wrote: >>> update the amdgpu_ttm_tt_get_user_pages and all dependent function >>> along with it callers to use a user allocated hmm_range buffer instead >>> hmm layer allocates the buffer. >>> >>> This is a need to get hmm_range pointers easily accessible >>> without accessing the bo and that is a requirement for the >>> userqueue to lock the userptrs effectively. >>> >>> Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> >> >> What's the reason for this change? In the current code, the hmm_range >> is allocated by amdgpu_hmm_range_get_pages and freed by >> amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. > Sorry i missed your comment. For userqueues locking the userptr bos > and making sure we have valid userptrs at the time of validation seems > too complicated, so along with christian we decided to use hmm_range > list instead and have reference to userptr bo and via hmm_range > private field to be set to bo. I don't think this will work. See my reply to your other code review. hmm_range->dev_private_owner is not a device-private field that you can use to store random driver-data associated with the HMM range. Regards, Felix > > Also i did made sure that wherever we are doing get pages and > allocating range the freeing part is taken care of. Specially for > freeing the memory is still done by amdgpu_hmm_range_get_pages_done > only. Please share if anywhere i missed something. 
Also Christian > brought out the point to have separate alloc/free for hmm_range which > i am working on and will share soon. > > Regards > Sunil Khatri > >> Regards, >> Felix >> >> >>> --- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 >>> ++++++++++++++-- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- >>> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- >>> 8 files changed, 38 insertions(+), 26 deletions(-) >>> >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> index 7c54fe6b0f5d..4babd37712fb 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem >>> *mem, uint64_t user_addr, >>> return 0; >>> } >>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); >>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>> + if (unlikely(!range)) { >>> + ret = -ENOMEM; >>> + goto unregister_out; >>> + } >>> + >>> + ret = amdgpu_ttm_tt_get_user_pages(bo, range); >>> if (ret) { >>> + kfree(range); >>> if (ret == -EAGAIN) >>> pr_debug("Failed to get user pages, try again\n"); >>> else >>> @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct >>> amdkfd_process_info *process_info, >>> } >>> } >>> + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); >>> + if (unlikely(!mem->range)) >>> + return -ENOMEM; >>> /* Get updated user pages */ >>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); >>> + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); >>> if (ret) { >>> + kfree(mem->range); >>> + mem->range = NULL; >>> pr_debug("Failed %d to get user pages\n", ret); 
>>> /* Return -EFAULT bad address error as success. It will >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> index 744e6ff69814..31eea1c7dac3 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct >>> amdgpu_cs_parser *p, >>> amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { >>> bool userpage_invalidated = false; >>> struct amdgpu_bo *bo = e->bo; >>> + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); >>> + if (unlikely(!e->range)) >>> + return -ENOMEM; >>> + >>> int i; >>> - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); >>> + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); >>> if (r) >>> goto out_free_user_pages; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> index 8524aa55e057..12f0597a3659 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device >>> *dev, void *data, >>> goto release_object; >>> if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { >>> - r = amdgpu_ttm_tt_get_user_pages(bo, &range); >>> - if (r) >>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>> + if (unlikely(!range)) >>> + return -ENOMEM; >>> + r = amdgpu_ttm_tt_get_user_pages(bo, range); >>> + if (r) { >>> + kfree(range); >>> goto release_object; >>> - >>> + } >>> r = amdgpu_bo_reserve(bo, true); >>> if (r) >>> goto user_pages_done; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> index 2c6a6b858112..53d405a92a14 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) >>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>> *notifier, >>> uint64_t 
start, uint64_t npages, bool readonly, >>> void *owner, >>> - struct hmm_range **phmm_range) >>> + struct hmm_range *hmm_range) >>> { >>> - struct hmm_range *hmm_range; >>> unsigned long end; >>> unsigned long timeout; >>> unsigned long *pfns; >>> int r = 0; >>> - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>> - if (unlikely(!hmm_range)) >>> - return -ENOMEM; >>> - >>> pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); >>> if (unlikely(!pfns)) { >>> r = -ENOMEM; >>> @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct >>> mmu_interval_notifier *notifier, >>> hmm_range->start = start; >>> hmm_range->hmm_pfns = pfns; >>> - *phmm_range = hmm_range; >>> - >>> return 0; >>> out_free_pfns: >>> kvfree(pfns); >>> out_free_range: >>> - kfree(hmm_range); >>> - >>> if (r == -EBUSY) >>> r = -EAGAIN; >>> return r; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> index 953e1d06de20..c54e3c64251a 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> @@ -34,7 +34,7 @@ >>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>> *notifier, >>> uint64_t start, uint64_t npages, bool readonly, >>> void *owner, >>> - struct hmm_range **phmm_range); >>> + struct hmm_range *hmm_range); >>> bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); >>> #if defined(CONFIG_HMM_MIRROR) >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> index 901e0c39a594..046ff2346dab 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { >>> * memory and start HMM tracking CPU page table update >>> * >>> * Calling function must call amdgpu_ttm_tt_userptr_range_done() >>> once and only >>> - * once afterwards to stop HMM tracking >>> + * once afterwards to stop HMM tracking. 
Its the caller >>> responsibility to ensure >>> + * that range is a valid memory and it is freed too. >>> */ >>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range) >>> + struct hmm_range *range) >>> { >>> struct ttm_tt *ttm = bo->tbo.ttm; >>> struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); >>> @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct >>> amdgpu_bo *bo, >>> bool readonly; >>> int r = 0; >>> - /* Make sure get_user_pages_done() can cleanup gracefully */ >>> - *range = NULL; >>> - >>> mm = bo->notifier.mm; >>> if (unlikely(!mm)) { >>> DRM_DEBUG_DRIVER("BO is not registered?\n"); >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> index 6ac94469ed40..a8379b925878 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct >>> amdgpu_device *adev, uint32_t type); >>> #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) >>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range); >>> + struct hmm_range *range); >>> void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, >>> struct hmm_range *range); >>> bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, >>> struct hmm_range *range); >>> #else >>> static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range) >>> + struct hmm_range *range) >>> { >>> return -EPERM; >>> } >>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> index 273f42e3afdd..9f0f14ea93e5 100644 >>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct >>> mm_struct *mm, >>> } >>> WRITE_ONCE(p->svms.faulting_task, current); >>> + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>> r = 
amdgpu_hmm_range_get_pages(&prange->notifier, >>> addr, npages, >>> readonly, owner, >>> - &hmm_range); >>> + hmm_range); >>> WRITE_ONCE(p->svms.faulting_task, NULL); >>> - if (r) >>> + if (r) { >>> + kfree(hmm_range); >>> pr_debug("failed %d to get svm range pages\n", r); >>> + } >>> } else { >>> r = -EFAULT; >>> } ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-10-01 5:41 ` Kuehling, Felix @ 2025-10-01 7:32 ` Christian König 0 siblings, 0 replies; 9+ messages in thread From: Christian König @ 2025-10-01 7:32 UTC (permalink / raw) To: Kuehling, Felix, Khatri, Sunil, Sunil Khatri, Alex Deucher, amd-gfx On 01.10.25 07:41, Kuehling, Felix wrote: > On 2025-09-26 06:53, Khatri, Sunil wrote: >> >> On 9/24/2025 10:27 PM, Kuehling, Felix wrote: >>> On 2025-09-24 06:01, Sunil Khatri wrote: >>>> update the amdgpu_ttm_tt_get_user_pages and all dependent function >>>> along with it callers to use a user allocated hmm_range buffer instead >>>> hmm layer allocates the buffer. >>>> >>>> This is a need to get hmm_range pointers easily accessible >>>> without accessing the bo and that is a requirement for the >>>> userqueue to lock the userptrs effectively. >>>> >>>> Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> >>> >>> What's the reason for this change? In the current code, the hmm_range is allocated by amdgpu_hmm_range_get_pages and freed by amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. >> Sorry i missed your comment. For userqueues locking the userptr bos and making sure we have valid userptrs at the time of validation seems too complicated, so along with christian we decided to use hmm_range list instead and have reference to userptr bo and via hmm_range private field to be set to bo. > > I don't think this will work. See my reply to your other code review. hmm_range->dev_private_owner is not a device-private field that you can use to store random driver-data associated with the HMM range. Ah! Thanks for that very valuable information. I thought it was just a void* drivers could use for their purpose. Mhm, that makes the implementation even more complicated than it already is. Regards, Christian. 
> > Regards, > Felix > > >> >> Also i did made sure that wherever we are doing get pages and allocating range the freeing part is taken care of. Specially for freeing the memory is still done by amdgpu_hmm_range_get_pages_done only. Please share if anywhere i missed something. Also Christian brought out the point to have separate alloc/free for hmm_range which i am working on and will share soon. >> >> Regards >> Sunil Khatri >> >>> Regards, >>> Felix >>> >>> >>>> --- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 ++++++++++++++-- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- >>>> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- >>>> 8 files changed, 38 insertions(+), 26 deletions(-) >>>> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> index 7c54fe6b0f5d..4babd37712fb 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, >>>> return 0; >>>> } >>>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); >>>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>>> + if (unlikely(!range)) { >>>> + ret = -ENOMEM; >>>> + goto unregister_out; >>>> + } >>>> + >>>> + ret = amdgpu_ttm_tt_get_user_pages(bo, range); >>>> if (ret) { >>>> + kfree(range); >>>> if (ret == -EAGAIN) >>>> pr_debug("Failed to get user pages, try again\n"); >>>> else >>>> @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, >>>> } >>>> } >>>> + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); >>>> + if 
(unlikely(!mem->range)) >>>> + return -ENOMEM; >>>> /* Get updated user pages */ >>>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); >>>> + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); >>>> if (ret) { >>>> + kfree(mem->range); >>>> + mem->range = NULL; >>>> pr_debug("Failed %d to get user pages\n", ret); >>>> /* Return -EFAULT bad address error as success. It will >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> index 744e6ff69814..31eea1c7dac3 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, >>>> amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { >>>> bool userpage_invalidated = false; >>>> struct amdgpu_bo *bo = e->bo; >>>> + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); >>>> + if (unlikely(!e->range)) >>>> + return -ENOMEM; >>>> + >>>> int i; >>>> - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); >>>> + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); >>>> if (r) >>>> goto out_free_user_pages; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> index 8524aa55e057..12f0597a3659 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, >>>> goto release_object; >>>> if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { >>>> - r = amdgpu_ttm_tt_get_user_pages(bo, &range); >>>> - if (r) >>>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>>> + if (unlikely(!range)) >>>> + return -ENOMEM; >>>> + r = amdgpu_ttm_tt_get_user_pages(bo, range); >>>> + if (r) { >>>> + kfree(range); >>>> goto release_object; >>>> - >>>> + } >>>> r = amdgpu_bo_reserve(bo, true); >>>> if (r) >>>> goto user_pages_done; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> index 2c6a6b858112..53d405a92a14 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) >>>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, >>>> uint64_t start, uint64_t npages, bool readonly, >>>> void *owner, >>>> - struct hmm_range **phmm_range) >>>> + struct hmm_range *hmm_range) >>>> { >>>> - struct hmm_range *hmm_range; >>>> unsigned long end; >>>> unsigned long timeout; >>>> unsigned long *pfns; >>>> int r = 0; >>>> - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>>> - if (unlikely(!hmm_range)) >>>> - return -ENOMEM; >>>> - >>>> pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); >>>> if (unlikely(!pfns)) { >>>> r = -ENOMEM; >>>> @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, >>>> hmm_range->start = start; >>>> hmm_range->hmm_pfns = pfns; >>>> - *phmm_range = hmm_range; >>>> - >>>> return 0; >>>> out_free_pfns: >>>> kvfree(pfns); >>>> out_free_range: >>>> - kfree(hmm_range); >>>> - >>>> if (r == -EBUSY) >>>> r = -EAGAIN; >>>> return r; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> index 953e1d06de20..c54e3c64251a 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> @@ -34,7 +34,7 @@ >>>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, >>>> uint64_t start, uint64_t npages, bool readonly, >>>> void *owner, >>>> - struct hmm_range **phmm_range); >>>> + struct hmm_range *hmm_range); >>>> bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); >>>> #if defined(CONFIG_HMM_MIRROR) >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> index 901e0c39a594..046ff2346dab 100644 >>>> --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { >>>> * memory and start HMM tracking CPU page table update >>>> * >>>> * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only >>>> - * once afterwards to stop HMM tracking >>>> + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure >>>> + * that range is a valid memory and it is freed too. >>>> */ >>>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range) >>>> + struct hmm_range *range) >>>> { >>>> struct ttm_tt *ttm = bo->tbo.ttm; >>>> struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); >>>> @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> bool readonly; >>>> int r = 0; >>>> - /* Make sure get_user_pages_done() can cleanup gracefully */ >>>> - *range = NULL; >>>> - >>>> mm = bo->notifier.mm; >>>> if (unlikely(!mm)) { >>>> DRM_DEBUG_DRIVER("BO is not registered?\n"); >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> index 6ac94469ed40..a8379b925878 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); >>>> #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) >>>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range); >>>> + struct hmm_range *range); >>>> void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, >>>> struct hmm_range *range); >>>> bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, >>>> struct hmm_range *range); >>>> #else >>>> static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range) >>>> + struct hmm_range *range) >>>> { >>>> return -EPERM; >>>> } >>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> index 273f42e3afdd..9f0f14ea93e5 100644 >>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm, >>>> } >>>> WRITE_ONCE(p->svms.faulting_task, current); >>>> + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>>> r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, >>>> readonly, owner, >>>> - &hmm_range); >>>> + hmm_range); >>>> WRITE_ONCE(p->svms.faulting_task, NULL); >>>> - if (r) >>>> + if (r) { >>>> + kfree(hmm_range); >>>> pr_debug("failed %d to get svm range pages\n", r); >>>> + } >>>> } else { >>>> r = -EFAULT; >>>> } ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-09-26 10:53 ` Khatri, Sunil 2025-10-01 5:41 ` Kuehling, Felix @ 2025-10-03 20:23 ` Chen, Xiaogang 2025-10-06 10:35 ` Khatri, Sunil 1 sibling, 1 reply; 9+ messages in thread From: Chen, Xiaogang @ 2025-10-03 20:23 UTC (permalink / raw) To: Khatri, Sunil, Kuehling, Felix, Sunil Khatri, Christian König, Alex Deucher, amd-gfx On 9/26/2025 5:53 AM, Khatri, Sunil wrote: > > On 9/24/2025 10:27 PM, Kuehling, Felix wrote: >> On 2025-09-24 06:01, Sunil Khatri wrote: >>> update the amdgpu_ttm_tt_get_user_pages and all dependent function >>> along with it callers to use a user allocated hmm_range buffer instead >>> hmm layer allocates the buffer. >>> >>> This is a need to get hmm_range pointers easily accessible >>> without accessing the bo and that is a requirement for the >>> userqueue to lock the userptrs effectively. >>> >>> Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> >> >> What's the reason for this change? In the current code, the hmm_range >> is allocated by amdgpu_hmm_range_get_pages and freed by >> amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. > Sorry i missed your comment. For userqueues locking the userptr bos > and making sure we have valid userptrs at the time of validation seems > too complicated, so along with christian we decided to use hmm_range > list instead and have reference to userptr bo and via hmm_range > private field to be set to bo. > > Also i did made sure that wherever we are doing get pages and > allocating range the freeing part is taken care of. Specially for > freeing the memory is still done by amdgpu_hmm_range_get_pages_done > only. Please share if anywhere i missed something. Also Christian > brought out the point to have separate alloc/free for hmm_range which > i am working on and will share soon. This patch has other components to allocate hmm_range, freed it at amdgpu_hmm_range_get_pages_done. 
This inconsistency makes other components handle error case awkward. It is better to let other component free hmm_range no matter amdgpu_hmm_range_get_pages success or not. And amdgpu_hmm_range_get_pages(done) alloc/free hmm_range->hmm_pfns. That would be easy to understand and have less chance to make mistake. Regards Xiaogang > > Regards > Sunil Khatri > >> Regards, >> Felix >> >> >>> --- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 >>> ++++++++++++++-- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- >>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- >>> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- >>> 8 files changed, 38 insertions(+), 26 deletions(-) >>> >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> index 7c54fe6b0f5d..4babd37712fb 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>> @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem >>> *mem, uint64_t user_addr, >>> return 0; >>> } >>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); >>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>> + if (unlikely(!range)) { >>> + ret = -ENOMEM; >>> + goto unregister_out; >>> + } >>> + >>> + ret = amdgpu_ttm_tt_get_user_pages(bo, range); >>> if (ret) { >>> + kfree(range); >>> if (ret == -EAGAIN) >>> pr_debug("Failed to get user pages, try again\n"); >>> else >>> @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct >>> amdkfd_process_info *process_info, >>> } >>> } >>> + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); >>> + if (unlikely(!mem->range)) >>> + return -ENOMEM; >>> /* Get updated user pages */ >>> - ret = 
amdgpu_ttm_tt_get_user_pages(bo, &mem->range); >>> + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); >>> if (ret) { >>> + kfree(mem->range); >>> + mem->range = NULL; >>> pr_debug("Failed %d to get user pages\n", ret); >>> /* Return -EFAULT bad address error as success. It will >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> index 744e6ff69814..31eea1c7dac3 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>> @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct >>> amdgpu_cs_parser *p, >>> amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { >>> bool userpage_invalidated = false; >>> struct amdgpu_bo *bo = e->bo; >>> + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); >>> + if (unlikely(!e->range)) >>> + return -ENOMEM; >>> + >>> int i; >>> - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); >>> + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); >>> if (r) >>> goto out_free_user_pages; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> index 8524aa55e057..12f0597a3659 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>> @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device >>> *dev, void *data, >>> goto release_object; >>> if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { >>> - r = amdgpu_ttm_tt_get_user_pages(bo, &range); >>> - if (r) >>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>> + if (unlikely(!range)) >>> + return -ENOMEM; >>> + r = amdgpu_ttm_tt_get_user_pages(bo, range); >>> + if (r) { >>> + kfree(range); >>> goto release_object; >>> - >>> + } >>> r = amdgpu_bo_reserve(bo, true); >>> if (r) >>> goto user_pages_done; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> index 2c6a6b858112..53d405a92a14 100644 >>> --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>> @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) >>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>> *notifier, >>> uint64_t start, uint64_t npages, bool readonly, >>> void *owner, >>> - struct hmm_range **phmm_range) >>> + struct hmm_range *hmm_range) >>> { >>> - struct hmm_range *hmm_range; >>> unsigned long end; >>> unsigned long timeout; >>> unsigned long *pfns; >>> int r = 0; >>> - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>> - if (unlikely(!hmm_range)) >>> - return -ENOMEM; >>> - >>> pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); >>> if (unlikely(!pfns)) { >>> r = -ENOMEM; >>> @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct >>> mmu_interval_notifier *notifier, >>> hmm_range->start = start; >>> hmm_range->hmm_pfns = pfns; >>> - *phmm_range = hmm_range; >>> - >>> return 0; >>> out_free_pfns: >>> kvfree(pfns); >>> out_free_range: >>> - kfree(hmm_range); >>> - >>> if (r == -EBUSY) >>> r = -EAGAIN; >>> return r; >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> index 953e1d06de20..c54e3c64251a 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>> @@ -34,7 +34,7 @@ >>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>> *notifier, >>> uint64_t start, uint64_t npages, bool readonly, >>> void *owner, >>> - struct hmm_range **phmm_range); >>> + struct hmm_range *hmm_range); >>> bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); >>> #if defined(CONFIG_HMM_MIRROR) >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> index 901e0c39a594..046ff2346dab 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>> @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { >>> * 
memory and start HMM tracking CPU page table update >>> * >>> * Calling function must call amdgpu_ttm_tt_userptr_range_done() >>> once and only >>> - * once afterwards to stop HMM tracking >>> + * once afterwards to stop HMM tracking. Its the caller >>> responsibility to ensure >>> + * that range is a valid memory and it is freed too. >>> */ >>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range) >>> + struct hmm_range *range) >>> { >>> struct ttm_tt *ttm = bo->tbo.ttm; >>> struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); >>> @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct >>> amdgpu_bo *bo, >>> bool readonly; >>> int r = 0; >>> - /* Make sure get_user_pages_done() can cleanup gracefully */ >>> - *range = NULL; >>> - >>> mm = bo->notifier.mm; >>> if (unlikely(!mm)) { >>> DRM_DEBUG_DRIVER("BO is not registered?\n"); >>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> index 6ac94469ed40..a8379b925878 100644 >>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>> @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct >>> amdgpu_device *adev, uint32_t type); >>> #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) >>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range); >>> + struct hmm_range *range); >>> void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, >>> struct hmm_range *range); >>> bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, >>> struct hmm_range *range); >>> #else >>> static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>> - struct hmm_range **range) >>> + struct hmm_range *range) >>> { >>> return -EPERM; >>> } >>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> index 273f42e3afdd..9f0f14ea93e5 100644 >>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>> @@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct >>> mm_struct *mm, >>> } >>> WRITE_ONCE(p->svms.faulting_task, current); >>> + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>> r = amdgpu_hmm_range_get_pages(&prange->notifier, >>> addr, npages, >>> readonly, owner, >>> - &hmm_range); >>> + hmm_range); >>> WRITE_ONCE(p->svms.faulting_task, NULL); >>> - if (r) >>> + if (r) { >>> + kfree(hmm_range); >>> pr_debug("failed %d to get svm range pages\n", r); >>> + } >>> } else { >>> r = -EFAULT; >>> } ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages 2025-10-03 20:23 ` Chen, Xiaogang @ 2025-10-06 10:35 ` Khatri, Sunil 0 siblings, 0 replies; 9+ messages in thread From: Khatri, Sunil @ 2025-10-06 10:35 UTC (permalink / raw) To: Chen, Xiaogang, Kuehling, Felix, Sunil Khatri, Christian König, Alex Deucher, amd-gfx On 10/4/2025 1:53 AM, Chen, Xiaogang wrote: > > On 9/26/2025 5:53 AM, Khatri, Sunil wrote: >> >> On 9/24/2025 10:27 PM, Kuehling, Felix wrote: >>> On 2025-09-24 06:01, Sunil Khatri wrote: >>>> update the amdgpu_ttm_tt_get_user_pages and all dependent function >>>> along with it callers to use a user allocated hmm_range buffer instead >>>> hmm layer allocates the buffer. >>>> >>>> This is a need to get hmm_range pointers easily accessible >>>> without accessing the bo and that is a requirement for the >>>> userqueue to lock the userptrs effectively. >>>> >>>> Signed-off-by: Sunil Khatri <sunil.khatri@amd.com> >>> >>> What's the reason for this change? In the current code, the >>> hmm_range is allocated by amdgpu_hmm_range_get_pages and freed by >>> amdgpu_hmm_range_get_pages_done. Your change is breaking that symmetry. >> Sorry i missed your comment. For userqueues locking the userptr bos >> and making sure we have valid userptrs at the time of validation >> seems too complicated, so along with christian we decided to use >> hmm_range list instead and have reference to userptr bo and via >> hmm_range private field to be set to bo. >> >> Also i did made sure that wherever we are doing get pages and >> allocating range the freeing part is taken care of. Specially for >> freeing the memory is still done by amdgpu_hmm_range_get_pages_done >> only. Please share if anywhere i missed something. Also Christian >> brought out the point to have separate alloc/free for hmm_range which >> i am working on and will share soon. 
> > This patch has other components to allocate hmm_range, freed it at > amdgpu_hmm_range_get_pages_done. This inconsistency makes other > components handle error case awkward. It is better to let other > component free hmm_range no matter amdgpu_hmm_range_get_pages success > or not. And amdgpu_hmm_range_get_pages(done) alloc/free > hmm_range->hmm_pfns. That would be easy to understand and have less > chance to make mistake. Yes, that inconsistency is there and that is to be fixed is taken care by having separate alloc/free functions and not use amdgpu_hmm_range_get_pages_done and some more clean up done in other patch since this code is already merged. Regards Sunil khatri > > Regards > > Xiaogang > >> >> Regards >> Sunil Khatri >> >>> Regards, >>> Felix >>> >>> >>>> --- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 >>>> ++++++++++++++-- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 11 +---------- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 2 +- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++----- >>>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++-- >>>> drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- >>>> 8 files changed, 38 insertions(+), 26 deletions(-) >>>> >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> index 7c54fe6b0f5d..4babd37712fb 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c >>>> @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem >>>> *mem, uint64_t user_addr, >>>> return 0; >>>> } >>>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); >>>> + range = kzalloc(sizeof(*range), GFP_KERNEL); >>>> + if (unlikely(!range)) { >>>> + ret = -ENOMEM; >>>> + goto unregister_out; >>>> + } >>>> + >>>> + ret = amdgpu_ttm_tt_get_user_pages(bo, range); >>>> if 
(ret) { >>>> + kfree(range); >>>> if (ret == -EAGAIN) >>>> pr_debug("Failed to get user pages, try again\n"); >>>> else >>>> @@ -2567,9 +2574,14 @@ static int update_invalid_user_pages(struct >>>> amdkfd_process_info *process_info, >>>> } >>>> } >>>> + mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL); >>>> + if (unlikely(!mem->range)) >>>> + return -ENOMEM; >>>> /* Get updated user pages */ >>>> - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); >>>> + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); >>>> if (ret) { >>>> + kfree(mem->range); >>>> + mem->range = NULL; >>>> pr_debug("Failed %d to get user pages\n", ret); >>>> /* Return -EFAULT bad address error as success. It >>>> will >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> index 744e6ff69814..31eea1c7dac3 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c >>>> @@ -884,9 +884,13 @@ static int amdgpu_cs_parser_bos(struct >>>> amdgpu_cs_parser *p, >>>> amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { >>>> bool userpage_invalidated = false; >>>> struct amdgpu_bo *bo = e->bo; >>>> + e->range = kzalloc(sizeof(*e->range), GFP_KERNEL); >>>> + if (unlikely(!e->range)) >>>> + return -ENOMEM; >>>> + >>>> int i; >>>> - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); >>>> + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); >>>> if (r) >>>> goto out_free_user_pages; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> index 8524aa55e057..12f0597a3659 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c >>>> @@ -571,10 +571,14 @@ int amdgpu_gem_userptr_ioctl(struct >>>> drm_device *dev, void *data, >>>> goto release_object; >>>> if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { >>>> - r = amdgpu_ttm_tt_get_user_pages(bo, &range); >>>> - if (r) >>>> + range = kzalloc(sizeof(*range), 
GFP_KERNEL); >>>> + if (unlikely(!range)) >>>> + return -ENOMEM; >>>> + r = amdgpu_ttm_tt_get_user_pages(bo, range); >>>> + if (r) { >>>> + kfree(range); >>>> goto release_object; >>>> - >>>> + } >>>> r = amdgpu_bo_reserve(bo, true); >>>> if (r) >>>> goto user_pages_done; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> index 2c6a6b858112..53d405a92a14 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c >>>> @@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) >>>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>>> *notifier, >>>> uint64_t start, uint64_t npages, bool readonly, >>>> void *owner, >>>> - struct hmm_range **phmm_range) >>>> + struct hmm_range *hmm_range) >>>> { >>>> - struct hmm_range *hmm_range; >>>> unsigned long end; >>>> unsigned long timeout; >>>> unsigned long *pfns; >>>> int r = 0; >>>> - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>>> - if (unlikely(!hmm_range)) >>>> - return -ENOMEM; >>>> - >>>> pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); >>>> if (unlikely(!pfns)) { >>>> r = -ENOMEM; >>>> @@ -221,15 +216,11 @@ int amdgpu_hmm_range_get_pages(struct >>>> mmu_interval_notifier *notifier, >>>> hmm_range->start = start; >>>> hmm_range->hmm_pfns = pfns; >>>> - *phmm_range = hmm_range; >>>> - >>>> return 0; >>>> out_free_pfns: >>>> kvfree(pfns); >>>> out_free_range: >>>> - kfree(hmm_range); >>>> - >>>> if (r == -EBUSY) >>>> r = -EAGAIN; >>>> return r; >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> index 953e1d06de20..c54e3c64251a 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h >>>> @@ -34,7 +34,7 @@ >>>> int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier >>>> *notifier, >>>> uint64_t start, uint64_t npages, bool readonly, >>>> void *owner, >>>> - 
struct hmm_range **phmm_range); >>>> + struct hmm_range *hmm_range); >>>> bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); >>>> #if defined(CONFIG_HMM_MIRROR) >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> index 901e0c39a594..046ff2346dab 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c >>>> @@ -705,10 +705,11 @@ struct amdgpu_ttm_tt { >>>> * memory and start HMM tracking CPU page table update >>>> * >>>> * Calling function must call amdgpu_ttm_tt_userptr_range_done() >>>> once and only >>>> - * once afterwards to stop HMM tracking >>>> + * once afterwards to stop HMM tracking. Its the caller >>>> responsibility to ensure >>>> + * that range is a valid memory and it is freed too. >>>> */ >>>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range) >>>> + struct hmm_range *range) >>>> { >>>> struct ttm_tt *ttm = bo->tbo.ttm; >>>> struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); >>>> @@ -718,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct >>>> amdgpu_bo *bo, >>>> bool readonly; >>>> int r = 0; >>>> - /* Make sure get_user_pages_done() can cleanup gracefully */ >>>> - *range = NULL; >>>> - >>>> mm = bo->notifier.mm; >>>> if (unlikely(!mm)) { >>>> DRM_DEBUG_DRIVER("BO is not registered?\n"); >>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> index 6ac94469ed40..a8379b925878 100644 >>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h >>>> @@ -191,14 +191,14 @@ uint64_t amdgpu_ttm_domain_start(struct >>>> amdgpu_device *adev, uint32_t type); >>>> #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) >>>> int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range); >>>> + struct hmm_range *range); >>>> void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, >>>> struct 
hmm_range *range); >>>> bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, >>>> struct hmm_range *range); >>>> #else >>>> static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, >>>> - struct hmm_range **range) >>>> + struct hmm_range *range) >>>> { >>>> return -EPERM; >>>> } >>>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> index 273f42e3afdd..9f0f14ea93e5 100644 >>>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c >>>> @@ -1737,12 +1737,15 @@ static int >>>> svm_range_validate_and_map(struct mm_struct *mm, >>>> } >>>> WRITE_ONCE(p->svms.faulting_task, current); >>>> + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); >>>> r = amdgpu_hmm_range_get_pages(&prange->notifier, >>>> addr, npages, >>>> readonly, owner, >>>> - &hmm_range); >>>> + hmm_range); >>>> WRITE_ONCE(p->svms.faulting_task, NULL); >>>> - if (r) >>>> + if (r) { >>>> + kfree(hmm_range); >>>> pr_debug("failed %d to get svm range pages\n", r); >>>> + } >>>> } else { >>>> r = -EFAULT; >>>> } ^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2025-10-06 10:36 UTC | newest] Thread overview: 9+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2025-09-24 10:01 [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Sunil Khatri 2025-09-24 10:01 ` [Patch v2 2/2] drm/amdgpu: move variable declaration to top of amdgpu_cs_parser_bos Sunil Khatri 2025-09-24 12:09 ` Christian König 2025-09-24 16:57 ` [Patch v2 1/2] drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages Kuehling, Felix 2025-09-26 10:53 ` Khatri, Sunil 2025-10-01 5:41 ` Kuehling, Felix 2025-10-01 7:32 ` Christian König 2025-10-03 20:23 ` Chen, Xiaogang 2025-10-06 10:35 ` Khatri, Sunil
This is a public inbox; see mirroring instructions for how to clone and mirror all data and code used for this inbox.