* [PATCH v11] Add CRIU support for amdgpu dmabuf
From: David Francis @ 2025-08-07 20:22 UTC
To: amd-gfx
Cc: tvrtko.ursulin, Felix.Kuehling, David.YatSin, Chris.Freehill,
Christian.Koenig, dcostantino, sruffell, mripard, tzimmermann,
Alexander.Deucher
This patch series adds support for CRIU checkpointing of processes that
share memory via the amdgpu dmabuf interface.
This v11 fixes some locking and formatting issues identified by Christian.
Accompanying CRIU changes:
https://github.com/checkpoint-restore/criu/pull/2613
* [PATCH v11 1/3] drm/amdgpu: Add ioctl to get all gem handles for a process
From: David Francis @ 2025-08-07 20:22 UTC
To: amd-gfx
Cc: tvrtko.ursulin, Felix.Kuehling, David.YatSin, Chris.Freehill,
Christian.Koenig, dcostantino, sruffell, mripard, tzimmermann,
Alexander.Deucher, David Francis
Add new ioctl DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES.
This ioctl returns a list of bos with their handles, sizes,
flags, and domains.
This ioctl is meant to be used during CRIU checkpoint and
provides the information needed to reconstruct the bos
in CRIU restore.
Signed-off-by: David Francis <David.Francis@amd.com>
---
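Not part of the patch, but for illustration, a minimal userspace sketch
of the intended two-pass query pattern (assumes an open render node fd
and libdrm's drmIoctl() wrapper; error handling elided):

	struct drm_amdgpu_gem_list_handles args = { 0 };
	struct drm_amdgpu_gem_list_handles_entry *entries = NULL;
	uint32_t alloced = 0;

	for (;;) {
		/* With num_entries == 0, the first call only reports the count. */
		if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args))
			err(1, "GEM_LIST_HANDLES");
		if (args.num_entries <= alloced)
			break;	/* entries[0..args.num_entries) is now valid */
		/* Buffer too small (or first pass): grow and retry. */
		alloced = args.num_entries;
		entries = realloc(entries, alloced * sizeof(*entries));
		args.entries = (uintptr_t)entries;
	}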
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 82 +++++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +
include/uapi/drm/amdgpu_drm.h | 33 ++++++++++
4 files changed, 118 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4ff3a2eaaf55..f19795dddf9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -3031,6 +3031,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e3f65977eeee..7f55e3b7d8a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -1032,6 +1032,88 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
return r;
}
+/**
+ * drm_amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * num_bos is set as an input to the size of the bo_buckets array.
+ * num_bos is sent back as output as the number of bos in the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+ args->num_entries = num_bos;
+ if (num_bos == 0)
+ return 0;
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries) {
+ ret = -ENOMEM;
+ goto exit_free;
+ }
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EINVAL;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & (~AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE);
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ if (!ret) {
+ ret = copy_to_user((void __user *)args->entries, bo_entries, num_bos * sizeof(*bo_entries));
+ if (ret) {
+ pr_debug("Failed to copy BO information to user\n");
+ ret = -EFAULT;
+ }
+ }
+exit_free:
+ kvfree(bo_entries);
+
+ return ret;
+}
+
+
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
int width,
int cpp,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index b51e8f95ee86..7cdb6237bb92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -67,6 +67,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index bdedbaccf776..218a179818d4 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -57,6 +57,7 @@ extern "C" {
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
+#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -77,6 +78,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
+#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
/**
* DOC: memory domains
@@ -811,6 +813,37 @@ struct drm_amdgpu_gem_op {
__u64 value;
};
+#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
+
+struct drm_amdgpu_gem_list_handles {
+ /* User pointer to array of drm_amdgpu_gem_bo_info_entry */
+ __u64 entries;
+
+ /* IN: Size of entries buffer. OUT: Number of handles in process (if larger than size of buffer, must retry) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
+
+struct drm_amdgpu_gem_list_handles_entry {
+ /* gem handle of buffer object */
+ __u32 gem_handle;
+
+ /* Pending how to handle this; provides information needed to remake the buffer on restore */
+ __u32 preferred_domains;
+
+ /* Size of bo */
+ __u64 size;
+
+ /* GEM_CREATE flags for re-creation of buffer */
+ __u64 alloc_flags;
+
+ /* Currently just one flag: IS_IMPORT */
+ __u32 flags;
+
+ __u32 padding;
+};
+
#define AMDGPU_VA_OP_MAP 1
#define AMDGPU_VA_OP_UNMAP 2
#define AMDGPU_VA_OP_CLEAR 3
--
2.34.1
* [PATCH v11 2/3] drm/amdgpu: Add mapping info option for GEM_OP ioctl
From: David Francis @ 2025-08-07 20:22 UTC
To: amd-gfx
Cc: tvrtko.ursulin, Felix.Kuehling, David.YatSin, Chris.Freehill,
Christian.Koenig, dcostantino, sruffell, mripard, tzimmermann,
Alexander.Deucher, David Francis
Add new GEM_OP ioctl option GET_MAPPING_INFO, which
returns a list of the mappings associated with a given bo, along with
their positions, offsets, and flags.
Signed-off-by: David Francis <David.Francis@amd.com>
---
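Not part of the patch, but for illustration, a userspace sketch of the
new GEM_OP query (gem_handle and fd are assumed valid; the
enlarge-and-retry step is elided):

	struct drm_amdgpu_gem_vm_bucket buckets[16];
	struct drm_amdgpu_gem_op op = {
		.handle = gem_handle,
		.op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
		.value = (uintptr_t)buckets,
		.num_entries = 16,
	};
	unsigned int i;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op))
		err(1, "GEM_OP");
	if (op.num_entries > 16)
		err(1, "more mappings than buckets: enlarge array and retry");
	for (i = 0; i < op.num_entries; i++)
		printf("va pages [%llu..%llu] offset %llu flags 0x%llx\n",
		       (unsigned long long)buckets[i].start,
		       (unsigned long long)buckets[i].last,
		       (unsigned long long)buckets[i].offset,
		       (unsigned long long)buckets[i].flags);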
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 96 +++++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 ++
include/uapi/drm/amdgpu_drm.h | 18 +++++
3 files changed, 119 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7f55e3b7d8a2..29ee1df37661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -956,6 +956,98 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return r;
}
+/**
+ * amdgpu_gem_list_mappings - get information about a buffer's mappings
+ *
+ * @gobj: gem object
+ * @args: gem_op arguments
+ * @fpriv: drm file pointer
+ *
+ * num_entries is set as an input to the size of the user-allocated array of
+ * drm_amdgpu_gem_vm_bucket stored at args->value.
+ * num_entries is sent back as output as the number of mappings the bo has.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+static int amdgpu_gem_list_mappings(struct drm_gem_object *gobj, struct amdgpu_fpriv *fpriv,
+ struct drm_amdgpu_gem_op *args)
+{
+ struct amdgpu_vm *avm = &fpriv->vm;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(avm, bo);
+ struct drm_amdgpu_gem_vm_bucket *vm_buckets;
+ struct amdgpu_bo_va_mapping *mapping;
+ struct drm_exec exec;
+ int num_mappings = 0;
+ int ret;
+
+ vm_buckets = kvcalloc(args->num_entries, sizeof(*vm_buckets), GFP_KERNEL);
+ if (!vm_buckets) {
+ ret = -ENOMEM;
+ goto free_vms;
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ if (gobj) {
+ ret = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ goto unlock_exec;
+ }
+
+ ret = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ goto unlock_exec;
+ }
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_buckets[num_mappings].start = mapping->start;
+ vm_buckets[num_mappings].last = mapping->last;
+ vm_buckets[num_mappings].offset = mapping->offset;
+ vm_buckets[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_buckets[num_mappings].start = mapping->start;
+ vm_buckets[num_mappings].last = mapping->last;
+ vm_buckets[num_mappings].offset = mapping->offset;
+ vm_buckets[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries) {
+ ret = copy_to_user((void __user *)args->value, vm_buckets, num_mappings * sizeof(*vm_buckets));
+ if (ret) {
+ pr_debug("Failed to copy BO information to user\n");
+ ret = -EFAULT;
+ }
+ }
+ args->num_entries = num_mappings;
+
+ kvfree(vm_buckets);
+
+ return ret;
+unlock_exec:
+ drm_exec_fini(&exec);
+free_vms:
+ kvfree(vm_buckets);
+
+ return ret;
+}
+
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -1022,6 +1114,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO:
+ amdgpu_bo_unreserve(robj);
+ r = amdgpu_gem_list_mappings(gobj, filp->driver_priv, args);
+ break;
default:
amdgpu_bo_unreserve(robj);
r = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f9549f6b3d1f..5a63ae490b0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -668,4 +668,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &bo_va->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &bo_va->invalids, list)
+
#endif
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 218a179818d4..6b857f528823 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -802,6 +802,21 @@ union drm_amdgpu_wait_fences {
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+#define AMDGPU_GEM_OP_GET_MAPPING_INFO 2
+
+struct drm_amdgpu_gem_vm_bucket {
+ /* Start of mapping (in number of pages) */
+ __u64 start;
+
+ /* End of mapping (in number of pages) */
+ __u64 last;
+
+ /* Mapping offset */
+ __u64 offset;
+
+ /* flags needed to recreate mapping */
+ __u64 flags;
+};
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
@@ -811,6 +826,9 @@ struct drm_amdgpu_gem_op {
__u32 op;
/** Input or return value */
__u64 value;
+ /** For MAPPING_INFO op: number of mappings (in/out) */
+ __u32 num_entries;
+ __u32 padding;
};
#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
--
2.34.1
* [PATCH v11 3/3] drm/amdgpu: Allow kfd CRIU with no buffer objects
From: David Francis @ 2025-08-07 20:22 UTC
To: amd-gfx
Cc: tvrtko.ursulin, Felix.Kuehling, David.YatSin, Chris.Freehill,
Christian.Koenig, dcostantino, sruffell, mripard, tzimmermann,
Alexander.Deucher, David Francis, Felix Kuehling
The kfd CRIU restore ioctl would return an error when trying
to restore a checkpoint of a process with no kfd buffer objects.
This is a normal case and should not be an error.
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: David Francis <David.Francis@amd.com>
---
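Not part of the patch, but for illustration, the call shape this now
permits (a sketch assuming the struct kfd_ioctl_criu_args layout from
include/uapi/linux/kfd_ioctl.h; device and private-data buffers are
filled in as for any other restore):

	struct kfd_ioctl_criu_args args = {
		.op = KFD_CRIU_OP_RESTORE,
		.devices = (uintptr_t)device_buckets,
		.num_devices = num_devices,
		.priv_data = (uintptr_t)priv_data,
		.priv_data_size = priv_data_size,
		.bos = 0,
		.num_bos = 0,	/* previously rejected with -EINVAL */
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args))
		err(1, "CRIU restore");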
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 828a9ceef1e7..f7f34b710d3e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -2566,8 +2566,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
--
2.34.1
* Re: [PATCH v11 1/3] drm/amdgpu: Add ioctl to get all gem handles for a process
From: Christian König @ 2025-08-08 9:37 UTC
To: David Francis, amd-gfx
Cc: tvrtko.ursulin, Felix.Kuehling, David.YatSin, Chris.Freehill,
dcostantino, sruffell, mripard, tzimmermann, Alexander.Deucher
On 07.08.25 22:22, David Francis wrote:
> Add new ioctl DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES.
>
> This ioctl returns a list of bos with their handles, sizes,
> and flags and domains.
>
> This ioctl is meant to be used during CRIU checkpoint and
> provide information needed to reconstruct the bos
> in CRIU restore.
>
> Signed-off-by: David Francis <David.Francis@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 82 +++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +
> include/uapi/drm/amdgpu_drm.h | 33 ++++++++++
> 4 files changed, 118 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 4ff3a2eaaf55..f19795dddf9d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -3031,6 +3031,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> };
>
> static const struct drm_driver amdgpu_kms_driver = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index e3f65977eeee..7f55e3b7d8a2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -1032,6 +1032,88 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> return r;
> }
>
> +/**
> + * drm_amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
> + *
> + * @dev: drm device pointer
> + * @data: drm_amdgpu_gem_list_handles
> + * @filp: drm file pointer
> + *
> + * num_bos is set as an input to the size of the bo_buckets array.
> + * num_bos is sent back as output as the number of bos in the process.
> + * If that number is larger than the size of the array, the ioctl must
> + * be retried.
> + *
> + * Returns:
> + * 0 for success, -errno for errors.
> + */
> +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *filp)
> +{
> + struct drm_amdgpu_gem_list_handles *args = data;
> + struct drm_amdgpu_gem_list_handles_entry *bo_entries;
> + struct drm_gem_object *gobj;
> + int id, ret = 0;
> + int bo_index = 0;
> + int num_bos = 0;
> +
> + spin_lock(&filp->table_lock);
> + idr_for_each_entry(&filp->object_idr, gobj, id)
> + num_bos += 1;
> + spin_unlock(&filp->table_lock);
> +
> + if (args->num_entries < num_bos) {
> + args->num_entries = num_bos;
> + return 0;
> + }
> + args->num_entries = num_bos;
> + if (num_bos == 0)
> + return 0;
> +
> + bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
> + if (!bo_entries) {
> + ret = -ENOMEM;
> + goto exit_free;
> + }
> +
> + spin_lock(&filp->table_lock);
> + idr_for_each_entry(&filp->object_idr, gobj, id) {
> + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
> + struct drm_amdgpu_gem_list_handles_entry *bo_entry;
> +
> + if (bo_index >= num_bos) {
> + ret = -EINVAL;
> + break;
spin_unlock() + goto exit_free.
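I.e. a sketch of what I mean:

	if (bo_index >= num_bos) {
		spin_unlock(&filp->table_lock);
		ret = -EINVAL;
		goto exit_free;
	}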
Apart from that looks good to me. Feel free to add my rb, but I think Felix and others might want to take a look as well.
Regards,
Christian.
> + }
> +
> + bo_entry = &bo_entries[bo_index];
> +
> + bo_entry->size = amdgpu_bo_size(bo);
> + bo_entry->alloc_flags = bo->flags & (~AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE);
> + bo_entry->preferred_domains = bo->preferred_domains;
> + bo_entry->gem_handle = id;
> +
> + if (bo->tbo.base.import_attach)
> + bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
> +
> + bo_index += 1;
> + }
> + spin_unlock(&filp->table_lock);
> +
> + if (!ret) {
> + ret = copy_to_user((void __user *)args->entries, bo_entries, num_bos * sizeof(*bo_entries));
> + if (ret) {
> + pr_debug("Failed to copy BO information to user\n");
> + ret = -EFAULT;
> + }
> + }
> +exit_free:
> + kvfree(bo_entries);
> +
> + return ret;
> +}
> +
> +
> static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
> int width,
> int cpp,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> index b51e8f95ee86..7cdb6237bb92 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> @@ -67,6 +67,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *filp);
>
> int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index bdedbaccf776..218a179818d4 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -57,6 +57,7 @@ extern "C" {
> #define DRM_AMDGPU_USERQ 0x16
> #define DRM_AMDGPU_USERQ_SIGNAL 0x17
> #define DRM_AMDGPU_USERQ_WAIT 0x18
> +#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
>
> #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
> #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -77,6 +78,7 @@ extern "C" {
> #define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
> #define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
> #define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
> +#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
>
> /**
> * DOC: memory domains
> @@ -811,6 +813,37 @@ struct drm_amdgpu_gem_op {
> __u64 value;
> };
>
> +#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
> +
> +struct drm_amdgpu_gem_list_handles {
> + /* User pointer to array of drm_amdgpu_gem_bo_info_entry */
> + __u64 entries;
> +
> + /* IN: Size of entries buffer. OUT: Number of handles in process (if larger than size of buffer, must retry) */
> + __u32 num_entries;
> +
> + __u32 padding;
> +};
> +
> +struct drm_amdgpu_gem_list_handles_entry {
> + /* gem handle of buffer object */
> + __u32 gem_handle;
> +
> + /* Pending how to handle this; provides information needed to remake the buffer on restore */
> + __u32 preferred_domains;
> +
> + /* Size of bo */
> + __u64 size;
> +
> + /* GEM_CREATE flags for re-creation of buffer */
> + __u64 alloc_flags;
> +
> + /* Currently just one flag: IS_IMPORT */
> + __u32 flags;
> +
> + __u32 padding;
> +};
> +
> #define AMDGPU_VA_OP_MAP 1
> #define AMDGPU_VA_OP_UNMAP 2
> #define AMDGPU_VA_OP_CLEAR 3
* Re: [PATCH v11 1/3] drm/amdgpu: Add ioctl to get all gem handles for a process
From: Tvrtko Ursulin @ 2025-08-11 9:58 UTC
To: David Francis, amd-gfx
Cc: Felix.Kuehling, David.YatSin, Chris.Freehill, Christian.Koenig,
dcostantino, sruffell, mripard, tzimmermann, Alexander.Deucher
On 07/08/2025 21:22, David Francis wrote:
> Add new ioctl DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES.
>
> This ioctl returns a list of bos with their handles, sizes,
> and flags and domains.
>
> This ioctl is meant to be used during CRIU checkpoint and
> provide information needed to reconstruct the bos
> in CRIU restore.
>
> Signed-off-by: David Francis <David.Francis@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 82 +++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +
> include/uapi/drm/amdgpu_drm.h | 33 ++++++++++
> 4 files changed, 118 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 4ff3a2eaaf55..f19795dddf9d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -3031,6 +3031,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
> };
>
> static const struct drm_driver amdgpu_kms_driver = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index e3f65977eeee..7f55e3b7d8a2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -1032,6 +1032,88 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> return r;
> }
>
> +/**
> + * drm_amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
> + *
> + * @dev: drm device pointer
> + * @data: drm_amdgpu_gem_list_handles
> + * @filp: drm file pointer
> + *
> + * num_bos is set as an input to the size of the bo_buckets array.
> + * num_bos is sent back as output as the number of bos in the process.
> + * If that number is larger than the size of the array, the ioctl must
> + * be retried.
> + *
> + * Returns:
> + * 0 for success, -errno for errors.
> + */
> +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *filp)
> +{
> + struct drm_amdgpu_gem_list_handles *args = data;
> + struct drm_amdgpu_gem_list_handles_entry *bo_entries;
> + struct drm_gem_object *gobj;
> + int id, ret = 0;
> + int bo_index = 0;
> + int num_bos = 0;
> +
> + spin_lock(&filp->table_lock);
> + idr_for_each_entry(&filp->object_idr, gobj, id)
> + num_bos += 1;
> + spin_unlock(&filp->table_lock);
> +
> + if (args->num_entries < num_bos) {
> + args->num_entries = num_bos;
> + return 0;
> + }
> + args->num_entries = num_bos;
Looks like the authoritative number is only obtained in the second
iteration, so I think it should be assigned only there.
At least the uapi doc ("OUT: Number of handles in process (if larger
than size of buffer, must retry)") does not suggest that the count can
be larger than the actual number of handles, or that handles must also
be tested for being non-zero. If that is the intention it works as is,
but it needs to be documented in the uapi.
> + if (num_bos == 0)
> + return 0;
> +
> + bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
> + if (!bo_entries) {
> + ret = -ENOMEM;
> + goto exit_free;
return -ENOMEM is enough here.
> + }
> +
> + spin_lock(&filp->table_lock);
> + idr_for_each_entry(&filp->object_idr, gobj, id) {
> + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
> + struct drm_amdgpu_gem_list_handles_entry *bo_entry;
> +
> + if (bo_index >= num_bos) {
> + ret = -EINVAL;
> + break;
> + }
> +
> + bo_entry = &bo_entries[bo_index];
> +
> + bo_entry->size = amdgpu_bo_size(bo);
> + bo_entry->alloc_flags = bo->flags & (~AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE);
Why is this flag special, and why is it okay not to restore it?
> + bo_entry->preferred_domains = bo->preferred_domains;
> + bo_entry->gem_handle = id;
> +
> + if (bo->tbo.base.import_attach)
> + bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
Imported bos are only informative (since they will need a completely
different path to restore)? But is it required to list them? Do you have
the restore side implemented for reference?
> +
> + bo_index += 1;
> + }
> + spin_unlock(&filp->table_lock);
> +
> + if (!ret) {
> + ret = copy_to_user((void __user *)args->entries, bo_entries, num_bos * sizeof(*bo_entries));
u64_to_user_ptr()
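I.e. (sketch):

	ret = copy_to_user(u64_to_user_ptr(args->entries), bo_entries,
			   num_bos * sizeof(*bo_entries));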
> + if (ret) {
> + pr_debug("Failed to copy BO information to user\n");
I would be tempted to remove this pr_debug since -EFAULT is pretty clear
anyway. In which case this whole if (ret) block seems superfluous.
> + ret = -EFAULT;
> + }
> + }
> +exit_free:
> + kvfree(bo_entries);
> +
> + return ret;
> +}
> +
> +
> static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
> int width,
> int cpp,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> index b51e8f95ee86..7cdb6237bb92 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
> @@ -67,6 +67,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> +int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *filp);
>
> int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp);
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index bdedbaccf776..218a179818d4 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -57,6 +57,7 @@ extern "C" {
> #define DRM_AMDGPU_USERQ 0x16
> #define DRM_AMDGPU_USERQ_SIGNAL 0x17
> #define DRM_AMDGPU_USERQ_WAIT 0x18
> +#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
>
> #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
> #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
> @@ -77,6 +78,7 @@ extern "C" {
> #define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
> #define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
> #define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
> +#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
>
> /**
> * DOC: memory domains
> @@ -811,6 +813,37 @@ struct drm_amdgpu_gem_op {
> __u64 value;
> };
>
> +#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
> +
> +struct drm_amdgpu_gem_list_handles {
> + /* User pointer to array of drm_amdgpu_gem_bo_info_entry */
> + __u64 entries;
> +
> + /* IN: Size of entries buffer. OUT: Number of handles in process (if larger than size of buffer, must retry) */
> + __u32 num_entries;
> +
> + __u32 padding;
> +};
> +
> +struct drm_amdgpu_gem_list_handles_entry {
> + /* gem handle of buffer object */
> + __u32 gem_handle;
> +
> + /* Pending how to handle this; provides information needed to remake the buffer on restore */
> + __u32 preferred_domains;
Hm, just as an observation, the width of these fields is not very well
defined in the uapi. Yes, there are only a few flags today, but for example
drm_amdgpu_gem_create_in has them as __u64, as does value via
AMDGPU_GEM_OP_SET_PLACEMENT. So maybe future-proofing to __u64 wouldn't
harm here. It would also remove the need to pad the structure.
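One possible layout along those lines (just a sketch; which fields to
widen and the ordering are open):

	struct drm_amdgpu_gem_list_handles_entry {
		__u32 gem_handle;
		__u32 flags;			/* currently just IS_IMPORT */
		__u64 preferred_domains;	/* widened from __u32 */
		__u64 size;
		__u64 alloc_flags;
	};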
Regards,
Tvrtko
> +
> + /* Size of bo */
> + __u64 size;
> +
> + /* GEM_CREATE flags for re-creation of buffer */
> + __u64 alloc_flags;
> +
> + /* Currently just one flag: IS_IMPORT */
> + __u32 flags;
> +
> + __u32 padding;
> +};
> +
> #define AMDGPU_VA_OP_MAP 1
> #define AMDGPU_VA_OP_UNMAP 2
> #define AMDGPU_VA_OP_CLEAR 3
* Re: [PATCH v11 2/3] drm/amdgpu: Add mapping info option for GEM_OP ioctl
From: Tvrtko Ursulin @ 2025-08-11 11:14 UTC
To: David Francis, amd-gfx
Cc: Felix.Kuehling, David.YatSin, Chris.Freehill, Christian.Koenig,
dcostantino, sruffell, mripard, tzimmermann, Alexander.Deucher
On 07/08/2025 21:22, David Francis wrote:
> Add new GEM_OP_IOCTL option GET_MAPPING_INFO, which
> returns a list of mappings associated with a given bo, along with
> their positions and offsets.
>
> Signed-off-by: David Francis <David.Francis@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 96 +++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 ++
> include/uapi/drm/amdgpu_drm.h | 18 +++++
> 3 files changed, 119 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 7f55e3b7d8a2..29ee1df37661 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -956,6 +956,98 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
> return r;
> }
>
> +/**
> + * amdgpu_gem_list_mappings - get information about a buffer's mappings
> + *
> + * @gobj: gem object
> + * @args: gem_op arguments
> + * @fpriv: drm file pointer
> + *
> + * num_entries is set as an input to the size of the user-allocated array of
> + * drm_amdgpu_gem_vm_bucket stored at args->value.
> + * num_entries is sent back as output as the number of mappings the bo has.
> + * If that number is larger than the size of the array, the ioctl must
> + * be retried.
> + *
> + * Returns:
> + * 0 for success, -errno for errors.
> + */
> +static int amdgpu_gem_list_mappings(struct drm_gem_object *gobj, struct amdgpu_fpriv *fpriv,
> + struct drm_amdgpu_gem_op *args)
> +{
> + struct amdgpu_vm *avm = &fpriv->vm;
> + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
> + struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(avm, bo);
> + struct drm_amdgpu_gem_vm_bucket *vm_buckets;
> + struct amdgpu_bo_va_mapping *mapping;
> + struct drm_exec exec;
> + int num_mappings = 0;
> + int ret;
> +
> + vm_buckets = kvcalloc(args->num_entries, sizeof(*vm_buckets), GFP_KERNEL);
> + if (!vm_buckets) {
> + ret = -ENOMEM;
> + goto free_vms;
return -ENOMEM would be enough.
> + }
> +
> + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
> + DRM_EXEC_IGNORE_DUPLICATES, 0);
> + drm_exec_until_all_locked(&exec) {
> + if (gobj) {
> + ret = drm_exec_lock_obj(&exec, gobj);
> + drm_exec_retry_on_contention(&exec);
> + if (ret)
> + goto unlock_exec;
> + }
> +
> + ret = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
What are the two reserved fence slots for?
> + drm_exec_retry_on_contention(&exec);
> + if (ret)
> + goto unlock_exec;
> + }
> +
> + amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
> + if (num_mappings < args->num_entries) {
> + vm_buckets[num_mappings].start = mapping->start;
> + vm_buckets[num_mappings].last = mapping->last;
> + vm_buckets[num_mappings].offset = mapping->offset;
> + vm_buckets[num_mappings].flags = mapping->flags;
> + }
> + num_mappings += 1;
> + }
> +
> + amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
> + if (num_mappings < args->num_entries) {
> + vm_buckets[num_mappings].start = mapping->start;
> + vm_buckets[num_mappings].last = mapping->last;
> + vm_buckets[num_mappings].offset = mapping->offset;
> + vm_buckets[num_mappings].flags = mapping->flags;
> + }
> + num_mappings += 1;
> + }
> +
> + drm_exec_fini(&exec);
> +
> + if (num_mappings > 0 && num_mappings <= args->num_entries) {
> + ret = copy_to_user((void __user *)args->value, vm_buckets, num_mappings * sizeof(*vm_buckets));
> + if (ret) {
> + pr_debug("Failed to copy BO information to user\n");
> + ret = -EFAULT;
> + }
Same comment as in the first patch - u64_to_user_ptr() - and I'd
consider dropping the pr_debug path.
> + }
> + args->num_entries = num_mappings;
> +
> + kvfree(vm_buckets);
> +
> + return ret;
> +unlock_exec:
> + drm_exec_fini(&exec);
> +free_vms:
> + kvfree(vm_buckets);
> +
> + return ret;
I think the success/failure exit paths could be consolidated, with the
copy size checked before args->num_entries is overwritten:

	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
		...
	}

	if (num_mappings > 0 && num_mappings <= args->num_entries)
		ret = copy_to_user(...);

	args->num_entries = num_mappings;

unlock_exec:
	drm_exec_fini(&exec);

	kvfree(vm_buckets);

	return ret;
Regards,
Tvrtko
> +}
> +
> int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
> struct drm_file *filp)
> {
> @@ -1022,6 +1114,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
>
> amdgpu_bo_unreserve(robj);
> break;
> + case AMDGPU_GEM_OP_GET_MAPPING_INFO:
> + amdgpu_bo_unreserve(robj);
> + r = amdgpu_gem_list_mappings(gobj, filp->driver_priv, args);
> + break;
> default:
> amdgpu_bo_unreserve(robj);
> r = -EINVAL;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index f9549f6b3d1f..5a63ae490b0e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -668,4 +668,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
> struct amdgpu_vm *vm,
> struct dma_fence **fence);
>
> +#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
> + list_for_each_entry(mapping, &bo_va->valids, list)
> +#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
> + list_for_each_entry(mapping, &bo_va->invalids, list)
> +
> #endif
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 218a179818d4..6b857f528823 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -802,6 +802,21 @@ union drm_amdgpu_wait_fences {
>
> #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
> #define AMDGPU_GEM_OP_SET_PLACEMENT 1
> +#define AMDGPU_GEM_OP_GET_MAPPING_INFO 2
> +
> +struct drm_amdgpu_gem_vm_bucket {
> + /* Start of mapping (in number of pages) */
> + __u64 start;
> +
> + /* End of mapping (in number of pages) */
> + __u64 last;
> +
> + /* Mapping offset */
> + __u64 offset;
> +
> + /* flags needed to recreate mapping */
> + __u64 flags;
> +};
>
> /* Sets or returns a value associated with a buffer. */
> struct drm_amdgpu_gem_op {
> @@ -811,6 +826,9 @@ struct drm_amdgpu_gem_op {
> __u32 op;
> /** Input or return value */
> __u64 value;
> + /** For MAPPING_INFO op: number of mappings (in/out) */
> + __u32 num_entries;
> + __u32 padding;
> };
>
> #define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)