From: Jonathan Cavitt <jonathan.cavitt@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: saurabhg.gupta@intel.com, alex.zuo@intel.com,
jonathan.cavitt@intel.com, joonas.lahtinen@linux.intel.com,
matthew.brost@intel.com, jianxun.zhang@intel.com,
dri-devel@lists.freedesktop.org
Subject: [PATCH v2 8/8] drm/xe/xe_vm: Implement xe_vm_get_property_ioctl
Date: Thu, 27 Feb 2025 19:14:57 +0000 [thread overview]
Message-ID: <20250227191457.84035-9-jonathan.cavitt@intel.com> (raw)
In-Reply-To: <20250227191457.84035-1-jonathan.cavitt@intel.com>
Add support for userspace to get various properties from a specified VM.
The currently supported properties are:
- The number of engine resets the VM has observed
- The number of exec queue bans the VM has observed, up to the last 50
relevant ones, in total.
- The number of exec queue bans the VM has observed, up to the last 50
relevant ones, that were caused by faults.
The latter two requests also include information on the exec queue bans
themselves, such as the ID of the banned exec queue and, when relevant,
the faulting address, address type, and address precision.
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 3 +
drivers/gpu/drm/xe/xe_vm.c | 102 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 2 +
3 files changed, 107 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 9454b51f7ad8..43accae152ff 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -193,6 +193,9 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_GET_PROPERTY, xe_vm_get_property_ioctl,
+ DRM_RENDER_ALLOW),
+
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3e88652670e6..8ac54aaca51a 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3258,6 +3258,108 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return err;
}
+/*
+ * xe_vm_get_property_size - Size of the user buffer needed for @property.
+ *
+ * Returns the number of bytes required, 0 when the value is reported
+ * inline through args->data, or a negative error code for an unknown
+ * property.  Must return a signed type so -EINVAL survives to the caller.
+ */
+static int xe_vm_get_property_size(struct xe_vm *vm, u32 property)
+{
+	int size = 0;
+
+	switch (property) {
+	case DRM_XE_VM_GET_PROPERTY_FAULTS: {
+		/*
+		 * Braced scope: a declaration may not directly follow a case
+		 * label.  Only ban entries with an attached pagefault count.
+		 */
+		struct xe_exec_queue_ban_entry *entry;
+
+		spin_lock(&vm->bans.lock);
+		list_for_each_entry(entry, &vm->bans.list, list) {
+			struct xe_pagefault *pf = entry->pf;
+
+			size += pf ? sizeof(struct drm_xe_ban) : 0;
+		}
+		spin_unlock(&vm->bans.lock);
+		return size;
+	}
+	case DRM_XE_VM_GET_PROPERTY_BANS:
+		spin_lock(&vm->bans.lock);
+		size = vm->bans.len * sizeof(struct drm_xe_ban);
+		spin_unlock(&vm->bans.lock);
+		return size;
+	case DRM_XE_VM_GET_PROPERTY_NUM_RESETS:
+		/* Reported inline in args->data; no buffer required. */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * fill_property_bans - Copy the VM's ban list out to userspace.
+ * @vm: VM whose ban list is reported
+ * @args: ioctl args; args->ptr is the userspace destination buffer
+ * @size: byte size of the destination buffer, as previously reported
+ * @faults_only: skip ban entries that have no associated pagefault
+ *
+ * Snapshot the list into a kernel buffer under the spinlock, then copy it
+ * to userspace outside the lock (copy_to_user() may fault and sleep).
+ * Never copy_from_user() into a bare pointer: the previous revision wrote
+ * @size bytes over an 8-byte stack slot.
+ *
+ * Returns 0 on success, -ENOMEM or -EFAULT on failure.
+ */
+static int fill_property_bans(struct xe_vm *vm,
+			      struct drm_xe_vm_get_property *args,
+			      u32 size, bool faults_only)
+{
+	struct drm_xe_ban __user *usr_ptr = u64_to_user_ptr(args->ptr);
+	struct xe_exec_queue_ban_entry *entry;
+	struct drm_xe_ban *ban_list;
+	struct drm_xe_ban *ban;
+	int ret = 0;
+	int i = 0;
+
+	ban_list = kvzalloc(size, GFP_KERNEL);
+	if (!ban_list)
+		return -ENOMEM;
+
+	spin_lock(&vm->bans.lock);
+	list_for_each_entry(entry, &vm->bans.list, list) {
+		struct xe_pagefault *pf = entry->pf;
+
+		if (faults_only && !pf)
+			continue;
+
+		/*
+		 * The list may have grown between the size query and now;
+		 * never write past the snapshot buffer.
+		 */
+		if ((i + 1) * sizeof(*ban) > size)
+			break;
+
+		ban = &ban_list[i++];
+		ban->exec_queue_id = entry->exec_queue_id;
+		ban->faulted = pf ? 1 : 0;
+		ban->address = pf ? pf->page_addr : 0;
+		ban->address_type = pf ? pf->address_type : 0;
+		ban->address_precision = SZ_4K;
+	}
+	spin_unlock(&vm->bans.lock);
+
+	if (copy_to_user(usr_ptr, ban_list, size))
+		ret = -EFAULT;
+
+	kvfree(ban_list);
+	return ret;
+}
+
+/*
+ * xe_vm_get_property_ioctl - Query per-VM properties (resets, bans, faults).
+ *
+ * Two-call protocol for buffer-backed properties: a first call with
+ * args->size == 0 reports the required size; the second call with the
+ * matching size fills the buffer.  NUM_RESETS is returned inline.
+ *
+ * xe_vm_lookup() takes a VM reference, so every exit path must go
+ * through xe_vm_put() — the previous revision leaked it on all paths.
+ */
+int xe_vm_get_property_ioctl(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct xe_device *xe = to_xe_device(drm);
+	struct xe_file *xef = to_xe_file(file);
+	struct drm_xe_vm_get_property *args = data;
+	struct xe_vm *vm;
+	int size, ret = 0;
+
+	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+		return -EINVAL;
+
+	vm = xe_vm_lookup(xef, args->vm_id);
+	if (XE_IOCTL_DBG(xe, !vm))
+		return -ENOENT;
+
+	/* Signed on purpose: a u32 here made the error check below dead. */
+	size = xe_vm_get_property_size(vm, args->property);
+	if (size < 0) {
+		ret = size;
+		goto put_vm;
+	} else if (args->size != size) {
+		/* Size-query call, or a stale/bogus size from userspace. */
+		if (args->size)
+			ret = -EINVAL;
+		else
+			args->size = size;
+		goto put_vm;
+	}
+
+	switch (args->property) {
+	case DRM_XE_VM_GET_PROPERTY_FAULTS:
+		ret = fill_property_bans(vm, args, size, true);
+		break;
+	case DRM_XE_VM_GET_PROPERTY_BANS:
+		ret = fill_property_bans(vm, args, size, false);
+		break;
+	case DRM_XE_VM_GET_PROPERTY_NUM_RESETS:
+		args->data = atomic_read(&vm->reset_count);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+put_vm:
+	xe_vm_put(vm);
+	return ret;
+}
+
/**
* xe_vm_bind_kernel_bo - bind a kernel BO to a VM
* @vm: VM to bind the BO to
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 9f8457ceb905..0338f42f7a71 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -184,6 +184,8 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int xe_vm_get_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
--
2.43.0
next prev parent reply other threads:[~2025-02-27 19:15 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-27 19:14 [PATCH v2 0/8] drm/xe/xe_vm: Implement xe_vm_get_property_ioctl Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 1/8] drm/xe/xe_gt_pagefault: Disallow writes to read-only VMAs Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 2/8] drm/xe/xe_exec_queue: Add ID param to exec queue struct Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 3/8] drm/xe/xe_gt_pagefault: Migrate pagefault struct to header Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 4/8] drm/xe/xe_vm: Add per VM pagefault info Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 5/8] drm/xe/xe_vm: Add per VM reset stats Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 6/8] drm/xe/uapi: Define drm_xe_vm_get_property Jonathan Cavitt
2025-02-27 19:14 ` [PATCH v2 7/8] drm/xe/xe_gt_pagefault: Add address_type field to pagefaults Jonathan Cavitt
2025-02-27 19:14 ` Jonathan Cavitt [this message]
2025-02-28 3:44 ` [PATCH v2 8/8] drm/xe/xe_vm: Implement xe_vm_get_property_ioctl kernel test robot
2025-02-27 21:01 ` ✓ CI.Patch_applied: success for drm/xe/xe_vm: Implement xe_vm_get_property_ioctl (rev2) Patchwork
2025-02-27 21:01 ` ✗ CI.checkpatch: warning " Patchwork
2025-02-27 21:02 ` ✓ CI.KUnit: success " Patchwork
2025-02-27 21:19 ` ✓ CI.Build: " Patchwork
2025-02-27 21:21 ` ✓ CI.Hooks: " Patchwork
2025-02-27 21:23 ` ✓ CI.checksparse: " Patchwork
2025-02-27 21:43 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-02-28 2:08 ` ✗ Xe.CI.Full: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250227191457.84035-9-jonathan.cavitt@intel.com \
--to=jonathan.cavitt@intel.com \
--cc=alex.zuo@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jianxun.zhang@intel.com \
--cc=joonas.lahtinen@linux.intel.com \
--cc=matthew.brost@intel.com \
--cc=saurabhg.gupta@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox