From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Subject: [Intel-xe] [RFC PATCH 1/7] drm/xe: Use a flags field instead of bools for VMA create
Date: Wed, 6 Dec 2023 21:57:23 -0800 [thread overview]
Message-ID: <20231207055729.438642-2-matthew.brost@intel.com> (raw)
In-Reply-To: <20231207055729.438642-1-matthew.brost@intel.com>
Use a flags field instead of several bools for VMA create as it is
easier to read and less bug-prone.
Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 64 ++++++++++++++++++++------------------
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e09050f16f07..44b2972d5d5f 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -860,17 +860,20 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return fence;
}
+#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
u64 start, u64 end,
- bool read_only,
- bool is_null,
- u16 pat_index)
+ u16 pat_index, unsigned int flags)
{
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
+ bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
+ bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -2242,7 +2245,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
}
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- bool read_only, bool is_null, u16 pat_index)
+ u16 pat_index, unsigned int flags)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
struct xe_vma *vma;
@@ -2257,8 +2260,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
}
vma = xe_vma_create(vm, bo, op->gem.offset,
op->va.addr, op->va.addr +
- op->va.range - 1, read_only, is_null,
- pat_index);
+ op->va.range - 1, pat_index, flags);
if (bo)
xe_bo_unlock(bo);
@@ -2384,7 +2386,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
bool first = list_empty(ops_list);
+ unsigned int flags = 0;
INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, ops_list);
@@ -2400,10 +2404,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- struct xe_vma *vma;
+ flags |= op->map.read_only ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->map.is_null ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
- vma = new_vma(vm, &op->base.map, op->map.read_only,
- op->map.is_null, op->map.pat_index);
+ vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2419,16 +2426,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
op->remap.range = xe_vma_size(old);
if (op->base.remap.prev) {
- struct xe_vma *vma;
- bool read_only =
- op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY;
- bool is_null =
- op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE;
-
- vma = new_vma(vm, op->base.remap.prev, read_only,
- is_null, old->pat_index);
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+
+ vma = new_vma(vm, op->base.remap.prev,
+ old->pat_index, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2451,17 +2457,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
}
if (op->base.remap.next) {
- struct xe_vma *vma;
- bool read_only =
- op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY;
-
- bool is_null =
- op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE;
-
- vma = new_vma(vm, op->base.remap.next, read_only,
- is_null, old->pat_index);
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+
+ vma = new_vma(vm, op->base.remap.next,
+ old->pat_index, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
--
2.34.1
next prev parent reply other threads:[~2023-12-07 5:57 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-07 5:57 [Intel-xe] [RFC PATCH 0/7] Syncs vs async exec/bind uAPI change Matthew Brost
2023-12-07 5:57 ` Matthew Brost [this message]
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 2/7] drm/xe: Use a flags field instead of bools for sync parse Matthew Brost
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 3/7] drm/xe: Allow num_binds == 0 in VM bind IOCTL Matthew Brost
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 4/7] drm/xe: Allow num_batch_buffer == 0 in exec IOCTL Matthew Brost
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 5/7] drm/xe: Take in-syncs into account when num_execs or num_binds == 0 Matthew Brost
2023-12-08 15:04 ` Thomas Hellström
2023-12-12 17:18 ` Matthew Brost
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 6/7] drm/xe: Add last fence as dependency for jobs on user exec queues Matthew Brost
2023-12-07 5:57 ` [Intel-xe] [RFC PATCH 7/7] drm/xe/uapi: Uniform async vs sync handling Matthew Brost
2023-12-07 19:51 ` Rodrigo Vivi
2023-12-08 15:00 ` Thomas Hellström
2023-12-08 9:45 ` Matthew Brost
2023-12-11 15:43 ` Thomas Hellström
2023-12-11 16:49 ` Matthew Brost
2023-12-11 18:11 ` Thomas Hellström
2023-12-11 21:11 ` Matthew Brost
2023-12-12 8:43 ` Thomas Hellström
2023-12-08 12:24 ` Matthew Brost
2023-12-11 15:34 ` Thomas Hellström
2023-12-11 16:50 ` Matthew Brost
2023-12-07 7:38 ` [Intel-xe] ✗ CI.Patch_applied: failure for Syncs vs async exec/bind uAPI change Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231207055729.438642-2-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox