From: Matthew Brost <matthew.brost@intel.com>
To: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
<dri-devel@lists.freedesktop.org>, <apopple@nvidia.com>,
<airlied@gmail.com>, <christian.koenig@amd.com>,
<simona.vetter@ffwll.ch>, <felix.kuehling@amd.com>,
<dakr@kernel.org>
Subject: Re: [PATCH v2 26/29] drm/xe: Add SVM debug
Date: Mon, 16 Dec 2024 17:05:20 -0800 [thread overview]
Message-ID: <Z2DOUDyzRJp0ICni@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <db162f5ef89b9d48c9fe300918120a4537b7b0d1.camel@linux.intel.com>
On Mon, Dec 02, 2024 at 01:33:29PM +0100, Thomas Hellström wrote:
> On Tue, 2024-10-15 at 20:25 -0700, Matthew Brost wrote:
> > Add some useful SVM debug logging.
> >
> > v2:
> > - Upadte logging with latest structure layout
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_pt.c | 8 +++
> > drivers/gpu/drm/xe/xe_svm.c | 101 +++++++++++++++++++++++++++++++---
> > --
> > drivers/gpu/drm/xe/xe_svm.h | 2 +
> > 3 files changed, 99 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> > index 687abd1a5e74..75f548ebe2b3 100644
> > --- a/drivers/gpu/drm/xe/xe_pt.c
> > +++ b/drivers/gpu/drm/xe/xe_pt.c
> > @@ -632,6 +632,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct
> > xe_vma *vma,
> > /* Move this entire thing to xe_svm.c? */
> > xe_svm_notifier_lock(xe_vma_vm(vma));
> > if (!xe_svm_range_pages_valid(range)) {
> > + xe_svm_range_debug(range, "BIND PREPARE -
> > RETRY");
> > xe_svm_notifier_unlock(xe_vma_vm(vma));
> > return -EAGAIN;
> > }
> > @@ -640,6 +641,10 @@ xe_pt_stage_bind(struct xe_tile *tile, struct
> > xe_vma *vma,
> > range->base.va.end - range-
> > >base.va.start,
> > &curs);
> > is_devmem = xe_res_is_vram(&curs);
> > + if (is_devmem)
> > + xe_svm_range_debug(range, "BIND
> > PREPARE - DMA VRAM");
> > + else
> > + xe_svm_range_debug(range, "BIND
> > PREPARE - DMA");
> > } else {
> > xe_assert(xe, false);
> > }
> > @@ -1397,10 +1402,13 @@ static int xe_pt_svm_pre_commit(struct
> > xe_migrate_pt_update *pt_update)
> > if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
> > continue;
> >
> > + xe_svm_range_debug(range, "PRE-COMMIT");
> > +
> > xe_assert(vm->xe, xe_vma_is_system_allocator(op-
> > >map_range.vma));
> > xe_assert(vm->xe, op->subop ==
> > XE_VMA_SUBOP_MAP_RANGE);
> >
> > if (!xe_svm_range_pages_valid(range)) {
> > + xe_svm_range_debug(range, "PRE-COMMIT -
> > RETRY");
> > xe_svm_notifier_unlock(vm);
> > return -EAGAIN;
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_svm.c
> > b/drivers/gpu/drm/xe/xe_svm.c
> > index 555bc71ae523..acf2a3750f38 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.c
> > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > @@ -14,6 +14,18 @@
> > #include "xe_vm.h"
> > #include "xe_vm_types.h"
> >
> > +static bool xe_svm_range_in_vram(struct xe_svm_range *range)
> > +{
> > + /* Not reliable without notifier lock */
>
> lockdep assert?
>
Ah, no. We call this from the debug code, which doesn't hold this lock, so
it is best effort there. This comment is saying: don't call this and
expect it to be reliable without holding this lock.
> > + return range->base.flags.has_devmem_pages;
> > +}
> > +
> > +static bool xe_svm_range_has_vram_binding(struct xe_svm_range
> > *range)
> > +{
> > + /* Not reliable without notifier lock */
>
> lockdep assert?
>
Same here.
> > + return xe_svm_range_in_vram(range) && range->tile_present;
> > +}
> > +
> > static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
> > {
> > return container_of(gpusvm, struct xe_vm, svm.gpusvm);
> > @@ -24,6 +36,23 @@ static struct xe_vm *range_to_vm(struct
> > drm_gpusvm_range *r)
> > return gpusvm_to_vm(r->gpusvm);
> > }
> >
> > +#define range_debug(r__,
> > operaton__) \
> > + vm_dbg(&range_to_vm(&(r__)->base)->xe-
> > >drm, \
> > + "%s: asid=%u, gpusvm=0x%016llx, vram=%d,%d,
> > seqno=%lu, " \
> > + "start=0x%014llx, end=0x%014llx,
> > size=%llu", \
> > + (operaton__), range_to_vm(&(r__)->base)-
> > >usm.asid, \
> > + (u64)(r__)-
> > >base.gpusvm, \
> > + xe_svm_range_in_vram((r__)) ? 1 :
> > 0, \
> > + xe_svm_range_has_vram_binding((r__)) ? 1 :
> > 0, \
> > + (r__)-
> > >base.notifier_seq, \
> > + (r__)->base.va.start, (r__)-
> > >base.va.end, \
> > + (r__)->base.va.end - (r__)->base.va.start)
> > +
> > +void xe_svm_range_debug(struct xe_svm_range *range, const char
> > *operation)
> > +{
> > + range_debug(range, operation);
> > +}
> > +
> > static void *xe_svm_devm_owner(struct xe_device *xe)
> > {
> > return xe;
> > @@ -61,6 +90,8 @@ xe_svm_garbage_collector_add_range(struct xe_vm
> > *vm, struct xe_svm_range *range,
> > {
> > struct xe_device *xe = vm->xe;
> >
> > + range_debug(range, "GARBAGE COLLECTOR ADD");
> > +
> > drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
> >
> > spin_lock(&vm->svm.garbage_collector.lock);
> > @@ -84,10 +115,14 @@ xe_svm_range_notifier_event_begin(struct xe_vm
> > *vm, struct drm_gpusvm_range *r,
> > u8 tile_mask = 0;
> > u8 id;
> >
> > + range_debug(range, "NOTIFIER");
> > +
> > /* Skip if already unmapped or if no binding exist */
> > if (range->base.flags.unmapped || !range->tile_present)
> > return 0;
> >
> > + range_debug(range, "NOTIFIER - EXECUTE");
> > +
> > /* Adjust invalidation to range boundaries */
> > if (range->base.va.start < mmu_range->start)
> > *adj_start = range->base.va.start;
> > @@ -139,6 +174,11 @@ static void xe_svm_invalidate(struct drm_gpusvm
> > *gpusvm,
> > if (xe_vm_is_closed(vm))
> > return;
> >
> > + vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
> > + "INVALIDATE: asid=%u, gpusvm=0x%016llx, seqno=%lu,
> > start=0x%016lx, end=0x%016lx, event=%d",
> > + vm->usm.asid, (u64)gpusvm, notifier-
> > >notifier.invalidate_seq,
> > + mmu_range->start, mmu_range->end, mmu_range->event);
> > +
> > /* Adjust invalidation to notifier boundaries */
> > if (adj_start < notifier->interval.start)
> > adj_start = notifier->interval.start;
> > @@ -218,8 +258,12 @@ static int __xe_svm_garbage_collector(struct
> > xe_vm *vm,
> > {
> > struct dma_fence *fence;
> >
> > - if (IS_DGFX(vm->xe) && range->base.flags.partial_unmap)
> > + range_debug(range, "GARBAGE COLLECTOR");
> > +
> > + if (IS_DGFX(vm->xe) && range->base.flags.partial_unmap) {
> > + range_debug(range, "GARBAGE COLLECTOT - EVICT");
> Typo COLLECTOT
>
Will fix.
Matt
> With those fixed,
> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>
>
>
>
> > drm_gpusvm_range_evict(&vm->svm.gpusvm, &range-
> > >base);
> > + }
> >
> > xe_vm_lock(vm, false);
> > fence = xe_vm_range_unbind(vm, range);
> > @@ -350,16 +394,23 @@ static int xe_svm_copy(struct page **pages,
> > dma_addr_t *dma_addr,
> > int incr = (match && last) ? 1 : 0;
> >
> > if (vram_addr != VRAM_ADDR_INVALID) {
> > - if (sram)
> > + if (sram) {
> > + vm_dbg(&tile->xe->drm,
> > + "COPY TO SRAM -
> > 0x%016llx -> 0x%016llx, NPAGES=%ld",
> > + vram_addr,
> > dma_addr[pos], i - pos + incr);
> > __fence =
> > xe_migrate_from_vram(tile->migrate,
> >
> > i - pos + incr,
> >
> > vram_addr,
> >
> > dma_addr + pos);
> > - else
> > + } else {
> > + vm_dbg(&tile->xe->drm,
> > + "COPY TO VRAM -
> > 0x%016llx -> 0x%016llx, NPAGES=%ld",
> > + dma_addr[pos],
> > vram_addr, i - pos + incr);
> > __fence =
> > xe_migrate_to_vram(tile->migrate,
> >
> > i - pos + incr,
> >
> > dma_addr + pos,
> >
> > vram_addr);
> > + }
> > if (IS_ERR(__fence)) {
> > err = PTR_ERR(__fence);
> > goto err_out;
> > @@ -377,14 +428,21 @@ static int xe_svm_copy(struct page **pages,
> > dma_addr_t *dma_addr,
> > }
> >
> > if (!match && last && dma_addr[i] && spage)
> > {
> > - if (sram)
> > + if (sram) {
> > + vm_dbg(&tile->xe->drm,
> > + "COPY TO SRAM -
> > 0x%016llx -> 0x%016llx, NPAGES=%d",
> > + vram_addr,
> > dma_addr[pos], 1);
> > __fence =
> > xe_migrate_from_vram(tile->migrate, 1,
> >
> > vram_addr,
> >
> > dma_addr + pos);
> > - else
> > + } else {
> > + vm_dbg(&tile->xe->drm,
> > + "COPY TO VRAM -
> > 0x%016llx -> 0x%016llx, NPAGES=%d",
> > + dma_addr[pos],
> > vram_addr, 1);
> > __fence =
> > xe_migrate_to_vram(tile->migrate, 1,
> >
> > dma_addr + pos,
> >
> > vram_addr);
> > + }
> > if (IS_ERR(__fence)) {
> > err = PTR_ERR(__fence);
> > goto err_out;
> > @@ -554,12 +612,14 @@ static struct xe_bo *xe_svm_alloc_vram(struct
> > xe_vm *vm, struct xe_tile *tile,
> > const struct drm_gpusvm_ctx
> > *ctx)
> > {
> > struct xe_mem_region *mr = tile_to_mr(tile);
> > + struct drm_buddy *buddy = tile_to_buddy(tile);
> > struct drm_buddy_block *block;
> > struct list_head *blocks;
> > struct xe_bo *bo;
> > ktime_t end = 0;
> > int err;
> >
> > + range_debug(range, "ALLOCATE VRAM");
> > retry:
> > xe_vm_lock(vm, false);
> > bo = xe_bo_create(tile_to_xe(tile), tile, vm, range-
> > >base.va.end -
> > @@ -582,8 +642,13 @@ static struct xe_bo *xe_svm_alloc_vram(struct
> > xe_vm *vm, struct xe_tile *tile,
> > range->base.va.start);
> >
> > blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)-
> > >blocks;
> > - list_for_each_entry(block, blocks, link)
> > + list_for_each_entry(block, blocks, link) {
> > + vm_dbg(&vm->xe->drm, "ALLOC VRAM: asid=%u,
> > gpusvm=0x%016llx, pfn=%llu, npages=%llu",
> > + vm->usm.asid, (u64)&vm->svm.gpusvm,
> > + block_offset_to_pfn(mr,
> > drm_buddy_block_offset(block)),
> > + drm_buddy_block_size(buddy, block) >>
> > PAGE_SHIFT);
> > block->private = mr;
> > + }
> >
> > /*
> > * Take ref because as soon as drm_gpusvm_migrate_to_devmem
> > succeeds the
> > @@ -637,6 +702,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> > struct xe_vma *vma,
> > if (xe_svm_range_is_valid(range, tile))
> > return 0;
> >
> > + range_debug(range, "PAGE FAULT");
> > +
> > /* XXX: Add migration policy, for now migrate range once */
> > if (IS_DGFX(vm->xe) && !range->migrated &&
> > range->base.flags.migrate_devmem &&
> > @@ -646,25 +713,33 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> > struct xe_vma *vma,
> > bo = xe_svm_alloc_vram(vm, tile, range, &ctx);
> > if (IS_ERR(bo)) {
> > drm_info(&vm->xe->drm,
> > - "VRAM allocation failed, falling
> > back to retrying, asid=%u, errno %ld\n",
> > - vm->usm.asid, PTR_ERR(bo));
> > + "VRAM allocation failed, falling
> > back to retrying, asid=%u, gpusvm=0x%016llx, errno %ld\n",
> > + vm->usm.asid, (u64)&vm->svm.gpusvm,
> > + PTR_ERR(bo));
> > bo = NULL;
> > goto retry;
> > }
> > }
> >
> > + range_debug(range, "GET PAGES");
> > err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
> > - if (err == -EFAULT || err == -EPERM) /* Corner where CPU
> > mappings have change */
> > if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
> > { /* Corner where CPU mappings have change */
> > - if (err == -EOPNOTSUPP)
> > + if (err == -EOPNOTSUPP) {
> > + range_debug(range, "PAGE FAULT - EVICT
> > PAGES");
> > drm_gpusvm_range_evict(&vm->svm.gpusvm,
> > &range->base);
> > + }
> > drm_info(&vm->xe->drm,
> > "Get pages failed, falling back to
> > retrying, asid=%u, gpusvm=0x%016llx, errno %d\n",
> > vm->usm.asid, (u64)&vm->svm.gpusvm, err);
> > + range_debug(range, "PAGE FAULT - RETRY PAGES");
> > goto retry;
> > }
> > - if (err)
> > + if (err) {
> > + range_debug(range, "PAGE FAULT - FAIL PAGE
> > COLLECT");
> > goto err_out;
> > + }
> > +
> > + range_debug(range, "PAGE FAULT - BIND");
> >
> > retry_bind:
> > drm_exec_init(&exec, 0, 0);
> > @@ -680,8 +755,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> > struct xe_vma *vma,
> > if (IS_ERR(fence)) {
> > drm_exec_fini(&exec);
> > err = PTR_ERR(fence);
> > - if (err == -EAGAIN)
> > + if (err == -EAGAIN) {
> > + range_debug(range, "PAGE FAULT -
> > RETRY BIND");
> > goto retry;
> > + }
> > if (xe_vm_validate_should_retry(&exec, err,
> > &end))
> > goto retry_bind;
> > goto err_out;
> > diff --git a/drivers/gpu/drm/xe/xe_svm.h
> > b/drivers/gpu/drm/xe/xe_svm.h
> > index 5b9d5ac9ef72..139acee41b42 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.h
> > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > @@ -36,6 +36,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm,
> > struct xe_vma *vma,
> > bool atomic);
> > bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
> >
> > +void xe_svm_range_debug(struct xe_svm_range *range, const char
> > *operation);
> > +
> > int xe_svm_bo_evict(struct xe_bo *bo);
> >
> > static inline bool xe_svm_range_pages_valid(struct xe_svm_range
> > *range)
>
next prev parent reply other threads:[~2024-12-17 1:04 UTC|newest]
Thread overview: 129+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-16 3:24 [PATCH v2 00/29] Introduce GPU SVM and Xe SVM implementation Matthew Brost
2024-10-16 3:24 ` [PATCH v2 01/29] drm/xe: Retry BO allocation Matthew Brost
2024-10-16 3:24 ` [PATCH v2 02/29] mm/migrate: Add migrate_device_prepopulated_range Matthew Brost
2024-10-16 4:04 ` Alistair Popple
2024-10-16 4:46 ` Matthew Brost
2024-10-17 0:56 ` Matthew Brost
2024-10-17 1:49 ` Alistair Popple
2024-10-17 2:45 ` Matthew Brost
2024-10-17 3:21 ` Alistair Popple
2024-10-17 4:07 ` Matthew Brost
2024-10-17 5:49 ` Alistair Popple
2024-10-17 15:40 ` Matthew Brost
2024-10-17 21:58 ` Alistair Popple
2024-10-18 0:54 ` Matthew Brost
2024-10-18 5:59 ` Alistair Popple
2024-10-18 6:39 ` Mika Penttilä
2024-10-18 7:16 ` Matthew Brost
2024-10-18 7:33 ` Matthew Brost
2024-10-18 7:34 ` Alistair Popple
2024-10-18 7:57 ` Matthew Brost
2024-10-18 4:02 ` Mika Penttilä
2024-10-18 5:55 ` Alistair Popple
2024-10-16 3:24 ` [PATCH v2 03/29] mm/migrate: Trylock device page in do_swap_page Matthew Brost
2024-10-16 4:00 ` Alistair Popple
2024-10-16 4:41 ` Matthew Brost
2024-10-17 1:51 ` Alistair Popple
2024-10-25 0:31 ` Matthew Brost
2024-10-29 6:37 ` Alistair Popple
2024-11-01 17:19 ` Matthew Brost
2024-11-28 23:31 ` Alistair Popple
2024-12-13 22:16 ` Matthew Brost
2024-12-14 5:59 ` Matthew Brost
2024-10-16 3:24 ` [PATCH v2 04/29] drm/pagemap: Add DRM pagemap Matthew Brost
2024-10-16 3:24 ` [PATCH v2 05/29] drm/gpusvm: Add support for GPU Shared Virtual Memory Matthew Brost
2024-10-31 18:58 ` Thomas Hellström
2024-11-04 22:53 ` Matthew Brost
2024-11-04 15:25 ` Thomas Hellström
2024-11-04 17:21 ` Matthew Brost
2024-11-04 18:59 ` Thomas Hellström
2024-11-04 23:07 ` Matthew Brost
2024-11-05 10:22 ` Thomas Hellström
2024-11-05 16:12 ` Matthew Brost
2024-11-05 16:28 ` Thomas Hellström
2024-11-05 14:48 ` Thomas Hellström
2024-11-05 16:32 ` Matthew Brost
2024-11-20 3:00 ` Gwan-gyeong Mun
2024-11-29 0:00 ` Alistair Popple
2024-12-14 1:16 ` Matthew Brost
2024-10-16 3:24 ` [PATCH v2 06/29] drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATON flag Matthew Brost
2024-11-18 13:44 ` Thomas Hellström
2024-11-19 16:01 ` Matthew Brost
2024-10-16 3:24 ` [PATCH v2 07/29] drm/xe: Add SVM init / close / fini to faulting VMs Matthew Brost
2024-11-19 12:13 ` Thomas Hellström
2024-11-19 16:22 ` Matthew Brost
2024-10-16 3:24 ` [PATCH v2 08/29] drm/xe: Add dma_addr res cursor Matthew Brost
2024-11-19 12:15 ` Thomas Hellström
2024-11-19 16:24 ` Matthew Brost
2024-10-16 3:24 ` [PATCH v2 09/29] drm/xe: Add SVM range invalidation Matthew Brost
2024-11-19 13:56 ` Thomas Hellström
2024-12-11 19:01 ` Matthew Brost
2024-12-14 23:11 ` Matthew Brost
2024-12-16 10:01 ` Thomas Hellström
2024-12-16 16:09 ` Matthew Brost
2024-12-16 17:35 ` Thomas Hellström
2024-10-16 3:24 ` [PATCH v2 10/29] drm/gpuvm: Add DRM_GPUVA_OP_USER Matthew Brost
2024-11-19 13:57 ` Thomas Hellström
2024-11-19 16:26 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 11/29] drm/xe: Add (re)bind to SVM page fault handler Matthew Brost
2024-11-19 14:26 ` Thomas Hellström
2024-12-11 19:07 ` Matthew Brost
2024-12-16 10:03 ` Thomas Hellström
2024-10-16 3:25 ` [PATCH v2 12/29] drm/xe: Add SVM garbage collector Matthew Brost
2024-11-19 14:45 ` Thomas Hellström
2024-12-11 19:17 ` Matthew Brost
2024-12-16 10:36 ` Thomas Hellström
2024-12-16 23:46 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 13/29] drm/xe: Add unbind to " Matthew Brost
2024-11-19 15:31 ` Thomas Hellström
2024-11-19 23:44 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 14/29] drm/xe: Do not allow system allocator VMA unbind if the GPU has bindings Matthew Brost
2024-11-19 16:33 ` Thomas Hellström
2024-11-19 23:37 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 15/29] drm/xe: Enable system allocator uAPI Matthew Brost
2024-11-19 16:34 ` Thomas Hellström
2024-10-16 3:25 ` [PATCH v2 16/29] drm/xe: Add migrate layer functions for SVM support Matthew Brost
2024-11-19 16:45 ` Thomas Hellström
2024-11-19 23:08 ` Matthew Brost
2024-11-20 8:04 ` Thomas Hellström
2024-12-11 19:11 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 17/29] drm/xe: Add SVM device memory mirroring Matthew Brost
2024-11-19 16:50 ` Thomas Hellström
2024-11-20 3:05 ` Gwan-gyeong Mun
2024-12-11 19:44 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 18/29] drm/xe: Add drm_gpusvm_devmem to xe_bo Matthew Brost
2024-11-19 16:51 ` Thomas Hellström
2024-12-15 4:38 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 19/29] drm/xe: Add GPUSVM devic memory copy vfunc functions Matthew Brost
2024-12-02 10:13 ` Thomas Hellström
2024-12-12 3:59 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 20/29] drm/xe: Add drm_pagemap ops to SVM Matthew Brost
2024-10-16 3:25 ` [PATCH v2 21/29] drm/xe: Add Xe SVM populate_devmem_pfn vfunc Matthew Brost
2024-12-02 10:19 ` Thomas Hellström
2024-10-16 3:25 ` [PATCH v2 22/29] drm/xe: Add Xe SVM devmem_release vfunc Matthew Brost
2024-12-02 10:21 ` Thomas Hellström
2024-10-16 3:25 ` [PATCH v2 23/29] drm/xe: Add BO flags required for SVM Matthew Brost
2024-12-02 10:44 ` Thomas Hellström
2024-12-11 21:42 ` Matthew Brost
2024-12-16 10:44 ` Thomas Hellström
2024-10-16 3:25 ` [PATCH v2 24/29] drm/xe: Add SVM VRAM migration Matthew Brost
2024-12-02 12:06 ` Thomas Hellström
2024-12-11 20:17 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 25/29] drm/xe: Basic SVM BO eviction Matthew Brost
2024-12-02 12:27 ` Thomas Hellström
2024-12-11 19:47 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 26/29] drm/xe: Add SVM debug Matthew Brost
2024-12-02 12:33 ` Thomas Hellström
2024-12-17 1:05 ` Matthew Brost [this message]
2024-10-16 3:25 ` [PATCH v2 27/29] drm/xe: Add modparam for SVM notifier size Matthew Brost
2024-12-02 12:37 ` Thomas Hellström
2024-12-11 19:50 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 28/29] drm/xe: Add always_migrate_to_vram modparam Matthew Brost
2024-12-02 12:40 ` Thomas Hellström
2024-12-11 19:51 ` Matthew Brost
2024-10-16 3:25 ` [PATCH v2 29/29] drm/doc: gpusvm: Add GPU SVM documentation Matthew Brost
2024-12-02 13:00 ` Thomas Hellström
2024-12-17 23:14 ` Matthew Brost
2024-10-16 3:30 ` ✓ CI.Patch_applied: success for Introduce GPU SVM and Xe SVM implementation (rev2) Patchwork
2024-10-16 3:31 ` ✗ CI.checkpatch: warning " Patchwork
2024-10-16 3:31 ` ✗ CI.KUnit: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Z2DOUDyzRJp0ICni@lstrano-desk.jf.intel.com \
--to=matthew.brost@intel.com \
--cc=airlied@gmail.com \
--cc=apopple@nvidia.com \
--cc=christian.koenig@amd.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=felix.kuehling@amd.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=simona.vetter@ffwll.ch \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox