From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: Matthew Auld <matthew.auld@intel.com>, intel-gfx@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH 14/20] drm/i915/selftests: exercise mmap migration
Date: Thu, 3 Feb 2022 10:01:01 +0100
Message-ID: <32bffc9b-38d0-88ab-91cc-91eb8dfcd674@linux.intel.com>
In-Reply-To: <20220126152155.3070602-15-matthew.auld@intel.com>


On 1/26/22 16:21, Matthew Auld wrote:
> Exercise each of the migration scenarios, verifying that the final
> placement and buffer contents match our expectations.
>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> ---
>   .../drm/i915/gem/selftests/i915_gem_mman.c    | 306 ++++++++++++++++++
>   1 file changed, 306 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index ba29767348be..d2c1071df98a 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -10,6 +10,7 @@
>   #include "gt/intel_gpu_commands.h"
>   #include "gt/intel_gt.h"
>   #include "gt/intel_gt_pm.h"
> +#include "gt/intel_migrate.h"
>   #include "gem/i915_gem_region.h"
>   #include "huge_gem_object.h"
>   #include "i915_selftest.h"
> @@ -999,6 +1000,310 @@ static int igt_mmap(void *arg)
>   	return 0;
>   }
>   
> +static void igt_close_objects(struct drm_i915_private *i915,
> +			      struct list_head *objects)
> +{
> +	struct drm_i915_gem_object *obj, *on;
> +
> +	list_for_each_entry_safe(obj, on, objects, st_link) {
> +		i915_gem_object_lock(obj, NULL);
> +		if (i915_gem_object_has_pinned_pages(obj))
> +			i915_gem_object_unpin_pages(obj);
> +		/* No polluting the memory region between tests */
> +		__i915_gem_object_put_pages(obj);
> +		i915_gem_object_unlock(obj);
> +		list_del(&obj->st_link);
> +		i915_gem_object_put(obj);
> +	}
> +
> +	cond_resched();
> +
> +	i915_gem_drain_freed_objects(i915);
> +}
> +
> +static void igt_make_evictable(struct list_head *objects)
> +{
> +	struct drm_i915_gem_object *obj;
> +
> +	list_for_each_entry(obj, objects, st_link) {
> +		i915_gem_object_lock(obj, NULL);
> +		if (i915_gem_object_has_pinned_pages(obj))
> +			i915_gem_object_unpin_pages(obj);
> +		i915_gem_object_unlock(obj);
> +	}
> +
> +	cond_resched();
> +}
> +
> +static int igt_fill_mappable(struct intel_memory_region *mr,
> +			     struct list_head *objects)
> +{
> +	u64 size, total;
> +	int err;
> +
> +	total = 0;
> +	size = mr->io_size;
> +	do {
> +		struct drm_i915_gem_object *obj;
> +
> +		obj = i915_gem_object_create_region(mr, size, 0, 0);
> +		if (IS_ERR(obj)) {
> +			err = PTR_ERR(obj);
> +			goto err_close;
> +		}
> +
> +		list_add(&obj->st_link, objects);
> +
> +		err = i915_gem_object_pin_pages_unlocked(obj);
> +		if (err) {
> +			if (err != -ENXIO && err != -ENOMEM)
> +				goto err_close;
> +
> +			if (size == mr->min_page_size) {
> +				err = 0;
> +				break;
> +			}
> +
> +			size >>= 1;
> +			continue;
> +		}
> +
> +		total += obj->base.size;
> +	} while (1);
> +
> +	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
> +	return 0;
> +
> +err_close:
> +	igt_close_objects(mr->i915, objects);
> +	return err;
> +}
> +
> +static int ___igt_mmap_migrate(struct drm_i915_private *i915,
> +			       struct drm_i915_gem_object *obj,
> +			       unsigned long addr,
> +			       bool unfaultable)
> +{
> +	struct vm_area_struct *area;
> +	int err = 0, i;
> +
> +	pr_info("igt_mmap(%s, %d) @ %lx\n",
> +		 obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
> +
> +	mmap_read_lock(current->mm);
> +	area = vma_lookup(current->mm, addr);
> +	mmap_read_unlock(current->mm);
> +	if (!area) {
> +		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
> +		       obj->mm.region->name);
> +		err = -EINVAL;
> +		goto out_unmap;
> +	}
> +
> +	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
> +		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
> +		u32 x;
> +
> +		if (get_user(x, ux)) {
> +			err = -EFAULT;
> +			if (!unfaultable) {
> +				pr_err("%s: Unable to read from mmap, offset:%zd\n",
> +				       obj->mm.region->name, i * sizeof(x));
> +				goto out_unmap;
> +			}
> +
> +			continue;
> +		}
> +
> +		if (unfaultable) {
> +			pr_err("%s: Faulted unmappable memory\n",
> +			       obj->mm.region->name);
> +			err = -EINVAL;
> +			goto out_unmap;
> +		}
> +
> +		if (x != expand32(POISON_INUSE)) {
> +			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
> +			       obj->mm.region->name,
> +			       i * sizeof(x), x, expand32(POISON_INUSE));
> +			err = -EINVAL;
> +			goto out_unmap;
> +		}
> +
> +		x = expand32(POISON_FREE);
> +		if (put_user(x, ux)) {
> +			pr_err("%s: Unable to write to mmap, offset:%zd\n",
> +			       obj->mm.region->name, i * sizeof(x));
> +			err = -EFAULT;
> +			goto out_unmap;
> +		}
> +	}
> +
> +	if (unfaultable) {
> +		if (err == -EFAULT)
> +			err = 0;
> +	} else {
> +		obj->flags &= ~I915_BO_ALLOC_TOPDOWN;
> +		err = wc_check(obj);
> +	}
> +out_unmap:
> +	vm_munmap(addr, obj->base.size);
> +	return err;
> +}
> +
> +#define IGT_MMAP_MIGRATE_TOPDOWN     (1<<0)
> +#define IGT_MMAP_MIGRATE_FILL        (1<<1)
> +#define IGT_MMAP_MIGRATE_EVICTABLE   (1<<2)
> +#define IGT_MMAP_MIGRATE_UNFAULTABLE (1<<3)
> +static int __igt_mmap_migrate(struct intel_memory_region **placements,
> +			      int n_placements,
> +			      struct intel_memory_region *expected_mr,
> +			      unsigned int flags)
> +{
> +	struct drm_i915_private *i915 = placements[0]->i915;
> +	struct drm_i915_gem_object *obj;
> +	struct i915_gem_ww_ctx ww;
> +	struct i915_request *rq = NULL;
> +	unsigned long addr;
> +	LIST_HEAD(objects);
> +	u64 offset;
> +	int err;
> +
> +	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
> +					    placements,
> +					    n_placements);
> +	if (IS_ERR(obj))
> +		return PTR_ERR(obj);
> +
> +	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
> +		obj->flags |= I915_BO_ALLOC_TOPDOWN;
> +
> +	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
> +	if (err)
> +		goto out_put;
> +
> +	/*
> +	 * This will eventually create a GEM context, due to opening a dummy
> +	 * drm file, which needs a tiny amount of mappable device memory for
> +	 * the top-level paging structures (and perhaps scratch), so make sure
> +	 * we allocate early, to avoid tears.
> +	 */
> +	addr = igt_mmap_offset(i915, offset, obj->base.size,
> +			       PROT_WRITE, MAP_SHARED);
> +	if (IS_ERR_VALUE(addr)) {
> +		err = addr;
> +		goto out_put;
> +	}
> +
> +	if (flags & IGT_MMAP_MIGRATE_FILL) {
> +		err = igt_fill_mappable(placements[0], &objects);
> +		if (err)
> +			goto out_put;
> +	}
> +
> +	for_i915_gem_ww(&ww, err, true) {

Do we need a full ww transaction here? Wouldn't it be sufficient to 
just lock the object with a NULL ww context?


> +		err = i915_gem_object_lock(obj, &ww);
> +		if (err)
> +			continue;
> +
> +		err = i915_gem_object_pin_pages(obj);
> +		if (err)
> +			continue;
> +
> +		err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
> +						  obj->mm.pages->sgl, obj->cache_level,
> +						  i915_gem_object_is_lmem(obj),
> +						  expand32(POISON_INUSE), &rq);
> +		i915_gem_object_unpin_pages(obj);
> +		if (rq) {
> +			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
> +			i915_gem_object_set_moving_fence(obj, &rq->fence);
> +			i915_request_put(rq);
> +		}
> +		if (err)
> +			continue;
This trailing continue shouldn't be needed, since it's the last 
statement in the loop body?
> +	}
> +	if (err)
> +		goto out_put;
> +
> +	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
> +		igt_make_evictable(&objects);
> +
> +	err = ___igt_mmap_migrate(i915, obj, addr,
> +				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
> +	if (!err && obj->mm.region != expected_mr) {
> +		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
> +		err = -EINVAL;
> +	}
> +
> +out_put:
> +	i915_gem_object_put(obj);
> +	igt_close_objects(i915, &objects);
> +	return err;
> +}
> +
> +static int igt_mmap_migrate(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
> +	struct intel_memory_region *mr;
> +	enum intel_region_id id;
> +
> +	for_each_memory_region(mr, i915, id) {
> +		struct intel_memory_region *mixed[] = { mr, system };
> +		struct intel_memory_region *single[] = { mr };
> +		int err;
> +
> +		if (mr->private)
> +			continue;
> +
> +		if (!mr->io_size || mr->io_size == mr->total)
> +			continue;
> +
> +		/*
> +		 * Allocate in the mappable portion, should be no surprises here.
> +		 */
> +		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
> +		if (err)
> +			return err;
> +
> +		/*
> +		 * Allocate in the non-mappable portion, but force migrating to
> +		 * the mappable portion on fault (LMEM -> LMEM)
> +		 */
> +		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
> +					 IGT_MMAP_MIGRATE_TOPDOWN |
> +					 IGT_MMAP_MIGRATE_FILL |
> +					 IGT_MMAP_MIGRATE_EVICTABLE);
> +		if (err)
> +			return err;
> +
> +		/*
> +		 * Allocate in the non-mappable portion, but force spilling into
> +		 * system memory on fault (LMEM -> SMEM)
> +		 */
> +		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
> +					 IGT_MMAP_MIGRATE_TOPDOWN |
> +					 IGT_MMAP_MIGRATE_FILL);
> +		if (err)
> +			return err;
> +
> +		/*
> +		 * Allocate in the non-mappable portion, but since the mappable
> +		 * portion is already full, and we can't spill to system memory,
> +		 * we should expect the fault to fail.
> +		 */
> +		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
> +					 IGT_MMAP_MIGRATE_TOPDOWN |
> +					 IGT_MMAP_MIGRATE_FILL |
> +					 IGT_MMAP_MIGRATE_UNFAULTABLE);
> +		if (err)
> +			return err;
> +	}
> +
> +	return 0;
> +}
> +
>   static const char *repr_mmap_type(enum i915_mmap_type type)
>   {
>   	switch (type) {
> @@ -1424,6 +1729,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
>   		SUBTEST(igt_smoke_tiling),
>   		SUBTEST(igt_mmap_offset_exhaustion),
>   		SUBTEST(igt_mmap),
> +		SUBTEST(igt_mmap_migrate),
>   		SUBTEST(igt_mmap_access),
>   		SUBTEST(igt_mmap_revoke),
>   		SUBTEST(igt_mmap_gpu),
