* [PATCH 2/2] drm/panfrost: Add madvise and shrinker support
From: Rob Herring @ 2019-08-05 14:33 UTC
To: dri-devel
Cc: Rob Clark, Tomeu Vizoso, David Airlie, Steven Price,
Alyssa Rosenzweig
Add support for madvise and a shrinker, similar to other drivers. This
allows userspace to mark BOs which can be freed when there is memory
pressure.

Unlike other implementations, we don't depend on struct_mutex. The
driver maintains a list of BOs which can be freed when the shrinker is
called. Access to the list is serialized with the shrinker_lock.
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Rob Herring <robh@kernel.org>
---
drivers/gpu/drm/panfrost/Makefile | 1 +
drivers/gpu/drm/panfrost/TODO | 3 -
drivers/gpu/drm/panfrost/panfrost_device.h | 4 +
drivers/gpu/drm/panfrost/panfrost_drv.c | 38 +++++++
drivers/gpu/drm/panfrost/panfrost_gem.c | 5 +
drivers/gpu/drm/panfrost/panfrost_gem.h | 3 +
.../gpu/drm/panfrost/panfrost_gem_shrinker.c | 107 ++++++++++++++++++
include/uapi/drm/panfrost_drm.h | 22 ++++
8 files changed, 180 insertions(+), 3 deletions(-)
create mode 100644 drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index ecf0864cb515..b71935862417 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -5,6 +5,7 @@ panfrost-y := \
panfrost_device.o \
panfrost_devfreq.o \
panfrost_gem.o \
+ panfrost_gem_shrinker.o \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..f3beca8d4ce9 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -18,10 +18,7 @@
- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
-- Support for madvise and a shrinker.
-
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
- Performance counter support. (Boris)
-
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 83cc01cafde1..9751e8dc9ec6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -84,6 +84,10 @@ struct panfrost_device {
struct mutex sched_lock;
struct mutex reset_lock;
+ struct mutex shrinker_lock;
+ struct list_head shrinker_list;
+ struct shrinker shrinker;
+
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index cb43ff4ebf4a..399007d306ea 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -301,6 +301,38 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
return 0;
}
+static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_madvise *args = data;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
+ if (args->retained) {
+ struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+
+ mutex_lock(&pfdev->shrinker_lock);
+
+ if (args->madv == PANFROST_MADV_DONTNEED)
+ list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+ else if (args->madv == PANFROST_MADV_WILLNEED)
+ list_del_init(&bo->base.madv_list);
+
+ mutex_unlock(&pfdev->shrinker_lock);
+ }
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return 0;
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -352,6 +384,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
@@ -400,6 +433,8 @@ static int panfrost_probe(struct platform_device *pdev)
pfdev->ddev = ddev;
spin_lock_init(&pfdev->mm_lock);
+ mutex_init(&pfdev->shrinker_lock);
+ INIT_LIST_HEAD(&pfdev->shrinker_list);
/* 4G enough for now. can be 48-bit */
drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
@@ -430,6 +465,8 @@ static int panfrost_probe(struct platform_device *pdev)
if (err < 0)
goto err_out1;
+ panfrost_gem_shrinker_init(ddev);
+
return 0;
err_out1:
@@ -446,6 +483,7 @@ static int panfrost_remove(struct platform_device *pdev)
struct drm_device *ddev = pfdev->ddev;
drm_dev_unregister(ddev);
+ panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
pm_runtime_put_sync_autosuspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 543ab1b81bd5..67d374184340 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -26,6 +26,11 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_mm_remove_node(&bo->node);
spin_unlock(&pfdev->mm_lock);
+ mutex_lock(&pfdev->shrinker_lock);
+ if (!list_empty(&bo->base.madv_list))
+ list_del(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
+
drm_gem_shmem_free_object(obj);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..5f51f881ea3f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -27,4 +27,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
+void panfrost_gem_shrinker_init(struct drm_device *dev);
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
new file mode 100644
index 000000000000..d191632b6197
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Arm Ltd.
+ *
+ * Based on msm_gem_freedreno.c:
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/list.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+static unsigned long
+panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem;
+ unsigned long count = 0;
+
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return 0;
+
+ list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
+ if (drm_gem_shmem_is_purgeable(shmem))
+ count += shmem->base.size >> PAGE_SHIFT;
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ return count;
+}
+
+static void panfrost_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ mutex_lock(&shmem->pages_lock);
+
+ panfrost_mmu_unmap(to_panfrost_bo(obj));
+ drm_gem_shmem_purge_locked(obj);
+
+ mutex_unlock(&shmem->pages_lock);
+}
+
+static unsigned long
+panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem, *tmp;
+ unsigned long freed = 0;
+
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return SHRINK_STOP;
+
+ list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (drm_gem_shmem_is_purgeable(shmem)) {
+ panfrost_gem_purge(&shmem->base);
+ freed += shmem->base.size >> PAGE_SHIFT;
+ list_del_init(&shmem->madv_list);
+ }
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+/**
+ * panfrost_gem_shrinker_init - Initialize panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function registers and sets up the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_init(struct drm_device *dev)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
+ pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
+ pfdev->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&pfdev->shrinker));
+}
+
+/**
+ * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function unregisters the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ if (pfdev->shrinker.nr_deferred) {
+ unregister_shrinker(&pfdev->shrinker);
+ }
+}
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index b5d370638846..1b3ff910ebe7 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -20,6 +20,7 @@ extern "C" {
#define DRM_PANFROST_GET_BO_OFFSET 0x05
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
+#define DRM_PANFROST_MADVISE 0x08
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -27,6 +28,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -159,6 +161,26 @@ struct drm_panfrost_perfcnt_dump {
__u64 buf_ptr;
};
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure. This is useful for a
+ * userspace BO cache, where we want to optimistically hold on to the
+ * buffer allocation and potential mmap, but allow the pages to be
+ * discarded under memory pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when a buffer enters
+ * the BO cache, and madvise(WILLNEED) when trying to recycle a buffer
+ * from the BO cache. In the WILLNEED case, 'retained' indicates to
+ * userspace whether the backing pages still exist.
+ */
+#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
+
+struct drm_panfrost_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, PANFROST_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
#if defined(__cplusplus)
}
#endif
--
2.20.1
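To illustrate the flow described in the madvise comment in the uapi header above: a userspace BO cache would typically drive the new ioctl roughly as in the sketch below. This is only a sketch — the helper names, the header paths, and the use of libdrm's drmIoctl() are assumptions for illustration, not code from Mesa or from this series.

	#include <stdbool.h>
	#include <stdint.h>

	#include <xf86drm.h>		/* drmIoctl(), from libdrm */
	#include "panfrost_drm.h"	/* the uapi header added above; path is illustrative */

	/* Mark a cached BO's backing pages as discardable under memory pressure. */
	static int bo_cache_put(int fd, uint32_t handle)
	{
		struct drm_panfrost_madvise req = {
			.handle = handle,
			.madv = PANFROST_MADV_DONTNEED,
		};

		return drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req);
	}

	/*
	 * Try to take a BO back out of the cache; returns false if the
	 * shrinker already purged its backing pages and the BO must be
	 * reallocated.
	 */
	static bool bo_cache_get(int fd, uint32_t handle)
	{
		struct drm_panfrost_madvise req = {
			.handle = handle,
			.madv = PANFROST_MADV_WILLNEED,
		};

		if (drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req) != 0)
			return false;

		return req.retained != 0;
	}

A retained value of 0 on the WILLNEED call means the shrinker already dropped the backing pages, so the cache entry cannot be reused and a fresh BO must be allocated.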
* Re: [PATCH 1/2] drm/shmem: Add madvise state and purge helpers
From: Daniel Vetter @ 2019-08-05 16:35 UTC
To: Rob Herring
Cc: Rob Clark, Tomeu Vizoso, Maxime Ripard, David Airlie, dri-devel,
Steven Price, Alyssa Rosenzweig, Sean Paul
On Mon, Aug 05, 2019 at 08:33:57AM -0600, Rob Herring wrote:
> Add support to the shmem GEM helpers for tracking madvise state and
> purging pages. This is based on the msm implementation.
>
> The BO provides a list_head, but the list management is handled outside
> of the shmem helpers as there are different locking requirements.
>
> Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <maxime.ripard@bootlin.com>
> Cc: Sean Paul <sean@poorly.run>
> Cc: David Airlie <airlied@linux.ie>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Eric Anholt <eric@anholt.net>
> Signed-off-by: Rob Herring <robh@kernel.org>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 57 ++++++++++++++++++++++++++
> include/drm/drm_gem_shmem_helper.h | 15 +++++++
> 2 files changed, 72 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 2f64667ac805..4b442576de1c 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
> shmem = to_drm_gem_shmem_obj(obj);
> mutex_init(&shmem->pages_lock);
> mutex_init(&shmem->vmap_lock);
Maybe a bit late, but for reasons (interop with ttm, which will be more
important once we have dynamic dma-buf) it would be real nice to use the
reservation_obj lock for all this stuff. msm, being struct_mutex based,
isn't a great example here. The downside is that it will be a lot harder
to get msm to use these then, but much better to not spread struct_mutex
inspired locking too far.
Other bit: Wire this all up in a shrinker while at it?
-Daniel
> + INIT_LIST_HEAD(&shmem->madv_list);
>
> /*
> * Our buffers are kept pinned, so allocating them
> @@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> }
> EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
>
> +/* Update madvise status, returns true if not purged, else
> + * false or -errno.
> + */
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
> +{
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + mutex_lock(&shmem->pages_lock);
> +
> + if (shmem->madv >= 0)
> + shmem->madv = madv;
> +
> + madv = shmem->madv;
> +
> + mutex_unlock(&shmem->pages_lock);
> +
> + return (madv >= 0);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_madvise);
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
> +{
> + struct drm_device *dev = obj->dev;
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
> +
> + drm_gem_shmem_put_pages_locked(shmem);
> +
> + shmem->madv = -1;
> +
> + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
> + drm_gem_free_mmap_offset(obj);
> +
> + /* Our goal here is to return as much of the memory as
> + * is possible back to the system as we are called from OOM.
> + * To do this we must instruct the shmfs to drop all of its
> + * backing pages, *now*.
> + */
> + shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
> +
> + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
> + 0, (loff_t)-1);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
> +
> +void drm_gem_shmem_purge(struct drm_gem_object *obj)
> +{
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + mutex_lock(&shmem->pages_lock);
> + drm_gem_shmem_purge_locked(obj);
> + mutex_unlock(&shmem->pages_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge);
> +
> /**
> * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
> * @file: DRM file structure to create the dumb buffer for
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 038b6d313447..ce1600fdfc3e 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
> */
> unsigned int pages_use_count;
>
> + int madv;
> + struct list_head madv_list;
> +
> /**
> * @pages_mark_dirty_on_put:
> *
> @@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
> void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
> void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
>
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
> +
> +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
> +{
> + return (shmem->madv > 0) &&
> + !shmem->vmap_use_count && shmem->sgt &&
> + !shmem->base.dma_buf && !shmem->base.import_attach;
> +}
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
> +void drm_gem_shmem_purge(struct drm_gem_object *obj);
> +
> struct drm_gem_shmem_object *
> drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> struct drm_device *dev, size_t size,
> --
> 2.20.1
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
* Re: [PATCH 1/2] drm/shmem: Add madvise state and purge helpers
From: Sean Paul @ 2019-10-16 19:20 UTC
To: Rob Herring
Cc: Rob Clark, Tomeu Vizoso, Maxime Ripard, David Airlie, dri-devel,
Steven Price, Alyssa Rosenzweig, Sean Paul
On Mon, Aug 05, 2019 at 08:33:57AM -0600, Rob Herring wrote:
> Add support to the shmem GEM helpers for tracking madvise state and
> purging pages. This is based on the msm implementation.
>
> The BO provides a list_head, but the list management is handled outside
> of the shmem helpers as there are different locking requirements.
>
> Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <maxime.ripard@bootlin.com>
> Cc: Sean Paul <sean@poorly.run>
> Cc: David Airlie <airlied@linux.ie>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Eric Anholt <eric@anholt.net>
> Signed-off-by: Rob Herring <robh@kernel.org>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 57 ++++++++++++++++++++++++++
> include/drm/drm_gem_shmem_helper.h | 15 +++++++
> 2 files changed, 72 insertions(+)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 2f64667ac805..4b442576de1c 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
> shmem = to_drm_gem_shmem_obj(obj);
> mutex_init(&shmem->pages_lock);
> mutex_init(&shmem->vmap_lock);
> + INIT_LIST_HEAD(&shmem->madv_list);
>
> /*
> * Our buffers are kept pinned, so allocating them
> @@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> }
> EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
>
> +/* Update madvise status, returns true if not purged, else
> + * false or -errno.
> + */
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
> +{
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + mutex_lock(&shmem->pages_lock);
> +
> + if (shmem->madv >= 0)
> + shmem->madv = madv;
> +
> + madv = shmem->madv;
> +
> + mutex_unlock(&shmem->pages_lock);
> +
> + return (madv >= 0);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_madvise);
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
> +{
> + struct drm_device *dev = obj->dev;
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
> +
> + drm_gem_shmem_put_pages_locked(shmem);
> +
> + shmem->madv = -1;
> +
> + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
> + drm_gem_free_mmap_offset(obj);
> +
> + /* Our goal here is to return as much of the memory as
> + * is possible back to the system as we are called from OOM.
> + * To do this we must instruct the shmfs to drop all of its
> + * backing pages, *now*.
> + */
> + shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
> +
> + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
> + 0, (loff_t)-1);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
> +
> +void drm_gem_shmem_purge(struct drm_gem_object *obj)
> +{
> + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> + mutex_lock(&shmem->pages_lock);
> + drm_gem_shmem_purge_locked(obj);
> + mutex_unlock(&shmem->pages_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge);
> +
> /**
> * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
> * @file: DRM file structure to create the dumb buffer for
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 038b6d313447..ce1600fdfc3e 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
> */
> unsigned int pages_use_count;
>
> + int madv;
> + struct list_head madv_list;
Hey Rob,
Not sure why I didn't notice this earlier, but:
../include/drm/drm_gem_shmem_helper.h:87: warning: Function parameter or member 'madv' not described in 'drm_gem_shmem_object'
../include/drm/drm_gem_shmem_helper.h:87: warning: Function parameter or member 'madv_list' not described in 'drm_gem_shmem_object'
Could you please add some kerneldoc for these?
Sean
> +
> /**
> * @pages_mark_dirty_on_put:
> *
> @@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
> void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
> void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
>
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
> +
> +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
> +{
> + return (shmem->madv > 0) &&
> + !shmem->vmap_use_count && shmem->sgt &&
> + !shmem->base.dma_buf && !shmem->base.import_attach;
> +}
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
> +void drm_gem_shmem_purge(struct drm_gem_object *obj);
> +
> struct drm_gem_shmem_object *
> drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> struct drm_device *dev, size_t size,
> --
> 2.20.1
>
--
Sean Paul, Software Engineer, Google / Chromium OS
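For reference, the kerneldoc Sean is asking for could read roughly as follows — a sketch based on how madv is used by drm_gem_shmem_is_purgeable() and drm_gem_shmem_purge_locked() in this series, not the wording of any actual follow-up patch:

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse, a positive value means the backing pages
	 * are not needed and may be purged, and a negative value means
	 * the pages have been purged.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to link purgeable objects on a
	 * driver-private shrinker list, protected by a driver-specific
	 * lock (shrinker_lock in panfrost).
	 */
	struct list_head madv_list;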