From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
To: "David Airlie" <airlied@gmail.com>,
"Gerd Hoffmann" <kraxel@redhat.com>,
"Gurchetan Singh" <gurchetansingh@chromium.org>,
"Chia-I Wu" <olvaffe@gmail.com>,
"Daniel Vetter" <daniel@ffwll.ch>,
"Maarten Lankhorst" <maarten.lankhorst@linux.intel.com>,
"Maxime Ripard" <mripard@kernel.org>,
"Thomas Zimmermann" <tzimmermann@suse.de>,
"Christian König" <christian.koenig@amd.com>,
"Qiang Yu" <yuq825@gmail.com>,
"Steven Price" <steven.price@arm.com>,
"Boris Brezillon" <boris.brezillon@collabora.com>,
"Emma Anholt" <emma@anholt.net>, "Melissa Wen" <mwen@igalia.com>,
"Will Deacon" <will@kernel.org>,
"Peter Zijlstra" <peterz@infradead.org>,
"Boqun Feng" <boqun.feng@gmail.com>,
"Mark Rutland" <mark.rutland@arm.com>
Cc: intel-gfx@lists.freedesktop.org, kernel@collabora.com,
linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
virtualization@lists.linux-foundation.org
Subject: [Intel-gfx] [PATCH v15 13/23] drm/shmem-helper: Use kref for pages_use_count
Date: Sun, 27 Aug 2023 20:54:39 +0300 [thread overview]
Message-ID: <20230827175449.1766701-14-dmitry.osipenko@collabora.com> (raw)
In-Reply-To: <20230827175449.1766701-1-dmitry.osipenko@collabora.com>
Use the atomic kref helper for pages_use_count to optimize the pin/unpin
functions by skipping reservation locking while the GEM's pin refcount is > 1.
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 48 ++++++++++++++-----------
drivers/gpu/drm/lima/lima_gem.c | 2 +-
drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +-
include/drm/drm_gem_shmem_helper.h | 2 +-
4 files changed, 30 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 1a7e5c332fd8..5a2e37b3e51d 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -155,7 +155,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
if (shmem->got_sgt)
drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, kref_read(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -172,14 +172,13 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (kref_get_unless_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -195,26 +194,20 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ kref_init(&shmem->pages_use_count);
+
return 0;
}
-/*
- * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
-{
- struct drm_gem_object *obj = &shmem->base;
-
- dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
+static void drm_gem_shmem_kref_release_pages(struct kref *kref)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
- if (--shmem->pages_use_count > 0)
- return;
+ shmem = container_of(kref, struct drm_gem_shmem_object,
+ pages_use_count);
+ obj = &shmem->base;
#ifdef CONFIG_X86
if (shmem->map_wc)
@@ -226,6 +219,19 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
}
+
+/*
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when use drops to zero.
+ */
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ dma_resv_assert_held(shmem->base.resv);
+
+ kref_put(&shmem->pages_use_count, drm_gem_shmem_kref_release_pages);
+}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
@@ -556,8 +562,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !kref_get_unless_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -638,7 +644,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (shmem->base.import_attach)
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", kref_read(&shmem->pages_use_count));
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 7d74c71f5558..a5f015d188cd 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ kref_init(&bo->base.pages_use_count);
mapping_set_unevictable(mapping);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 7771769f0ce0..c9ac9d361864 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -487,7 +487,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ kref_init(&bo->base.pages_use_count);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index afb7cd671e2a..a5a3c193cc8f 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,7 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ struct kref pages_use_count;
/**
* @pages_pin_count:
--
2.41.0
WARNING: multiple messages have this Message-ID (diff)
From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
To: "David Airlie" <airlied@gmail.com>,
"Gerd Hoffmann" <kraxel@redhat.com>,
"Gurchetan Singh" <gurchetansingh@chromium.org>,
"Chia-I Wu" <olvaffe@gmail.com>,
"Daniel Vetter" <daniel@ffwll.ch>,
"Maarten Lankhorst" <maarten.lankhorst@linux.intel.com>,
"Maxime Ripard" <mripard@kernel.org>,
"Thomas Zimmermann" <tzimmermann@suse.de>,
"Christian König" <christian.koenig@amd.com>,
"Qiang Yu" <yuq825@gmail.com>,
"Steven Price" <steven.price@arm.com>,
"Boris Brezillon" <boris.brezillon@collabora.com>,
"Emma Anholt" <emma@anholt.net>, "Melissa Wen" <mwen@igalia.com>,
"Will Deacon" <will@kernel.org>,
"Peter Zijlstra" <peterz@infradead.org>,
"Boqun Feng" <boqun.feng@gmail.com>,
"Mark Rutland" <mark.rutland@arm.com>
Cc: intel-gfx@lists.freedesktop.org, kernel@collabora.com,
linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
virtualization@lists.linux-foundation.org
Subject: [PATCH v15 13/23] drm/shmem-helper: Use kref for pages_use_count
Date: Sun, 27 Aug 2023 20:54:39 +0300 [thread overview]
Message-ID: <20230827175449.1766701-14-dmitry.osipenko@collabora.com> (raw)
In-Reply-To: <20230827175449.1766701-1-dmitry.osipenko@collabora.com>
Use the atomic kref helper for pages_use_count to optimize the pin/unpin
functions by skipping reservation locking while the GEM's pin refcount is > 1.
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 48 ++++++++++++++-----------
drivers/gpu/drm/lima/lima_gem.c | 2 +-
drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +-
include/drm/drm_gem_shmem_helper.h | 2 +-
4 files changed, 30 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 1a7e5c332fd8..5a2e37b3e51d 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -155,7 +155,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
if (shmem->got_sgt)
drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, kref_read(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -172,14 +172,13 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (kref_get_unless_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -195,26 +194,20 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ kref_init(&shmem->pages_use_count);
+
return 0;
}
-/*
- * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
-{
- struct drm_gem_object *obj = &shmem->base;
-
- dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
+static void drm_gem_shmem_kref_release_pages(struct kref *kref)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
- if (--shmem->pages_use_count > 0)
- return;
+ shmem = container_of(kref, struct drm_gem_shmem_object,
+ pages_use_count);
+ obj = &shmem->base;
#ifdef CONFIG_X86
if (shmem->map_wc)
@@ -226,6 +219,19 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
}
+
+/*
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when use drops to zero.
+ */
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ dma_resv_assert_held(shmem->base.resv);
+
+ kref_put(&shmem->pages_use_count, drm_gem_shmem_kref_release_pages);
+}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
@@ -556,8 +562,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !kref_get_unless_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -638,7 +644,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (shmem->base.import_attach)
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", kref_read(&shmem->pages_use_count));
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 7d74c71f5558..a5f015d188cd 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ kref_init(&bo->base.pages_use_count);
mapping_set_unevictable(mapping);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 7771769f0ce0..c9ac9d361864 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -487,7 +487,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ kref_init(&bo->base.pages_use_count);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index afb7cd671e2a..a5a3c193cc8f 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,7 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ struct kref pages_use_count;
/**
* @pages_pin_count:
--
2.41.0
next prev parent reply other threads:[~2023-08-27 17:56 UTC|newest]
Thread overview: 112+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-27 17:54 [Intel-gfx] [PATCH v15 00/23] Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 01/23] drm/shmem-helper: Fix UAF in error path when freeing SGT of imported GEM Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 11:16 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:16 ` Boris Brezillon
2023-09-02 18:15 ` [Intel-gfx] " Dmitry Osipenko
2023-09-02 18:15 ` Dmitry Osipenko
2023-09-04 8:01 ` [Intel-gfx] " Boris Brezillon
2023-09-04 8:01 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 02/23] drm/shmem-helper: Use flag for tracking page count bumped by get_pages_sgt() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 10:55 ` [Intel-gfx] " Boris Brezillon
2023-08-28 10:55 ` Boris Brezillon
2023-09-02 18:28 ` [Intel-gfx] " Dmitry Osipenko
2023-09-02 18:28 ` Dmitry Osipenko
2023-09-04 7:52 ` [Intel-gfx] " Boris Brezillon
2023-09-04 7:52 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 03/23] drm/gem: Change locked/unlocked postfix of drm_gem_v/unmap() function names Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 11:25 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:25 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 04/23] drm/gem: Add _locked postfix to functions that have unlocked counterpart Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 11:25 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:25 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 05/23] drm/v3d: Replace open-coded drm_gem_shmem_free() with drm_gem_object_put() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 06/23] drm/virtio: Replace " Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 07/23] drm/shmem-helper: Make all exported symbols GPL Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 08/23] drm/shmem-helper: Refactor locked/unlocked functions Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 11:28 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:28 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 09/23] drm/shmem-helper: Remove obsoleted is_iomem test Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 11:29 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:29 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 10/23] locking/refcount, kref: Add kref_put_ww_mutex() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 9:26 ` [Intel-gfx] " Boris Brezillon
2023-08-28 9:26 ` Boris Brezillon
2023-08-29 2:28 ` [Intel-gfx] " Dmitry Osipenko
2023-08-29 2:28 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 11/23] dma-resv: Add kref_put_dma_resv() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 10:21 ` [Intel-gfx] " Christian König
2023-08-28 10:21 ` Christian König
2023-08-28 10:21 ` Christian König via Virtualization
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 12/23] drm/shmem-helper: Add and use pages_pin_count Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 9:38 ` [Intel-gfx] " Boris Brezillon
2023-08-28 9:38 ` Boris Brezillon
2023-08-28 11:46 ` [Intel-gfx] " Boris Brezillon
2023-08-28 11:46 ` Boris Brezillon
2023-08-29 2:30 ` [Intel-gfx] " Dmitry Osipenko
2023-08-29 2:30 ` Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko [this message]
2023-08-27 17:54 ` [PATCH v15 13/23] drm/shmem-helper: Use kref for pages_use_count Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 14/23] drm/shmem-helper: Add and use lockless drm_gem_shmem_get_pages() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 15/23] drm/shmem-helper: Switch drm_gem_shmem_vmap/vunmap to use pin/unpin Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 16/23] drm/shmem-helper: Use kref for vmap_use_count Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 10:00 ` [Intel-gfx] " Boris Brezillon
2023-08-28 10:00 ` Boris Brezillon
2023-09-02 20:22 ` [Intel-gfx] " Dmitry Osipenko
2023-09-02 20:22 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 17/23] drm/shmem-helper: Add and use drm_gem_shmem_resv_assert_held() helper Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-28 10:12 ` [Intel-gfx] " Boris Brezillon
2023-08-28 10:12 ` Boris Brezillon
2023-08-29 2:34 ` [Intel-gfx] " Dmitry Osipenko
2023-08-29 2:34 ` Dmitry Osipenko
2023-08-29 7:29 ` [Intel-gfx] " Boris Brezillon
2023-08-29 7:29 ` Boris Brezillon
2023-08-29 8:52 ` [Intel-gfx] " Christian König
2023-08-29 8:52 ` Christian König
2023-08-29 8:52 ` Christian König via Virtualization
2023-08-29 9:44 ` [Intel-gfx] " Boris Brezillon
2023-08-29 9:44 ` Boris Brezillon
2023-08-29 10:21 ` [Intel-gfx] " Boris Brezillon
2023-08-29 10:21 ` Boris Brezillon
2023-09-02 19:43 ` [Intel-gfx] " Dmitry Osipenko
2023-09-02 19:43 ` Dmitry Osipenko
2023-09-04 8:36 ` [Intel-gfx] " Boris Brezillon
2023-09-04 8:36 ` Boris Brezillon
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 18/23] drm/shmem-helper: Add memory shrinker Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 19/23] drm/shmem-helper: Export drm_gem_shmem_get_pages_sgt_locked() Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 20/23] drm/virtio: Pin display framebuffer BO Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 21/23] drm/virtio: Attach shmem BOs dynamically Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 22/23] drm/virtio: Support memory shrinking Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 17:54 ` [Intel-gfx] [PATCH v15 23/23] drm/panfrost: Switch to generic memory shrinker Dmitry Osipenko
2023-08-27 17:54 ` Dmitry Osipenko
2023-08-27 18:44 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers (rev3) Patchwork
2023-08-27 18:44 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-08-27 19:01 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2023-08-27 20:23 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
2023-08-28 14:37 ` [Intel-gfx] [PATCH v15 00/23] Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers Helen Mae Koike Fornazier
2023-08-28 14:37 ` Helen Mae Koike Fornazier
2023-08-28 15:24 ` [Intel-gfx] " Helen Mae Koike Fornazier
2023-08-28 15:24 ` Helen Mae Koike Fornazier
2023-08-29 2:36 ` [Intel-gfx] " Dmitry Osipenko
2023-08-29 2:36 ` Dmitry Osipenko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230827175449.1766701-14-dmitry.osipenko@collabora.com \
--to=dmitry.osipenko@collabora.com \
--cc=airlied@gmail.com \
--cc=boqun.feng@gmail.com \
--cc=boris.brezillon@collabora.com \
--cc=christian.koenig@amd.com \
--cc=daniel@ffwll.ch \
--cc=dri-devel@lists.freedesktop.org \
--cc=emma@anholt.net \
--cc=gurchetansingh@chromium.org \
--cc=intel-gfx@lists.freedesktop.org \
--cc=kernel@collabora.com \
--cc=kraxel@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=maarten.lankhorst@linux.intel.com \
--cc=mark.rutland@arm.com \
--cc=mripard@kernel.org \
--cc=mwen@igalia.com \
--cc=olvaffe@gmail.com \
--cc=peterz@infradead.org \
--cc=steven.price@arm.com \
--cc=tzimmermann@suse.de \
--cc=virtualization@lists.linux-foundation.org \
--cc=will@kernel.org \
--cc=yuq825@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.