* [PATCH v2 1/4] fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info
2026-02-24 8:25 [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Thomas Zimmermann
@ 2026-02-24 8:25 ` Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 2/4] fbdev: defio: Keep module reference from VMAs Thomas Zimmermann
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Thomas Zimmermann @ 2026-02-24 8:25 UTC (permalink / raw)
To: deller, simona, jayalk; +Cc: linux-fbdev, dri-devel, Thomas Zimmermann, stable
Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an
instance as part of initializing deferred I/O and remove it only after
the final mapping has been closed. If the fb_info and the contained
deferred I/O meanwhile go away, clear struct fb_deferred_io_state.info
to invalidate the mapping. Any access will then result in a SIGBUS
signal.
Fixes a long-standing problem, where a device hot-unplug happens while
user space still has an active mapping of the graphics memory. The hot-
unplug frees the instance of struct fb_info. Accessing the memory will
operate on undefined state.
v2:
- use kzalloc_obj()
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support")
Cc: Helge Deller <deller@gmx.de>
Cc: linux-fbdev@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: <stable@vger.kernel.org> # v2.6.22+
---
drivers/video/fbdev/core/fb_defio.c | 178 ++++++++++++++++++++++------
include/linux/fb.h | 4 +-
2 files changed, 145 insertions(+), 37 deletions(-)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index ca48b89a323d..93bd2f696fa4 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -24,6 +24,75 @@
#include <linux/rmap.h>
#include <linux/pagemap.h>
+/*
+ * struct fb_deferred_io_state
+ */
+
+struct fb_deferred_io_state {
+ struct kref ref;
+
+ struct mutex lock; /* mutex that protects the pageref list */
+ /* fields protected by lock */
+ struct fb_info *info;
+};
+
+static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
+{
+ struct fb_deferred_io_state *fbdefio_state;
+
+ fbdefio_state = kzalloc_obj(*fbdefio_state);
+ if (!fbdefio_state)
+ return NULL;
+
+ kref_init(&fbdefio_state->ref);
+ mutex_init(&fbdefio_state->lock);
+
+ return fbdefio_state;
+}
+
+static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
+{
+ mutex_destroy(&fbdefio_state->lock);
+
+ kfree(fbdefio_state);
+}
+
+static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
+{
+ kref_get(&fbdefio_state->ref);
+}
+
+static void __fb_deferred_io_state_release(struct kref *ref)
+{
+ struct fb_deferred_io_state *fbdefio_state =
+ container_of(ref, struct fb_deferred_io_state, ref);
+
+ fb_deferred_io_state_release(fbdefio_state);
+}
+
+static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
+{
+ kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
+}
+
+/*
+ * struct vm_operations_struct
+ */
+
+static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
+{
+ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
+
+ fb_deferred_io_state_get(fbdefio_state);
+}
+
+static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
+{
+ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
+
+ fb_deferred_io_state_put(fbdefio_state);
+}
+
static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
+ struct fb_info *info;
unsigned long offset;
struct page *page;
- struct fb_info *info = vmf->vma->vm_private_data;
+ vm_fault_t ret;
+ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
+
+ mutex_lock(&fbdefio_state->lock);
+
+ info = fbdefio_state->info;
+ if (!info) {
+ ret = VM_FAULT_SIGBUS; /* our device is gone */
+ goto err_mutex_unlock;
+ }
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= info->fix.smem_len)
- return VM_FAULT_SIGBUS;
+ if (offset >= info->fix.smem_len) {
+ ret = VM_FAULT_SIGBUS;
+ goto err_mutex_unlock;
+ }
page = fb_deferred_io_get_page(info, offset);
- if (!page)
- return VM_FAULT_SIGBUS;
+ if (!page) {
+ ret = VM_FAULT_SIGBUS;
+ goto err_mutex_unlock;
+ }
if (!vmf->vma->vm_file)
fb_err(info, "no mapping available\n");
BUG_ON(!info->fbdefio->mapping);
+ mutex_unlock(&fbdefio_state->lock);
+
vmf->page = page;
+
return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&fbdefio_state->lock);
+ return ret;
}
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
* Adds a page to the dirty list. Call this from struct
* vm_operations_struct.page_mkwrite.
*/
-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
- struct page *page)
+static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
+ unsigned long offset, struct page *page)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_info *info;
+ struct fb_deferred_io *fbdefio;
struct fb_deferred_io_pageref *pageref;
vm_fault_t ret;
/* protect against the workqueue changing the page list */
- mutex_lock(&fbdefio->lock);
+ mutex_lock(&fbdefio_state->lock);
+
+ info = fbdefio_state->info;
+ if (!info) {
+ ret = VM_FAULT_SIGBUS; /* our device is gone */
+ goto err_mutex_unlock;
+ }
+
+ fbdefio = info->fbdefio;
pageref = fb_deferred_io_pageref_get(info, offset, page);
if (WARN_ON_ONCE(!pageref)) {
@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
*/
lock_page(pageref->page);
- mutex_unlock(&fbdefio->lock);
+ mutex_unlock(&fbdefio_state->lock);
/* come back after delay to process the deferred IO */
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
return VM_FAULT_LOCKED;
err_mutex_unlock:
- mutex_unlock(&fbdefio->lock);
+ mutex_unlock(&fbdefio_state->lock);
return ret;
}
-/*
- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
- * @fb_info: The fbdev info structure
- * @vmf: The VM fault
- *
- * This is a callback we get when userspace first tries to
- * write to the page. We schedule a workqueue. That workqueue
- * will eventually mkclean the touched pages and execute the
- * deferred framebuffer IO. Then if userspace touches a page
- * again, we repeat the same scheme.
- *
- * Returns:
- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
- */
-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
+ struct vm_fault *vmf)
{
unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
- return fb_deferred_io_track_page(info, offset, page);
+ return fb_deferred_io_track_page(fbdefio_state, offset, page);
}
-/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
- struct fb_info *info = vmf->vma->vm_private_data;
+ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
- return fb_deferred_io_page_mkwrite(info, vmf);
+ return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
}
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
+ .open = fb_deferred_io_vm_open,
+ .close = fb_deferred_io_vm_close,
.fault = fb_deferred_io_fault,
.page_mkwrite = fb_deferred_io_mkwrite,
};
@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
vm_flags_set(vma, VM_IO);
- vma->vm_private_data = info;
+ vma->vm_private_data = info->fbdefio_state;
+
+ fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
+
return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct work_struct *work)
struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
struct fb_deferred_io_pageref *pageref, *next;
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
/* here we wrprotect the page's mappings, then do all deferred IO. */
- mutex_lock(&fbdefio->lock);
+ mutex_lock(&fbdefio_state->lock);
#ifdef CONFIG_MMU
list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
struct page *page = pageref->page;
@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct work_struct *work)
list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
fb_deferred_io_pageref_put(pageref, info);
- mutex_unlock(&fbdefio->lock);
+ mutex_unlock(&fbdefio_state->lock);
}
int fb_deferred_io_init(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_state *fbdefio_state;
struct fb_deferred_io_pageref *pagerefs;
unsigned long npagerefs;
int ret;
@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *info)
if (WARN_ON(!info->fix.smem_len))
return -EINVAL;
- mutex_init(&fbdefio->lock);
+ fbdefio_state = fb_deferred_io_state_alloc();
+ if (!fbdefio_state)
+ return -ENOMEM;
+ fbdefio_state->info = info;
+
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
INIT_LIST_HEAD(&fbdefio->pagereflist);
if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *info)
info->npagerefs = npagerefs;
info->pagerefs = pagerefs;
+ info->fbdefio_state = fbdefio_state;
+
return 0;
err:
- mutex_destroy(&fbdefio->lock);
+ fb_deferred_io_state_release(fbdefio_state);
return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
fb_deferred_io_lastclose(info);
+ info->fbdefio_state = NULL;
+
+ mutex_lock(&fbdefio_state->lock);
+ fbdefio_state->info = NULL;
+ mutex_unlock(&fbdefio_state->lock);
+
+ fb_deferred_io_state_put(fbdefio_state);
+
kvfree(info->pagerefs);
- mutex_destroy(&fbdefio->lock);
fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 6d4a58084fd5..aed17567fe50 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -218,13 +218,14 @@ struct fb_deferred_io {
unsigned long delay;
bool sort_pagereflist; /* sort pagelist by offset */
int open_count; /* number of opened files; protected by fb_info lock */
- struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
struct address_space *mapping; /* page cache object for fb device */
/* callback */
struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
+
+struct fb_deferred_io_state;
#endif
/*
@@ -487,6 +488,7 @@ struct fb_info {
unsigned long npagerefs;
struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
+ struct fb_deferred_io_state *fbdefio_state;
#endif
const struct fb_ops *fbops;
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH v2 2/4] fbdev: defio: Keep module reference from VMAs
2026-02-24 8:25 [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 1/4] fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info Thomas Zimmermann
@ 2026-02-24 8:25 ` Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 3/4] fbdev: defio: Move variable state into struct fb_deferred_io_state Thomas Zimmermann
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Thomas Zimmermann @ 2026-02-24 8:25 UTC (permalink / raw)
To: deller, simona, jayalk; +Cc: linux-fbdev, dri-devel, Thomas Zimmermann
Acquire a module reference on each mmap and VMA open; hold it until
the kernel closes the VMA. Protects against unloading the module
while user space still has a mapping of the graphics memory. The
VMA page-fault handling would then call into undefined code.
This situation can happen if the underlying device has been unplugged
and the driver has been unloaded. It would then be possible to trigger
the bug by unloading the fbdev core module.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/video/fbdev/core/fb_defio.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 93bd2f696fa4..56030eb42f71 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -83,6 +84,7 @@ static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
{
struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
+ WARN_ON_ONCE(!try_module_get(THIS_MODULE));
fb_deferred_io_state_get(fbdefio_state);
}
@@ -91,6 +93,7 @@ static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
fb_deferred_io_state_put(fbdefio_state);
+ module_put(THIS_MODULE);
}
static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
@@ -335,6 +338,9 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
vma->vm_ops = &fb_deferred_io_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH v2 3/4] fbdev: defio: Move variable state into struct fb_deferred_io_state
2026-02-24 8:25 [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 1/4] fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 2/4] fbdev: defio: Keep module reference from VMAs Thomas Zimmermann
@ 2026-02-24 8:25 ` Thomas Zimmermann
2026-02-24 8:25 ` [PATCH v2 4/4] fbdev: defio: Move pageref array to " Thomas Zimmermann
2026-02-24 22:02 ` [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Helge Deller
4 siblings, 0 replies; 6+ messages in thread
From: Thomas Zimmermann @ 2026-02-24 8:25 UTC (permalink / raw)
To: deller, simona, jayalk; +Cc: linux-fbdev, dri-devel, Thomas Zimmermann
Move variable fields from struct fb_deferred_io into struct
fb_deferred_io_state. These fields are internal to the defio code
and should not be exposed to drivers. At some later point, struct
fb_deferred_io might become const in all defio code.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/video/fbdev/core/fb_defio.c | 37 +++++++++++++++++------------
include/linux/fb.h | 3 ---
2 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 56030eb42f71..35ac13727da1 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -25,6 +25,8 @@
#include <linux/rmap.h>
#include <linux/pagemap.h>
+struct address_space;
+
/*
* struct fb_deferred_io_state
*/
@@ -32,9 +34,13 @@
struct fb_deferred_io_state {
struct kref ref;
+ int open_count; /* number of opened files; protected by fb_info lock */
+ struct address_space *mapping; /* page cache object for fb device */
+
struct mutex lock; /* mutex that protects the pageref list */
/* fields protected by lock */
struct fb_info *info;
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
};
static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
@@ -48,11 +54,14 @@ static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
kref_init(&fbdefio_state->ref);
mutex_init(&fbdefio_state->lock);
+ INIT_LIST_HEAD(&fbdefio_state->pagereflist);
+
return fbdefio_state;
}
static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
{
+ WARN_ON(!list_empty(&fbdefio_state->pagereflist));
mutex_destroy(&fbdefio_state->lock);
kfree(fbdefio_state);
@@ -147,7 +156,8 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
struct page *page)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
- struct list_head *pos = &fbdefio->pagereflist;
+ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+ struct list_head *pos = &fbdefio_state->pagereflist;
struct fb_deferred_io_pageref *pageref, *cur;
pageref = fb_deferred_io_pageref_lookup(info, offset, page);
@@ -171,7 +181,7 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
* pages. If possible, drivers should try to work with
* unsorted page lists instead.
*/
- list_for_each_entry(cur, &fbdefio->pagereflist, list) {
+ list_for_each_entry(cur, &fbdefio_state->pagereflist, list) {
if (cur->offset > pageref->offset)
break;
}
@@ -222,7 +232,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
if (!vmf->vma->vm_file)
fb_err(info, "no mapping available\n");
- BUG_ON(!info->fbdefio->mapping);
+ fb_WARN_ON_ONCE(info, !fbdefio_state->mapping);
mutex_unlock(&fbdefio_state->lock);
@@ -364,20 +374,20 @@ static void fb_deferred_io_work(struct work_struct *work)
/* here we wrprotect the page's mappings, then do all deferred IO. */
mutex_lock(&fbdefio_state->lock);
#ifdef CONFIG_MMU
- list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+ list_for_each_entry(pageref, &fbdefio_state->pagereflist, list) {
struct page *page = pageref->page;
pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;
- mapping_wrprotect_range(fbdefio->mapping, pgoff,
+ mapping_wrprotect_range(fbdefio_state->mapping, pgoff,
page_to_pfn(page), 1);
}
#endif
/* driver's callback with pagereflist */
- fbdefio->deferred_io(info, &fbdefio->pagereflist);
+ fbdefio->deferred_io(info, &fbdefio_state->pagereflist);
/* clear the list */
- list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+ list_for_each_entry_safe(pageref, next, &fbdefio_state->pagereflist, list)
fb_deferred_io_pageref_put(pageref, info);
mutex_unlock(&fbdefio_state->lock);
@@ -402,7 +412,6 @@ int fb_deferred_io_init(struct fb_info *info)
fbdefio_state->info = info;
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
- INIT_LIST_HEAD(&fbdefio->pagereflist);
if (fbdefio->delay == 0) /* set a default of 1 s */
fbdefio->delay = HZ;
@@ -431,11 +440,11 @@ void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
- fbdefio->mapping = file->f_mapping;
+ fbdefio_state->mapping = file->f_mapping;
file->f_mapping->a_ops = &fb_deferred_io_aops;
- fbdefio->open_count++;
+ fbdefio_state->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
@@ -446,16 +455,15 @@ static void fb_deferred_io_lastclose(struct fb_info *info)
void fb_deferred_io_release(struct fb_info *info)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
- if (!--fbdefio->open_count)
+ if (!--fbdefio_state->open_count)
fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
- struct fb_deferred_io *fbdefio = info->fbdefio;
struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
fb_deferred_io_lastclose(info);
@@ -469,6 +477,5 @@ void fb_deferred_io_cleanup(struct fb_info *info)
fb_deferred_io_state_put(fbdefio_state);
kvfree(info->pagerefs);
- fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index aed17567fe50..2791777f3a50 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -217,9 +217,6 @@ struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
bool sort_pagereflist; /* sort pagelist by offset */
- int open_count; /* number of opened files; protected by fb_info lock */
- struct list_head pagereflist; /* list of pagerefs for touched pages */
- struct address_space *mapping; /* page cache object for fb device */
/* callback */
struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH v2 4/4] fbdev: defio: Move pageref array to struct fb_deferred_io_state
2026-02-24 8:25 [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Thomas Zimmermann
` (2 preceding siblings ...)
2026-02-24 8:25 ` [PATCH v2 3/4] fbdev: defio: Move variable state into struct fb_deferred_io_state Thomas Zimmermann
@ 2026-02-24 8:25 ` Thomas Zimmermann
2026-02-24 22:02 ` [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Helge Deller
4 siblings, 0 replies; 6+ messages in thread
From: Thomas Zimmermann @ 2026-02-24 8:25 UTC (permalink / raw)
To: deller, simona, jayalk; +Cc: linux-fbdev, dri-devel, Thomas Zimmermann
The pageref array stores all pageref structures for a device's defio
helpers. Move it into struct fb_deferred_io_state to not expose it to
drivers.
v2:
- use kvzalloc_objs()
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
drivers/video/fbdev/core/fb_defio.c | 55 ++++++++++++++---------------
include/linux/fb.h | 2 --
2 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 35ac13727da1..a12dd25ab697 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -41,28 +41,46 @@ struct fb_deferred_io_state {
/* fields protected by lock */
struct fb_info *info;
struct list_head pagereflist; /* list of pagerefs for touched pages */
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
};
-static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
+static struct fb_deferred_io_state *fb_deferred_io_state_alloc(unsigned long len)
{
struct fb_deferred_io_state *fbdefio_state;
+ struct fb_deferred_io_pageref *pagerefs;
+ unsigned long npagerefs;
fbdefio_state = kzalloc_obj(*fbdefio_state);
if (!fbdefio_state)
return NULL;
+ npagerefs = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ /* alloc a page ref for each page of the display memory */
+ pagerefs = kvzalloc_objs(*pagerefs, npagerefs);
+ if (!pagerefs)
+ goto err_kfree;
+ fbdefio_state->npagerefs = npagerefs;
+ fbdefio_state->pagerefs = pagerefs;
+
kref_init(&fbdefio_state->ref);
mutex_init(&fbdefio_state->lock);
INIT_LIST_HEAD(&fbdefio_state->pagereflist);
return fbdefio_state;
+
+err_kfree:
+ kfree(fbdefio_state);
+ return NULL;
}
static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
{
WARN_ON(!list_empty(&fbdefio_state->pagereflist));
mutex_destroy(&fbdefio_state->lock);
+ kvfree(fbdefio_state->pagerefs);
kfree(fbdefio_state);
}
@@ -125,18 +143,19 @@ static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long
return page;
}
-static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
- unsigned long offset,
- struct page *page)
+static struct fb_deferred_io_pageref *
+fb_deferred_io_pageref_lookup(struct fb_deferred_io_state *fbdefio_state, unsigned long offset,
+ struct page *page)
{
+ struct fb_info *info = fbdefio_state->info;
unsigned long pgoff = offset >> PAGE_SHIFT;
struct fb_deferred_io_pageref *pageref;
- if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
+ if (fb_WARN_ON_ONCE(info, pgoff >= fbdefio_state->npagerefs))
return NULL; /* incorrect allocation size */
/* 1:1 mapping between pageref and page offset */
- pageref = &info->pagerefs[pgoff];
+ pageref = &fbdefio_state->pagerefs[pgoff];
if (pageref->page)
goto out;
@@ -160,7 +179,7 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info
struct list_head *pos = &fbdefio_state->pagereflist;
struct fb_deferred_io_pageref *pageref, *cur;
- pageref = fb_deferred_io_pageref_lookup(info, offset, page);
+ pageref = fb_deferred_io_pageref_lookup(fbdefio_state, offset, page);
if (!pageref)
return NULL;
@@ -397,16 +416,13 @@ int fb_deferred_io_init(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct fb_deferred_io_state *fbdefio_state;
- struct fb_deferred_io_pageref *pagerefs;
- unsigned long npagerefs;
- int ret;
BUG_ON(!fbdefio);
if (WARN_ON(!info->fix.smem_len))
return -EINVAL;
- fbdefio_state = fb_deferred_io_state_alloc();
+ fbdefio_state = fb_deferred_io_state_alloc(info->fix.smem_len);
if (!fbdefio_state)
return -ENOMEM;
fbdefio_state->info = info;
@@ -415,24 +431,9 @@ int fb_deferred_io_init(struct fb_info *info)
if (fbdefio->delay == 0) /* set a default of 1 s */
fbdefio->delay = HZ;
- npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);
-
- /* alloc a page ref for each page of the display memory */
- pagerefs = kvzalloc_objs(*pagerefs, npagerefs);
- if (!pagerefs) {
- ret = -ENOMEM;
- goto err;
- }
- info->npagerefs = npagerefs;
- info->pagerefs = pagerefs;
-
info->fbdefio_state = fbdefio_state;
return 0;
-
-err:
- fb_deferred_io_state_release(fbdefio_state);
- return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
@@ -475,7 +476,5 @@ void fb_deferred_io_cleanup(struct fb_info *info)
mutex_unlock(&fbdefio_state->lock);
fb_deferred_io_state_put(fbdefio_state);
-
- kvfree(info->pagerefs);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 2791777f3a50..b27943719fab 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -482,8 +482,6 @@ struct fb_info {
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
- unsigned long npagerefs;
- struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
struct fb_deferred_io_state *fbdefio_state;
#endif
--
2.52.0
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH v2 0/4] fbdev: defio: Protect against device/module removal
2026-02-24 8:25 [PATCH v2 0/4] fbdev: defio: Protect against device/module removal Thomas Zimmermann
` (3 preceding siblings ...)
2026-02-24 8:25 ` [PATCH v2 4/4] fbdev: defio: Move pageref array to " Thomas Zimmermann
@ 2026-02-24 22:02 ` Helge Deller
4 siblings, 0 replies; 6+ messages in thread
From: Helge Deller @ 2026-02-24 22:02 UTC (permalink / raw)
To: Thomas Zimmermann, simona; +Cc: linux-fbdev, dri-devel
On 2/24/26 09:25, Thomas Zimmermann wrote:
> There's a long-standing bug in defio where the framebuffer device or
> module gets removed while mmap'ed areas of the framebuffer memory
> persist in userspace. Page faults in the area then operate on undefined
> state.
>
> Patches 1 and 2 fix these problems. Patches 3 and 4 build upon the fix
> and put defio state into the correct places.
>
> v2:
> - use alloc_obj() functions
>
> Thomas Zimmermann (4):
> fbdev: defio: Disconnect deferred I/O from the lifetime of struct
> fb_info
> fbdev: defio: Keep module reference from VMAs
> fbdev: defio: Move variable state into struct fb_deferred_io_state
> fbdev: defio: Move pageref array to struct fb_deferred_io_state
>
> drivers/video/fbdev/core/fb_defio.c | 266 ++++++++++++++++++++--------
> include/linux/fb.h | 9 +-
> 2 files changed, 195 insertions(+), 80 deletions(-)
Series added to fbdev git tree for further testing.
Thanks!
Helge
^ permalink raw reply [flat|nested] 6+ messages in thread