public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler
@ 2026-03-12 15:50 Pedro Demarchi Gomes
  2026-03-12 16:32 ` Boris Brezillon
  2026-03-14 19:05 ` kernel test robot
  0 siblings, 2 replies; 4+ messages in thread
From: Pedro Demarchi Gomes @ 2026-03-12 15:50 UTC (permalink / raw)
  To: Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
	Simona Vetter, Boris Brezillon, Loic Molinari
  Cc: dri-devel, linux-kernel, Pedro Demarchi Gomes

When running ./tools/testing/selftests/mm/split_huge_page_test multiple
times with /sys/kernel/mm/transparent_hugepage/shmem_enabled and
/sys/kernel/mm/transparent_hugepage/enabled set as always the following BUG
occurs:

[  232.728858] ------------[ cut here ]------------
[  232.729458] kernel BUG at mm/memory.c:2276!
[  232.729726] Oops: invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
[  232.730217] CPU: 19 UID: 60578 PID: 1497 Comm: llvmpipe-9 Not tainted 7.0.0-rc1mm-new+ #19 PREEMPT(lazy)
[  232.730855] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.17.0-9.fc43 06/10/2025
[  232.731360] RIP: 0010:walk_to_pmd+0x29e/0x3c0
[  232.731569] Code: d8 5b 5d 41 5c 41 5d 41 5e 41 5f c3 cc cc cc cc 48 89 ea 48 89 de 4c 89 f7 e8 ae 85 ff ff 85 c0 0f 84 1f fe ff ff 31 db eb d0 <0f> 0b 48 89 ea 48 89 de 4c 89 f7 e8 92 8b ff ff 85 c0 75 e8 48 b8
[  232.732614] RSP: 0000:ffff8881aa6ff9a8 EFLAGS: 00010282
[  232.732991] RAX: 8000000142e002e7 RBX: ffff8881433cae10 RCX: dffffc0000000000
[  232.733362] RDX: 0000000000000000 RSI: 00007fb47840b000 RDI: 8000000142e002e7
[  232.733801] RBP: 00007fb47840b000 R08: 0000000000000000 R09: 1ffff110354dff46
[  232.734168] R10: fffffbfff0cb921d R11: 00000000910da5ce R12: 1ffffffff0c1fcdd
[  232.734459] R13: 1ffffffff0c23f36 R14: ffff888171628040 R15: 0000000000000000
[  232.734861] FS:  00007fb4907f86c0(0000) GS:ffff888791f2c000(0000) knlGS:0000000000000000
[  232.735265] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  232.735548] CR2: 00007fb47840be00 CR3: 000000015e6dc000 CR4: 00000000000006f0
[  232.736031] Call Trace:
[  232.736273]  <TASK>
[  232.736500]  get_locked_pte+0x1f/0xa0
[  232.736878]  insert_pfn+0x9f/0x350
[  232.737190]  ? __pfx_pat_pagerange_is_ram+0x10/0x10
[  232.737614]  ? __pfx_insert_pfn+0x10/0x10
[  232.737990]  ? __pfx_css_rstat_updated+0x10/0x10
[  232.738281]  ? __pfx_pfn_modify_allowed+0x10/0x10
[  232.738552]  ? lookup_memtype+0x62/0x180
[  232.738761]  vmf_insert_pfn_prot+0x14b/0x340
[  232.739012]  ? __pfx_vmf_insert_pfn_prot+0x10/0x10
[  232.739247]  ? __pfx___might_resched+0x10/0x10
[  232.739475]  drm_gem_shmem_fault.cold+0x18/0x39
[  232.739677]  ? rcu_read_unlock+0x20/0x70
[  232.739882]  __do_fault+0x251/0x7b0
[  232.740028]  do_fault+0x6e1/0xc00
[  232.740167]  ? __lock_acquire+0x590/0xc40
[  232.740335]  handle_pte_fault+0x439/0x760
[  232.740498]  ? mtree_range_walk+0x252/0xae0
[  232.740669]  ? __pfx_handle_pte_fault+0x10/0x10
[  232.740899]  __handle_mm_fault+0xa02/0xf30
[  232.741066]  ? __pfx___handle_mm_fault+0x10/0x10
[  232.741255]  ? find_vma+0xa1/0x120
[  232.741403]  handle_mm_fault+0x2bf/0x8f0
[  232.741564]  do_user_addr_fault+0x2d3/0xed0
[  232.741736]  ? trace_page_fault_user+0x1bf/0x240
[  232.741969]  exc_page_fault+0x87/0x120
[  232.742124]  asm_exc_page_fault+0x26/0x30
[  232.742288] RIP: 0033:0x7fb4d73ed546
[  232.742441] Code: 66 41 0f 6f fb 66 44 0f 6d dc 66 44 0f 6f c6 66 41 0f 6d f1 66 0f 6c fc 66 45 0f 6c c1 66 44 0f 6f c9 66 0f 6d ca 66 0f db f0 <66> 0f df 04 08 66 44 0f 6c ca 66 45 0f db c2 66 44 0f df 10 66 44
[  232.743193] RSP: 002b:00007fb4907f68a0 EFLAGS: 00010206
[  232.743565] RAX: 00007fb47840aa00 RBX: 00007fb4d73ec070 RCX: 0000000000001400
[  232.743871] RDX: 0000000000002800 RSI: 0000000000003c00 RDI: 0000000000000001
[  232.744150] RBP: 0000000000000004 R08: 0000000000001400 R09: 00007fb4d73ec060
[  232.744433] R10: 000055f0261a4288 R11: 00007fb4c013da40 R12: 0000000000000008
[  232.744712] R13: 0000000000000000 R14: 4332322132212110 R15: 0000000000000004
[  232.746616]  </TASK>
[  232.746711] Modules linked in: nft_nat nft_masq veth bridge stp llc snd_seq_dummy snd_hrtimer snd_seq snd_seq_device snd_timer snd soundcore overlay rfkill nf_conntrack_netbios_ns nf_conntrack_broadcast nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nf_tables qrtr ppdev 9pnet_virtio 9pnet parport_pc i2c_piix4 netfs pcspkr parport i2c_smbus joydev sunrpc vfat fat loop dm_multipath nfnetlink vsock_loopback vmw_vsock_virtio_transport_common vmw_vsock_vmci_transport zram lz4hc_compress vmw_vmci lz4_compress vsock e1000 bochs serio_raw ata_generic pata_acpi scsi_dh_rdac scsi_dh_emc scsi_dh_alua i2c_dev fuse qemu_fw_cfg
[  232.749308] ---[ end trace 0000000000000000 ]---
[  232.749507] RIP: 0010:walk_to_pmd+0x29e/0x3c0
[  232.749692] Code: d8 5b 5d 41 5c 41 5d 41 5e 41 5f c3 cc cc cc cc 48 89 ea 48 89 de 4c 89 f7 e8 ae 85 ff ff 85 c0 0f 84 1f fe ff ff 31 db eb d0 <0f> 0b 48 89 ea 48 89 de 4c 89 f7 e8 92 8b ff ff 85 c0 75 e8 48 b8
[  232.750428] RSP: 0000:ffff8881aa6ff9a8 EFLAGS: 00010282
[  232.750645] RAX: 8000000142e002e7 RBX: ffff8881433cae10 RCX: dffffc0000000000
[  232.750954] RDX: 0000000000000000 RSI: 00007fb47840b000 RDI: 8000000142e002e7
[  232.751232] RBP: 00007fb47840b000 R08: 0000000000000000 R09: 1ffff110354dff46
[  232.751514] R10: fffffbfff0cb921d R11: 00000000910da5ce R12: 1ffffffff0c1fcdd
[  232.751837] R13: 1ffffffff0c23f36 R14: ffff888171628040 R15: 0000000000000000
[  232.752124] FS:  00007fb4907f86c0(0000) GS:ffff888791f2c000(0000) knlGS:0000000000000000
[  232.752441] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  232.752674] CR2: 00007fb47840be00 CR3: 000000015e6dc000 CR4: 00000000000006f0
[  232.752983] Kernel panic - not syncing: Fatal exception
[  232.753510] Kernel Offset: disabled
[  232.754643] ---[ end Kernel panic - not syncing: Fatal exception ]---

This happens when two concurrent page faults occur within the same PMD range.
One fault installs a PMD mapping through vmf_insert_pfn_pmd(), while the other
attempts to install a PTE mapping via vmf_insert_pfn(). The bug is
triggered because a huge PMD entry (pmd_trans_huge) is not expected when
walking the page table inside vmf_insert_pfn().

Avoid this race by adding a huge_fault callback to drm_gem_shmem_vm_ops so that
PMD-sized mappings are handled through the appropriate huge page fault path.

Fixes: 211b9a39f261 ("drm/shmem-helper: Map huge pages in fault handler")
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 59 +++++++++++++++++++-------
 1 file changed, 43 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7b5a49935ae4..4cd9080c598b 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -550,24 +550,53 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
-static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
-				      struct page *page)
+
+static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
+					   unsigned int order)
 {
-#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long paddr = pfn << PAGE_SHIFT;
-	bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	vm_fault_t ret = VM_FAULT_FALLBACK;
+	struct page **pages = shmem->pages;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	unsigned long paddr;
+	bool aligned;
+	struct page *page;
+
+	if (order != PMD_ORDER)
+		goto out_unlocked;
+
+	/* Offset to faulty address in the VMA. */
+	page_offset = vmf->pgoff - vma->vm_pgoff;
+
+	dma_resv_lock(shmem->base.resv, NULL);
+
+	if (page_offset >= num_pages ||
+	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
+	    shmem->madv < 0) {
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
+	page = pages[page_offset];
+	pfn = page_to_pfn(page);
+	paddr = pfn << PAGE_SHIFT;
+	aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
 
 	if (aligned &&
-	    pmd_none(*vmf->pmd) &&
 	    folio_test_pmd_mappable(page_folio(page))) {
 		pfn &= PMD_MASK >> PAGE_SHIFT;
-		if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
-			return true;
+		ret = vmf_insert_pfn_pmd(vmf, pfn, false);
 	}
-#endif
 
-	return false;
+ out:
+	dma_resv_unlock(shmem->base.resv);
+
+ out_unlocked:
+	return ret;
 }
 
 static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
@@ -593,11 +622,6 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 		goto out;
 	}
 
-	if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
-		ret = VM_FAULT_NOPAGE;
-		goto out;
-	}
-
 	pfn = page_to_pfn(pages[page_offset]);
 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
 
@@ -643,6 +667,9 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
 	.fault = drm_gem_shmem_fault,
+	#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+	.huge_fault = drm_gem_shmem_huge_fault,
+	#endif
 	.open = drm_gem_shmem_vm_open,
 	.close = drm_gem_shmem_vm_close,
 };
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler
  2026-03-12 15:50 [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler Pedro Demarchi Gomes
@ 2026-03-12 16:32 ` Boris Brezillon
  2026-03-13 11:32   ` Pedro Demarchi Gomes
  2026-03-14 19:05 ` kernel test robot
  1 sibling, 1 reply; 4+ messages in thread
From: Boris Brezillon @ 2026-03-12 16:32 UTC (permalink / raw)
  To: Pedro Demarchi Gomes
  Cc: Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
	Simona Vetter, Loic Molinari, dri-devel, linux-kernel

On Thu, 12 Mar 2026 12:50:27 -0300
Pedro Demarchi Gomes <pedrodemargomes@gmail.com> wrote:

> When running ./tools/testing/selftests/mm/split_huge_page_test multiple
> times with /sys/kernel/mm/transparent_hugepage/shmem_enabled and
> /sys/kernel/mm/transparent_hugepage/enabled set as always the following BUG
> occurs:
> 
> [  232.728858] ------------[ cut here ]------------
> [  232.729458] kernel BUG at mm/memory.c:2276!
> [  232.729726] Oops: invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
> [  232.730217] CPU: 19 UID: 60578 PID: 1497 Comm: llvmpipe-9 Not tainted 7.0.0-rc1mm-new+ #19 PREEMPT(lazy)
> [  232.730855] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.17.0-9.fc43 06/10/2025
> [  232.731360] RIP: 0010:walk_to_pmd+0x29e/0x3c0
> [  232.731569] Code: d8 5b 5d 41 5c 41 5d 41 5e 41 5f c3 cc cc cc cc 48 89 ea 48 89 de 4c 89 f7 e8 ae 85 ff ff 85 c0 0f 84 1f fe ff ff 31 db eb d0 <0f> 0b 48 89 ea 48 89 de 4c 89 f7 e8 92 8b ff ff 85 c0 75 e8 48 b8
> [  232.732614] RSP: 0000:ffff8881aa6ff9a8 EFLAGS: 00010282
> [  232.732991] RAX: 8000000142e002e7 RBX: ffff8881433cae10 RCX: dffffc0000000000
> [  232.733362] RDX: 0000000000000000 RSI: 00007fb47840b000 RDI: 8000000142e002e7
> [  232.733801] RBP: 00007fb47840b000 R08: 0000000000000000 R09: 1ffff110354dff46
> [  232.734168] R10: fffffbfff0cb921d R11: 00000000910da5ce R12: 1ffffffff0c1fcdd
> [  232.734459] R13: 1ffffffff0c23f36 R14: ffff888171628040 R15: 0000000000000000
> [  232.734861] FS:  00007fb4907f86c0(0000) GS:ffff888791f2c000(0000) knlGS:0000000000000000
> [  232.735265] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  232.735548] CR2: 00007fb47840be00 CR3: 000000015e6dc000 CR4: 00000000000006f0
> [  232.736031] Call Trace:
> [  232.736273]  <TASK>
> [  232.736500]  get_locked_pte+0x1f/0xa0
> [  232.736878]  insert_pfn+0x9f/0x350
> [  232.737190]  ? __pfx_pat_pagerange_is_ram+0x10/0x10
> [  232.737614]  ? __pfx_insert_pfn+0x10/0x10
> [  232.737990]  ? __pfx_css_rstat_updated+0x10/0x10
> [  232.738281]  ? __pfx_pfn_modify_allowed+0x10/0x10
> [  232.738552]  ? lookup_memtype+0x62/0x180
> [  232.738761]  vmf_insert_pfn_prot+0x14b/0x340
> [  232.739012]  ? __pfx_vmf_insert_pfn_prot+0x10/0x10
> [  232.739247]  ? __pfx___might_resched+0x10/0x10
> [  232.739475]  drm_gem_shmem_fault.cold+0x18/0x39
> [  232.739677]  ? rcu_read_unlock+0x20/0x70
> [  232.739882]  __do_fault+0x251/0x7b0
> [  232.740028]  do_fault+0x6e1/0xc00
> [  232.740167]  ? __lock_acquire+0x590/0xc40
> [  232.740335]  handle_pte_fault+0x439/0x760
> [  232.740498]  ? mtree_range_walk+0x252/0xae0
> [  232.740669]  ? __pfx_handle_pte_fault+0x10/0x10
> [  232.740899]  __handle_mm_fault+0xa02/0xf30
> [  232.741066]  ? __pfx___handle_mm_fault+0x10/0x10
> [  232.741255]  ? find_vma+0xa1/0x120
> [  232.741403]  handle_mm_fault+0x2bf/0x8f0
> [  232.741564]  do_user_addr_fault+0x2d3/0xed0
> [  232.741736]  ? trace_page_fault_user+0x1bf/0x240
> [  232.741969]  exc_page_fault+0x87/0x120
> [  232.742124]  asm_exc_page_fault+0x26/0x30
> [  232.742288] RIP: 0033:0x7fb4d73ed546
> [  232.742441] Code: 66 41 0f 6f fb 66 44 0f 6d dc 66 44 0f 6f c6 66 41 0f 6d f1 66 0f 6c fc 66 45 0f 6c c1 66 44 0f 6f c9 66 0f 6d ca 66 0f db f0 <66> 0f df 04 08 66 44 0f 6c ca 66 45 0f db c2 66 44 0f df 10 66 44
> [  232.743193] RSP: 002b:00007fb4907f68a0 EFLAGS: 00010206
> [  232.743565] RAX: 00007fb47840aa00 RBX: 00007fb4d73ec070 RCX: 0000000000001400
> [  232.743871] RDX: 0000000000002800 RSI: 0000000000003c00 RDI: 0000000000000001
> [  232.744150] RBP: 0000000000000004 R08: 0000000000001400 R09: 00007fb4d73ec060
> [  232.744433] R10: 000055f0261a4288 R11: 00007fb4c013da40 R12: 0000000000000008
> [  232.744712] R13: 0000000000000000 R14: 4332322132212110 R15: 0000000000000004
> [  232.746616]  </TASK>
> [  232.746711] Modules linked in: nft_nat nft_masq veth bridge stp llc snd_seq_dummy snd_hrtimer snd_seq snd_seq_device snd_timer snd soundcore overlay rfkill nf_conntrack_netbios_ns nf_conntrack_broadcast nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nf_tables qrtr ppdev 9pnet_virtio 9pnet parport_pc i2c_piix4 netfs pcspkr parport i2c_smbus joydev sunrpc vfat fat loop dm_multipath nfnetlink vsock_loopback vmw_vsock_virtio_transport_common vmw_vsock_vmci_transport zram lz4hc_compress vmw_vmci lz4_compress vsock e1000 bochs serio_raw ata_generic pata_acpi scsi_dh_rdac scsi_dh_emc scsi_dh_alua i2c_dev fuse qemu_fw_cfg
> [  232.749308] ---[ end trace 0000000000000000 ]---
> [  232.749507] RIP: 0010:walk_to_pmd+0x29e/0x3c0
> [  232.749692] Code: d8 5b 5d 41 5c 41 5d 41 5e 41 5f c3 cc cc cc cc 48 89 ea 48 89 de 4c 89 f7 e8 ae 85 ff ff 85 c0 0f 84 1f fe ff ff 31 db eb d0 <0f> 0b 48 89 ea 48 89 de 4c 89 f7 e8 92 8b ff ff 85 c0 75 e8 48 b8
> [  232.750428] RSP: 0000:ffff8881aa6ff9a8 EFLAGS: 00010282
> [  232.750645] RAX: 8000000142e002e7 RBX: ffff8881433cae10 RCX: dffffc0000000000
> [  232.750954] RDX: 0000000000000000 RSI: 00007fb47840b000 RDI: 8000000142e002e7
> [  232.751232] RBP: 00007fb47840b000 R08: 0000000000000000 R09: 1ffff110354dff46
> [  232.751514] R10: fffffbfff0cb921d R11: 00000000910da5ce R12: 1ffffffff0c1fcdd
> [  232.751837] R13: 1ffffffff0c23f36 R14: ffff888171628040 R15: 0000000000000000
> [  232.752124] FS:  00007fb4907f86c0(0000) GS:ffff888791f2c000(0000) knlGS:0000000000000000
> [  232.752441] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  232.752674] CR2: 00007fb47840be00 CR3: 000000015e6dc000 CR4: 00000000000006f0
> [  232.752983] Kernel panic - not syncing: Fatal exception
> [  232.753510] Kernel Offset: disabled
> [  232.754643] ---[ end Kernel panic - not syncing: Fatal exception ]---
> 
> This happens when two concurrent page faults occur within the same PMD range.
> One fault installs a PMD mapping through vmf_insert_pfn_pmd(), while the other
> attempts to install a PTE mapping via vmf_insert_pfn(). The bug is
> triggered because a huge PMD entry (pmd_trans_huge) is not expected when
> walking the page table inside vmf_insert_pfn().
> 
> Avoid this race by adding a huge_fault callback to drm_gem_shmem_vm_ops so that
> PMD-sized mappings are handled through the appropriate huge page fault path.

Yikes, looks like we got it wrong between v4 and v6 :-/: v4 had the
.huge_fault implementation, v5 tried moving to .map_pages, and v6 went
back to .fault, but augmented to support huge pages, which I guess is
wrong for the reasons you mentioned.

> 
> Fixes: 211b9a39f261 ("drm/shmem-helper: Map huge pages in fault handler")
> Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 59 +++++++++++++++++++-------
>  1 file changed, 43 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 7b5a49935ae4..4cd9080c598b 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -550,24 +550,53 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
>  
> -static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
> -				      struct page *page)
> +
> +static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
> +					   unsigned int order)
>  {
> -#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
> -	unsigned long pfn = page_to_pfn(page);
> -	unsigned long paddr = pfn << PAGE_SHIFT;
> -	bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
> +	struct vm_area_struct *vma = vmf->vma;
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +	loff_t num_pages = obj->size >> PAGE_SHIFT;
> +	vm_fault_t ret = VM_FAULT_FALLBACK;
> +	struct page **pages = shmem->pages;
> +	pgoff_t page_offset;
> +	unsigned long pfn;
> +	unsigned long paddr;
> +	bool aligned;
> +	struct page *page;
> +
> +	if (order != PMD_ORDER)
> +		goto out_unlocked;
> +
> +	/* Offset to faulty address in the VMA. */
> +	page_offset = vmf->pgoff - vma->vm_pgoff;
> +
> +	dma_resv_lock(shmem->base.resv, NULL);
> +
> +	if (page_offset >= num_pages ||
> +	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
> +	    shmem->madv < 0) {
> +		ret = VM_FAULT_SIGBUS;
> +		goto out;
> +	}
> +
> +	page = pages[page_offset];
> +	pfn = page_to_pfn(page);
> +	paddr = pfn << PAGE_SHIFT;
> +	aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
>  
>  	if (aligned &&
> -	    pmd_none(*vmf->pmd) &&
>  	    folio_test_pmd_mappable(page_folio(page))) {
>  		pfn &= PMD_MASK >> PAGE_SHIFT;
> -		if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
> -			return true;
> +		ret = vmf_insert_pfn_pmd(vmf, pfn, false);
>  	}
> -#endif
>  
> -	return false;
> + out:
> +	dma_resv_unlock(shmem->base.resv);
> +
> + out_unlocked:
> +	return ret;
>  }

Do we really need to duplicate everything? Can't we rename
drm_gem_shmem_fault() into drm_gem_shmem_any_fault() with a
`bool try_pmd` passed as an argument, and then have
drm_gem_shmem_[huge_]fault() as wrappers?

static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
					   unsigned int order)
{
	if (order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	return drm_gem_shmem_any_fault(vmf, true);
}

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	return drm_gem_shmem_any_fault(vmf, false);
}

>  
>  static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> @@ -593,11 +622,6 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>  		goto out;
>  	}
>  
> -	if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
> -		ret = VM_FAULT_NOPAGE;
> -		goto out;
> -	}
> -
>  	pfn = page_to_pfn(pages[page_offset]);
>  	ret = vmf_insert_pfn(vma, vmf->address, pfn);
>  
> @@ -643,6 +667,9 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>  
>  const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>  	.fault = drm_gem_shmem_fault,
> +	#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
> +	.huge_fault = drm_gem_shmem_huge_fault,
> +	#endif

Let's keep the #ifdef unindented:

#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
	.huge_fault = drm_gem_shmem_huge_fault,
#endif

>  	.open = drm_gem_shmem_vm_open,
>  	.close = drm_gem_shmem_vm_close,
>  };


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler
  2026-03-12 16:32 ` Boris Brezillon
@ 2026-03-13 11:32   ` Pedro Demarchi Gomes
  0 siblings, 0 replies; 4+ messages in thread
From: Pedro Demarchi Gomes @ 2026-03-13 11:32 UTC (permalink / raw)
  To: Boris Brezillon
  Cc: Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
	Simona Vetter, Loic Molinari, dri-devel, linux-kernel

On Thu, Mar 12, 2026 at 05:32:27PM +0100, Boris Brezillon wrote: 
> Do we really need to duplicate everything? Can't we rename
> drm_gem_shmem_fault() into drm_gem_shmem_any_fault() with a
> `bool try_pmd` passed as an argument, and then have
> drm_gem_shmem_[huge_]fault() as wrappers?
> 
> static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
> 					   unsigned int order)
> {
> 	if (order != PMD_ORDER)
> 		return VM_FAULT_FALLBACK;
> 
> 	return drm_gem_shmem_any_fault(vmf, true);
> }
> 
> static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> {
> 	return drm_gem_shmem_any_fault(vmf, false);
> }
> 

Ack.

> > @@ -643,6 +667,9 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >  
> >  const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> >  	.fault = drm_gem_shmem_fault,
> > +	#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
> > +	.huge_fault = drm_gem_shmem_huge_fault,
> > +	#endif
> 
> Let's keep the #ifdef unindented:
> 
> #ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
> 	.huge_fault = drm_gem_shmem_huge_fault,
> #endif
> 
> >  	.open = drm_gem_shmem_vm_open,
> >  	.close = drm_gem_shmem_vm_close,
> >  };
>

Ack.
I will send a v2 addressing these issues.
Thanks.


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler
  2026-03-12 15:50 [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler Pedro Demarchi Gomes
  2026-03-12 16:32 ` Boris Brezillon
@ 2026-03-14 19:05 ` kernel test robot
  1 sibling, 0 replies; 4+ messages in thread
From: kernel test robot @ 2026-03-14 19:05 UTC (permalink / raw)
  To: Pedro Demarchi Gomes, Maarten Lankhorst, Maxime Ripard,
	Thomas Zimmermann, David Airlie, Simona Vetter, Boris Brezillon,
	Loic Molinari
  Cc: oe-kbuild-all, dri-devel, linux-kernel, Pedro Demarchi Gomes

Hi Pedro,

kernel test robot noticed the following build warnings:

[auto build test WARNING on linus/master]
[also build test WARNING on v7.0-rc3]
[cannot apply to drm-misc/drm-misc-next next-20260311]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Pedro-Demarchi-Gomes/drm-shmem-helper-Fix-Map-huge-page-mapping-in-fault-handler/20260314-094433
base:   linus/master
patch link:    https://lore.kernel.org/r/20260312155027.1682606-1-pedrodemargomes%40gmail.com
patch subject: [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler
config: m68k-allmodconfig (https://download.01.org/0day-ci/archive/20260315/202603150233.7ZFf6Fsm-lkp@intel.com/config)
compiler: m68k-linux-gcc (GCC) 15.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260315/202603150233.7ZFf6Fsm-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603150233.7ZFf6Fsm-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/gpu/drm/drm_gem_shmem_helper.c:554:19: warning: 'drm_gem_shmem_huge_fault' defined but not used [-Wunused-function]
     554 | static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
         |                   ^~~~~~~~~~~~~~~~~~~~~~~~


vim +/drm_gem_shmem_huge_fault +554 drivers/gpu/drm/drm_gem_shmem_helper.c

   552	
   553	
 > 554	static vm_fault_t drm_gem_shmem_huge_fault(struct vm_fault *vmf,
   555						   unsigned int order)
   556	{
   557		struct vm_area_struct *vma = vmf->vma;
   558		struct drm_gem_object *obj = vma->vm_private_data;
   559		struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
   560		loff_t num_pages = obj->size >> PAGE_SHIFT;
   561		vm_fault_t ret = VM_FAULT_FALLBACK;
   562		struct page **pages = shmem->pages;
   563		pgoff_t page_offset;
   564		unsigned long pfn;
   565		unsigned long paddr;
   566		bool aligned;
   567		struct page *page;
   568	
   569		if (order != PMD_ORDER)
   570			goto out_unlocked;
   571	
   572		/* Offset to faulty address in the VMA. */
   573		page_offset = vmf->pgoff - vma->vm_pgoff;
   574	
   575		dma_resv_lock(shmem->base.resv, NULL);
   576	
   577		if (page_offset >= num_pages ||
   578		    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
   579		    shmem->madv < 0) {
   580			ret = VM_FAULT_SIGBUS;
   581			goto out;
   582		}
   583	
   584		page = pages[page_offset];
   585		pfn = page_to_pfn(page);
   586		paddr = pfn << PAGE_SHIFT;
   587		aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
   588	
   589		if (aligned &&
   590		    folio_test_pmd_mappable(page_folio(page))) {
   591			pfn &= PMD_MASK >> PAGE_SHIFT;
   592			ret = vmf_insert_pfn_pmd(vmf, pfn, false);
   593		}
   594	
   595	 out:
   596		dma_resv_unlock(shmem->base.resv);
   597	
   598	 out_unlocked:
   599		return ret;
   600	}
   601	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-03-14 19:06 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed
-- links below jump to the message on this page --
2026-03-12 15:50 [PATCH] drm/shmem-helper: Fix Map huge page mapping in fault handler Pedro Demarchi Gomes
2026-03-12 16:32 ` Boris Brezillon
2026-03-13 11:32   ` Pedro Demarchi Gomes
2026-03-14 19:05 ` kernel test robot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox