linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: David Hildenbrand <david@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	nvdimm@lists.linux.dev, David Hildenbrand <david@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Juergen Gross <jgross@suse.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Alistair Popple <apopple@nvidia.com>,
	Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Christian Brauner <brauner@kernel.org>, Zi Yan <ziy@nvidia.com>,
	Baolin Wang <baolin.wang@linux.alibaba.com>,
	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	Nico Pache <npache@redhat.com>,
	Ryan Roberts <ryan.roberts@arm.com>, Dev Jain <dev.jain@arm.com>,
	Barry Song <baohua@kernel.org>, Vlastimil Babka <vbabka@suse.cz>,
	Mike Rapoport <rppt@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Michal Hocko <mhocko@suse.com>, Jann Horn <jannh@google.com>,
	Pedro Falcato <pfalcato@suse.de>
Subject: [PATCH RFC 12/14] mm: drop addr parameter from vm_normal_*_pmd()
Date: Tue, 17 Jun 2025 17:43:43 +0200	[thread overview]
Message-ID: <20250617154345.2494405-13-david@redhat.com> (raw)
In-Reply-To: <20250617154345.2494405-1-david@redhat.com>

The addr parameter of vm_normal_page_pmd()/vm_normal_folio_pmd() is no
longer required, so drop it and adjust all callers.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 fs/proc/task_mmu.c | 6 +++---
 include/linux/mm.h | 6 ++----
 mm/huge_memory.c   | 4 ++--
 mm/memory.c        | 8 +++-----
 mm/pagewalk.c      | 2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c4ad3083bbfa0..36ef67cdf7a3b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -861,7 +861,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct folio *folio;
 
 	if (pmd_present(*pmd)) {
-		page = vm_normal_page_pmd(vma, addr, *pmd);
+		page = vm_normal_page_pmd(vma, *pmd);
 		present = true;
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
@@ -2177,7 +2177,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
 			categories |= PAGE_IS_WRITTEN;
 
 		if (p->masks_of_interest & PAGE_IS_FILE) {
-			page = vm_normal_page_pmd(vma, addr, pmd);
+			page = vm_normal_page_pmd(vma, pmd);
 			if (page && !PageAnon(page))
 				categories |= PAGE_IS_FILE;
 		}
@@ -2942,7 +2942,7 @@ static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
 	if (!pmd_present(pmd))
 		return NULL;
 
-	page = vm_normal_page_pmd(vma, addr, pmd);
+	page = vm_normal_page_pmd(vma, pmd);
 	if (!page)
 		return NULL;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3f52871becd3f..ef709457c7076 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2359,10 +2359,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
-struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
-				  unsigned long addr, pmd_t pmd);
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t pmd);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd);
 
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67220c30e7818..bf2aed8d92ec2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1994,7 +1994,7 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 
 	if (!(vma->vm_flags & VM_SHARED)) {
 		/* See can_change_pte_writable(). */
-		page = vm_normal_page_pmd(vma, addr, pmd);
+		page = vm_normal_page_pmd(vma, pmd);
 		return page && PageAnon(page) && PageAnonExclusive(page);
 	}
 
@@ -2033,7 +2033,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	    can_change_pmd_writable(vma, vmf->address, pmd))
 		writable = true;
 
-	folio = vm_normal_folio_pmd(vma, haddr, pmd);
+	folio = vm_normal_folio_pmd(vma, pmd);
 	if (!folio)
 		goto out_map;
 
diff --git a/mm/memory.c b/mm/memory.c
index ace9c59e97181..34f961024e8e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -663,8 +663,7 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 }
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t pmd)
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd)
 {
 	unsigned long pfn = pmd_pfn(pmd);
 
@@ -676,10 +675,9 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 	return vm_normal_page_pfn(vma, pfn);
 }
 
-struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
-				  unsigned long addr, pmd_t pmd)
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd)
 {
-	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
+	struct page *page = vm_normal_page_pmd(vma, pmd);
 
 	if (page)
 		return page_folio(page);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 648038247a8d2..0edb7240d090c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -944,7 +944,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 			spin_unlock(ptl);
 			goto pte_table;
 		} else if (pmd_present(pmd)) {
-			page = vm_normal_page_pmd(vma, addr, pmd);
+			page = vm_normal_page_pmd(vma, pmd);
 			if (page) {
 				goto found;
 			} else if ((flags & FW_ZEROPAGE) &&
-- 
2.49.0



  parent reply	other threads:[~2025-06-17 15:44 UTC|newest]

Thread overview: 65+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-06-17 15:43 [PATCH RFC 00/14] mm: vm_normal_page*() + CoW PFNMAP improvements David Hildenbrand
2025-06-17 15:43 ` [PATCH RFC 01/14] mm/memory: drop highest_memmap_pfn sanity check in vm_normal_page() David Hildenbrand
2025-06-20 12:50   ` Oscar Salvador
2025-06-23 14:04     ` David Hildenbrand
2025-06-25  7:54       ` Oscar Salvador
2025-07-03 12:34       ` Lance Yang
2025-07-03 12:39         ` David Hildenbrand
2025-07-03 14:44           ` Lance Yang
2025-07-04 12:40             ` David Hildenbrand
2025-07-07  6:31               ` Hugh Dickins
2025-07-07 13:19                 ` David Hildenbrand
2025-07-08  2:52                   ` Hugh Dickins
2025-07-11 15:30                     ` David Hildenbrand
2025-07-11 18:49                       ` Hugh Dickins
2025-07-11 18:57                         ` David Hildenbrand
2025-06-25  7:55   ` Oscar Salvador
2025-07-03 14:50   ` Lance Yang
2025-06-17 15:43 ` [PATCH RFC 02/14] mm: drop highest_memmap_pfn David Hildenbrand
2025-06-20 13:04   ` Oscar Salvador
2025-06-20 18:11   ` Pedro Falcato
2025-06-17 15:43 ` [PATCH RFC 03/14] mm: compare pfns only if the entry is present when inserting pfns/pages David Hildenbrand
2025-06-20 13:27   ` Oscar Salvador
2025-06-23 19:22     ` David Hildenbrand
2025-06-20 18:24   ` Pedro Falcato
2025-06-23 19:19     ` David Hildenbrand
2025-06-17 15:43 ` [PATCH RFC 04/14] mm/huge_memory: move more common code into insert_pmd() David Hildenbrand
2025-06-20 14:12   ` Oscar Salvador
2025-07-07  2:48     ` Alistair Popple
2025-06-17 15:43 ` [PATCH RFC 05/14] mm/huge_memory: move more common code into insert_pud() David Hildenbrand
2025-06-20 14:15   ` Oscar Salvador
2025-07-07  2:51   ` Alistair Popple
2025-06-17 15:43 ` [PATCH RFC 06/14] mm/huge_memory: support huge zero folio in vmf_insert_folio_pmd() David Hildenbrand
2025-06-25  8:15   ` Oscar Salvador
2025-06-25  8:17     ` Oscar Salvador
2025-06-25  8:20   ` Oscar Salvador
2025-06-25  8:59     ` David Hildenbrand
2025-06-17 15:43 ` [PATCH RFC 07/14] fs/dax: use vmf_insert_folio_pmd() to insert the huge zero folio David Hildenbrand
2025-06-24  1:16   ` Alistair Popple
2025-06-25  9:03     ` David Hildenbrand
2025-07-04 13:22       ` David Hildenbrand
2025-07-07 11:50         ` Alistair Popple
2025-06-17 15:43 ` [PATCH RFC 08/14] mm/huge_memory: mark PMD mappings of the huge zero folio special David Hildenbrand
2025-06-25  8:32   ` Oscar Salvador
2025-07-14 12:41     ` David Hildenbrand
2025-06-17 15:43 ` [PATCH RFC 09/14] mm/memory: introduce is_huge_zero_pfn() and use it in vm_normal_page_pmd() David Hildenbrand
2025-06-25  8:37   ` Oscar Salvador
2025-06-17 15:43 ` [PATCH RFC 10/14] mm/memory: factor out common code from vm_normal_page_*() David Hildenbrand
2025-06-25  8:53   ` Oscar Salvador
2025-06-25  8:57     ` David Hildenbrand
2025-06-25  9:20       ` Oscar Salvador
2025-06-25 10:14         ` David Hildenbrand
2025-06-17 15:43 ` [PATCH RFC 11/14] mm: remove "horrible special case to handle copy-on-write behaviour" David Hildenbrand
2025-06-25  8:47   ` David Hildenbrand
2025-06-25  9:02     ` Oscar Salvador
2025-06-25  9:04       ` David Hildenbrand
2025-06-17 15:43 ` David Hildenbrand [this message]
2025-06-17 15:43 ` [PATCH RFC 13/14] mm: introduce and use vm_normal_page_pud() David Hildenbrand
2025-06-25  9:22   ` Oscar Salvador
2025-06-17 15:43 ` [PATCH RFC 14/14] mm: rename vm_ops->find_special_page() to vm_ops->find_normal_page() David Hildenbrand
2025-06-25  9:34   ` Oscar Salvador
2025-07-14 14:19     ` David Hildenbrand
2025-06-17 16:18 ` [PATCH RFC 00/14] mm: vm_normal_page*() + CoW PFNMAP improvements David Hildenbrand
2025-06-17 18:25   ` David Hildenbrand
2025-06-25  8:49 ` Lorenzo Stoakes
2025-06-25  8:55   ` David Hildenbrand

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250617154345.2494405-13-david@redhat.com \
    --to=david@redhat.com \
    --cc=Liam.Howlett@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=apopple@nvidia.com \
    --cc=baohua@kernel.org \
    --cc=baolin.wang@linux.alibaba.com \
    --cc=brauner@kernel.org \
    --cc=dan.j.williams@intel.com \
    --cc=dev.jain@arm.com \
    --cc=jack@suse.cz \
    --cc=jannh@google.com \
    --cc=jgross@suse.com \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=lorenzo.stoakes@oracle.com \
    --cc=mhocko@suse.com \
    --cc=npache@redhat.com \
    --cc=nvdimm@lists.linux.dev \
    --cc=oleksandr_tyshchenko@epam.com \
    --cc=pfalcato@suse.de \
    --cc=rppt@kernel.org \
    --cc=ryan.roberts@arm.com \
    --cc=sstabellini@kernel.org \
    --cc=surenb@google.com \
    --cc=vbabka@suse.cz \
    --cc=viro@zeniv.linux.org.uk \
    --cc=willy@infradead.org \
    --cc=ziy@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).