From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH 3/3] mm: Rename vma_pgoff_address back to vma_address
Date: Thu, 28 Mar 2024 22:58:29 +0000
Message-ID: <20240328225831.1765286-4-willy@infradead.org>
In-Reply-To: <20240328225831.1765286-1-willy@infradead.org>
With all callers converted, we can use the nice shorter name.
Take this opportunity to reorder the arguments into the logical
order (larger object first).
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
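A quick before/after sketch of the calling convention, for reviewers
skimming the conversion (the variable names are illustrative; the
pattern matches the call sites converted below):

	/* old: page offset first, vma last */
	addr = vma_pgoff_address(pgoff, nr_pages, vma);

	/* new: the vma, the larger object, comes first */
	addr = vma_address(vma, pgoff, nr_pages);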
mm/internal.h | 9 ++++-----
mm/memory-failure.c | 2 +-
mm/page_vma_mapped.c | 2 +-
mm/rmap.c | 12 ++++++------
4 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e312cb9f7368..19e6ddbe7134 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -804,17 +804,16 @@ void mlock_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
* @pgoff: The page offset within its object.
* @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
*
* If any page in this range is mapped by this VMA, return the first address
* where any of these pages appear. Otherwise, return -EFAULT.
*/
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
- struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+ pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bdeeb4d2b584..07d40d40ec96 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -443,7 +443,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
if (is_zone_device_page(p)) {
if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
- tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+ tk->addr = vma_address(vma, fsdax_pgoff, 1);
tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
} else
tk->size_shift = page_shift(compound_head(p));
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ac48d6284bad..53b8868ede61 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.flags = PVMW_SYNC,
};
- pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+ pvmw.address = vma_address(vma, pgoff, 1);
if (pvmw.address == -EFAULT)
return 0;
if (!page_vma_mapped_walk(&pvmw))
diff --git a/mm/rmap.c b/mm/rmap.c
index 4b08b1a06688..56b313aa2ebf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
/* The !page__anon_vma above handles KSM folios */
pgoff = folio->index + folio_page_idx(folio, page);
- return vma_pgoff_address(pgoff, 1, vma);
+ return vma_address(vma, pgoff, 1);
}
/*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
if (invalid_mkclean_vma(vma, NULL))
return 0;
- pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+ pvmw.address = vma_address(vma, pgoff, nr_pages);
VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
- unsigned long address = vma_pgoff_address(pgoff_start,
- folio_nr_pages(folio), vma);
+ unsigned long address = vma_address(vma, pgoff_start,
+ folio_nr_pages(folio));
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
lookup:
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
- unsigned long address = vma_pgoff_address(pgoff_start,
- folio_nr_pages(folio), vma);
+ unsigned long address = vma_address(vma, pgoff_start,
+ folio_nr_pages(folio));
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
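As the kernel-doc above states, the helper returns the first virtual
address at which any page of the range is mapped by the VMA, or -EFAULT
if none is. A minimal hypothetical caller, following the same pattern
as the converted sites (everything here except vma_address() itself is
illustrative):

	/* is any page of this range mapped by this VMA? */
	unsigned long address = vma_address(vma, pgoff, nr_pages);

	if (address == -EFAULT)
		return;		/* not mapped here; nothing to do */
	/* ... otherwise operate on the mapping starting at address ... */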
--
2.43.0