* [PATCH v2 0/7] page->index removals in mm
@ 2024-10-05 20:01 Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 1/7] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
` (6 more replies)
0 siblings, 7 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
As part of shrinking struct page, we need to stop using page->index.
This patchset gets rid of most of the remaining references to page->index
in mm and increases the number of functions which take a const
folio/page pointer. It shrinks the text segment of mm by a few
hundred bytes in my test config, probably mostly from removing calls to
compound_head() in page_to_pgoff().
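For reference, the core interface change (from patch 1) is roughly:

	/* before: had to call compound_head() to find the head page */
	static inline pgoff_t page_to_pgoff(struct page *page);

	/* after: the caller passes the folio it already has */
	static inline pgoff_t page_pgoff(const struct folio *folio,
			const struct page *page);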
v2:
- Add the bootmem patch to the series
https://lore.kernel.org/linux-mm/20240912185602.2342148-1-willy@infradead.org/
- Expand the scope of the page_address_in_vma() patch
- Fix the build error reported by lkp@intel.com
v1 here: https://lore.kernel.org/linux-mm/20240723153503.1669586-1-willy@infradead.org/
Matthew Wilcox (Oracle) (7):
mm: Convert page_to_pgoff() to page_pgoff()
mm: Use page_pgoff() in more places
mm: Renovate page_address_in_vma()
mm: Mass constification of folio/page pointers
bootmem: Stop using page->index
mm: Remove references to page->index in huge_memory.c
mm: Use page->private instead of page->index in percpu
arch/x86/mm/init_64.c | 9 ++++----
include/linux/bootmem_info.h | 25 +++++++++++++-------
include/linux/ksm.h | 7 +++---
include/linux/mm.h | 2 +-
include/linux/pagemap.h | 31 +++++++++++++------------
include/linux/rmap.h | 17 ++++++--------
kernel/futex/core.c | 2 +-
mm/bootmem_info.c | 11 +++++----
mm/huge_memory.c | 18 +++++++--------
mm/internal.h | 9 ++++----
mm/ksm.c | 12 +++++-----
mm/memory-failure.c | 30 ++++++++++++------------
mm/mempolicy.c | 2 +-
mm/page_vma_mapped.c | 8 +++----
mm/percpu.c | 4 ++--
mm/rmap.c | 44 +++++++++++++++++++++++-------------
mm/sparse.c | 8 +++----
mm/util.c | 2 +-
18 files changed, 133 insertions(+), 108 deletions(-)
--
2.43.0
* [PATCH v2 1/7] mm: Convert page_to_pgoff() to page_pgoff()
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 2/7] mm: Use page_pgoff() in more places Matthew Wilcox (Oracle)
` (5 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
Change the function signature to pass in the folio, as all three
callers already have it. This removes a reference to page->index, which
we're trying to get rid of, and adds kernel-doc.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 2 +-
include/linux/pagemap.h | 31 +++++++++++++++++--------------
mm/memory-failure.c | 4 ++--
mm/rmap.c | 2 +-
4 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ecf63d2b0582..664c01850c87 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1895,7 +1895,7 @@ static inline unsigned long page_to_section(const struct page *page)
*
* Return: The Page Frame Number of the first page in the folio.
*/
-static inline unsigned long folio_pfn(struct folio *folio)
+static inline unsigned long folio_pfn(const struct folio *folio)
{
return page_to_pfn(&folio->page);
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 68a5f1ff3301..bcf0865a38ae 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1011,22 +1011,25 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
return read_cache_folio(mapping, index, NULL, file);
}
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE. For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE. This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+ const struct page *page)
{
- struct page *head;
-
- if (likely(!PageTransTail(page)))
- return page->index;
-
- head = compound_head(page);
- /*
- * We don't initialize ->index for tail pages: calculate based on
- * head page
- */
- return head->index + page - head;
+ return folio->index + folio_page_idx(folio, page);
}
/*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 96ce31e5a203..58a3d80961a4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -617,7 +617,7 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
if (av == NULL) /* Not actually mapped anymore */
return;
- pgoff = page_to_pgoff(page);
+ pgoff = page_pgoff(folio, page);
rcu_read_lock();
for_each_process(tsk) {
struct vm_area_struct *vma;
@@ -653,7 +653,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
i_mmap_lock_read(mapping);
rcu_read_lock();
- pgoff = page_to_pgoff(page);
+ pgoff = page_pgoff(folio, page);
for_each_process(tsk) {
struct task_struct *t = task_early_kill(tsk, force_early);
unsigned long addr;
diff --git a/mm/rmap.c b/mm/rmap.c
index a8797d1b3d49..3b11f8b6935d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1280,7 +1280,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
*/
VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
folio);
- VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+ VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
page);
}
--
2.43.0
* [PATCH v2 2/7] mm: Use page_pgoff() in more places
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 1/7] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 3/7] mm: Renovate page_address_in_vma() Matthew Wilcox (Oracle)
` (4 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
There are several places which currently open-code page_pgoff();
convert them to call it.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
kernel/futex/core.c | 2 +-
mm/page_vma_mapped.c | 3 +--
mm/rmap.c | 4 +---
3 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 136768ae2637..342dc4dd328b 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -399,7 +399,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
- key->shared.pgoff = folio->index + folio_page_idx(folio, page);
+ key->shared.pgoff = page_pgoff(folio, page);
rcu_read_unlock();
}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ae5cc42aa208..ade3c6833587 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -328,7 +328,6 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
struct folio *folio = page_folio(page);
- pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
struct page_vma_mapped_walk pvmw = {
.pfn = page_to_pfn(page),
.nr_pages = 1,
@@ -336,7 +335,7 @@ unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.flags = PVMW_SYNC,
};
- pvmw.address = vma_address(vma, pgoff, 1);
+ pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
if (pvmw.address == -EFAULT)
goto out;
if (!page_vma_mapped_walk(&pvmw))
diff --git a/mm/rmap.c b/mm/rmap.c
index 3b11f8b6935d..90df71c640bf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -775,7 +775,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
struct folio *folio = page_folio(page);
- pgoff_t pgoff;
if (folio_test_anon(folio)) {
struct anon_vma *page__anon_vma = folio_anon_vma(folio);
@@ -793,8 +792,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
}
/* The !page__anon_vma above handles KSM folios */
- pgoff = folio->index + folio_page_idx(folio, page);
- return vma_address(vma, pgoff, 1);
+ return vma_address(vma, page_pgoff(folio, page), 1);
}
/*
--
2.43.0
* [PATCH v2 3/7] mm: Renovate page_address_in_vma()
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 1/7] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 2/7] mm: Use page_pgoff() in more places Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 4/7] mm: Mass constification of folio/page pointers Matthew Wilcox (Oracle)
` (3 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
This function doesn't modify any of its arguments, so if we make a few
other functions take const pointers, we can make page_address_in_vma()
take const pointers too. All of its callers have the containing folio
already, so pass that in as an argument instead of recalculating it.
Also add kernel-doc.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/rmap.h | 7 ++-----
mm/internal.h | 4 ++--
mm/ksm.c | 7 +++----
mm/memory-failure.c | 2 +-
mm/mempolicy.c | 2 +-
mm/rmap.c | 27 ++++++++++++++++++++-------
mm/util.c | 2 +-
7 files changed, 30 insertions(+), 21 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d5e93e44322e..78923015a2e8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
}
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
-
-/*
- * Used by swapoff to help locate where page is expected in vma.
- */
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *, const struct vm_area_struct *);
/*
* Cleans the PTEs of shared mappings.
diff --git a/mm/internal.h b/mm/internal.h
index 93083bbeeefa..fffa9df41495 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -796,7 +796,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
}
/* mm/util.c */
-struct anon_vma *folio_anon_vma(struct folio *folio);
+struct anon_vma *folio_anon_vma(const struct folio *folio);
#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
@@ -914,7 +914,7 @@ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
* If any page in this range is mapped by this VMA, return the first address
* where any of these pages appear. Otherwise, return -EFAULT.
*/
-static inline unsigned long vma_address(struct vm_area_struct *vma,
+static inline unsigned long vma_address(const struct vm_area_struct *vma,
pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;
diff --git a/mm/ksm.c b/mm/ksm.c
index a2e2a521df0a..2bbb321f92ac 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1257,7 +1257,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
if (WARN_ON_ONCE(folio_test_large(folio)))
return err;
- pvmw.address = page_address_in_vma(&folio->page, vma);
+ pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
if (pvmw.address == -EFAULT)
goto out;
@@ -1341,7 +1341,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
{
struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm;
- struct folio *folio;
+ struct folio *folio = page_folio(page);
pmd_t *pmd;
pmd_t pmde;
pte_t *ptep;
@@ -1351,7 +1351,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
int err = -EFAULT;
struct mmu_notifier_range range;
- addr = page_address_in_vma(page, vma);
+ addr = page_address_in_vma(folio, page, vma);
if (addr == -EFAULT)
goto out;
@@ -1417,7 +1417,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
ptep_clear_flush(vma, addr, ptep);
set_pte_at(mm, addr, ptep, newpte);
- folio = page_folio(page);
folio_remove_rmap_pte(folio, page, vma);
if (!folio_mapped(folio))
folio_free_swap(folio);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 58a3d80961a4..ea9d883c01c1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -671,7 +671,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
*/
if (vma->vm_mm != t->mm)
continue;
- addr = page_address_in_vma(page, vma);
+ addr = page_address_in_vma(folio, page, vma);
add_to_kill_anon_file(t, page, vma, to_kill, addr);
}
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b646fab3e45e..b92113d27f63 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1367,7 +1367,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_entry_is_head(folio, &pagelist, lru)) {
vma_iter_init(&vmi, mm, start);
for_each_vma_range(vmi, vma, end) {
- addr = page_address_in_vma(
+ addr = page_address_in_vma(folio,
folio_page(folio, 0), vma);
if (addr != -EFAULT)
break;
diff --git a/mm/rmap.c b/mm/rmap.c
index 90df71c640bf..a7b4f9ba9a14 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -768,14 +768,27 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-/*
- * At what user virtual address is page expected in vma?
- * Caller should check the page is actually part of the vma.
+/**
+ * page_address_in_vma - The virtual address of a page in this VMA.
+ * @folio: The folio containing the page.
+ * @page: The page within the folio.
+ * @vma: The VMA we need to know the address in.
+ *
+ * Calculates the user virtual address of this page in the specified VMA.
+ * It is the caller's responsibility to check the page is actually
+ * within the VMA. There may not currently be a PTE pointing at this
+ * page, but if a page fault occurs at this address, this is the page
+ * which will be accessed.
+ *
+ * Context: Caller should hold a reference to the folio. Caller should
+ * hold a lock (e.g. the i_mmap_lock or the mmap_lock) which keeps the
+ * VMA from being altered.
+ *
+ * Return: The virtual address corresponding to this page in the VMA.
*/
-unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *page, const struct vm_area_struct *vma)
{
- struct folio *folio = page_folio(page);
-
if (folio_test_anon(folio)) {
struct anon_vma *page__anon_vma = folio_anon_vma(folio);
/*
@@ -791,7 +804,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
return -EFAULT;
}
- /* The !page__anon_vma above handles KSM folios */
+ /* KSM folios don't reach here because of the !page__anon_vma check */
return vma_address(vma, page_pgoff(folio, page), 1);
}
diff --git a/mm/util.c b/mm/util.c
index 4f1275023eb7..60017d2a9e48 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -820,7 +820,7 @@ void *vcalloc_noprof(size_t n, size_t size)
}
EXPORT_SYMBOL(vcalloc_noprof);
-struct anon_vma *folio_anon_vma(struct folio *folio)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
--
2.43.0
* [PATCH v2 4/7] mm: Mass constification of folio/page pointers
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
` (2 preceding siblings ...)
2024-10-05 20:01 ` [PATCH v2 3/7] mm: Renovate page_address_in_vma() Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 5/7] bootmem: Stop using page->index Matthew Wilcox (Oracle)
` (2 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
Now that page_pgoff() takes const pointers, we can constify the
pointers to a lot of functions.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/ksm.h | 7 ++++---
include/linux/rmap.h | 10 +++++-----
mm/internal.h | 5 +++--
mm/ksm.c | 5 +++--
mm/memory-failure.c | 24 +++++++++++++-----------
mm/page_vma_mapped.c | 5 +++--
mm/rmap.c | 11 ++++++-----
7 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 11690dacd986..c4a8891f6e7d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -92,7 +92,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
@@ -125,8 +125,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}
-static inline void collect_procs_ksm(struct folio *folio, struct page *page,
- struct list_head *to_kill, int force_early)
+static inline void collect_procs_ksm(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
{
}
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 78923015a2e8..683a04088f3f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *folio_get_anon_vma(struct folio *folio);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -194,8 +194,8 @@ enum rmap_level {
RMAP_LEVEL_PMD,
};
-static inline void __folio_rmap_sanity_checks(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+static inline void __folio_rmap_sanity_checks(const struct folio *folio,
+ const struct page *page, int nr_pages, enum rmap_level level)
{
/* hugetlb folios are handled separately. */
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -771,14 +771,14 @@ struct rmap_walk_control {
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct folio *folio);
- struct anon_vma *(*anon_lock)(struct folio *folio,
+ struct anon_vma *(*anon_lock)(const struct folio *folio,
struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
diff --git a/mm/internal.h b/mm/internal.h
index fffa9df41495..71a30e779223 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1072,10 +1072,11 @@ void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long ksm_addr);
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(const struct page *page,
+ struct vm_area_struct *vma);
#else
static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
diff --git a/mm/ksm.c b/mm/ksm.c
index 2bbb321f92ac..1fed2e3e01e0 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1052,7 +1052,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
return err;
}
-static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
+static inline
+struct ksm_stable_node *folio_stable_node(const struct folio *folio)
{
return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}
@@ -3066,7 +3067,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
/*
* Collect processes when the error hit an ksm page.
*/
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early)
{
struct ksm_stable_node *stable_node;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ea9d883c01c1..7ce7ba8586f5 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -445,7 +445,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
*/
-static void __add_to_kill(struct task_struct *tsk, struct page *p,
+static void __add_to_kill(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long addr)
{
@@ -461,7 +461,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
if (is_zone_device_page(p))
tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
else
- tk->size_shift = page_shift(compound_head(p));
+ tk->size_shift = folio_shift(page_folio(p));
/*
* Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -486,7 +486,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
list_add_tail(&tk->nd, to_kill);
}
-static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
+static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long addr)
{
@@ -509,7 +509,7 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
return false;
}
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long addr)
{
@@ -606,8 +606,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
/*
* Collect processes when the error hit an anonymous page.
*/
-static void collect_procs_anon(struct folio *folio, struct page *page,
- struct list_head *to_kill, int force_early)
+static void collect_procs_anon(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
{
struct task_struct *tsk;
struct anon_vma *av;
@@ -643,8 +644,9 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
/*
* Collect processes when the error hit a file mapped page.
*/
-static void collect_procs_file(struct folio *folio, struct page *page,
- struct list_head *to_kill, int force_early)
+static void collect_procs_file(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -680,7 +682,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
}
#ifdef CONFIG_FS_DAX
-static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
+static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma,
struct list_head *to_kill, pgoff_t pgoff)
{
@@ -691,7 +693,7 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
/*
* Collect processes when the error hit a fsdax page.
*/
-static void collect_procs_fsdax(struct page *page,
+static void collect_procs_fsdax(const struct page *page,
struct address_space *mapping, pgoff_t pgoff,
struct list_head *to_kill, bool pre_remove)
{
@@ -725,7 +727,7 @@ static void collect_procs_fsdax(struct page *page,
/*
* Collect the processes who have the corrupted page mapped to kill.
*/
-static void collect_procs(struct folio *folio, struct page *page,
+static void collect_procs(const struct folio *folio, const struct page *page,
struct list_head *tokill, int force_early)
{
if (!folio->mapping)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ade3c6833587..82e20dbbedb7 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -325,9 +325,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
* outside the VMA or not present, returns -EFAULT.
* Only valid for normal file or anonymous VMAs.
*/
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+ struct vm_area_struct *vma)
{
- struct folio *folio = page_folio(page);
+ const struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.pfn = page_to_pfn(page),
.nr_pages = 1,
diff --git a/mm/rmap.c b/mm/rmap.c
index a7b4f9ba9a14..2c561b1e52cc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,7 +497,7 @@ void __init anon_vma_init(void)
* concurrently without folio lock protection). See folio_lock_anon_vma_read()
* which has already covered that, and comment above remap_pages().
*/
-struct anon_vma *folio_get_anon_vma(struct folio *folio)
+struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
@@ -541,7 +541,7 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio)
* reference like with folio_get_anon_vma() and then block on the mutex
* on !rwc->try_lock case.
*/
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma = NULL;
@@ -1275,8 +1275,9 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*/
-static void __page_check_anon_rmap(struct folio *folio, struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static void __page_check_anon_rmap(const struct folio *folio,
+ const struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
{
/*
* The page's anon-rmap details (mapping and index) are guaranteed to
@@ -2573,7 +2574,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
anon_vma_free(root);
}
-static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
+static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
--
2.43.0
* [PATCH v2 5/7] bootmem: Stop using page->index
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
` (3 preceding siblings ...)
2024-10-05 20:01 ` [PATCH v2 4/7] mm: Mass constification of folio/page pointers Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-08 18:48 ` kernel test robot
2024-10-08 19:29 ` kernel test robot
2024-10-05 20:01 ` [PATCH v2 6/7] mm: Remove references to page->index in huge_memory.c Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 7/7] mm: Use page->private instead of page->index in percpu Matthew Wilcox (Oracle)
6 siblings, 2 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
Encode the type into the bottom four bits of page->private and the
info into the remaining bits. Also turn the bootmem type into a
named enum.
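To illustrate the encoding (a sketch distilled from the helpers added
below, not extra code in the patch):

	/*
	 * page->private layout:
	 *   bits 0-3: enum bootmem_type (SECTION_INFO, MIX_SECTION_INFO, ...)
	 *   bits 4.. : the info value (e.g. a section or node number)
	 */
	set_page_private(page, info << 4 | type);	/* get_page_bootmem() */
	type = page_private(page) & 0xf;		/* bootmem_type() */
	info = page_private(page) >> 4;			/* bootmem_info() */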
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
arch/x86/mm/init_64.c | 9 ++++-----
include/linux/bootmem_info.h | 25 +++++++++++++++++--------
mm/bootmem_info.c | 11 ++++++-----
mm/sparse.c | 8 ++++----
4 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ff253648706f..4d5fde324136 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -987,13 +987,12 @@ int arch_add_memory(int nid, u64 start, u64 size,
static void __meminit free_pagetable(struct page *page, int order)
{
- unsigned long magic;
- unsigned int nr_pages = 1 << order;
-
/* bootmem page has reserved flag */
if (PageReserved(page)) {
- magic = page->index;
- if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
+ enum bootmem_type type = bootmem_type(page);
+ unsigned long nr_pages = 1 << order;
+
+ if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
while (nr_pages--)
put_page_bootmem(page++);
} else
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index cffa38a73618..e2fe5de93dcc 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -6,11 +6,10 @@
#include <linux/kmemleak.h>
/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * Types for free bootmem stored in the low bits of page->private.
*/
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+enum bootmem_type {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1,
SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
MIX_SECTION_INFO,
NODE_INFO,
@@ -21,9 +20,19 @@ enum {
void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
void get_page_bootmem(unsigned long info, struct page *page,
- unsigned long type);
+ enum bootmem_type type);
void put_page_bootmem(struct page *page);
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return (unsigned long)page->private & 0xf;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return (unsigned long)page->private >> 4;
+}
+
/*
* Any memory allocated via the memblock allocator and not via the
* buddy will be marked reserved already in the memmap. For those
@@ -31,7 +40,7 @@ void put_page_bootmem(struct page *page);
*/
static inline void free_bootmem_page(struct page *page)
{
- unsigned long magic = page->index;
+ enum bootmem_type type = bootmem_type(page);
/*
* The reserve_bootmem_region sets the reserved flag on bootmem
@@ -39,7 +48,7 @@ static inline void free_bootmem_page(struct page *page)
*/
VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
- if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ if (type == SECTION_INFO || type == MIX_SECTION_INFO)
put_page_bootmem(page);
else
VM_BUG_ON_PAGE(1, page);
@@ -54,7 +63,7 @@ static inline void put_page_bootmem(struct page *page)
}
static inline void get_page_bootmem(unsigned long info, struct page *page,
- unsigned long type)
+ enum bootmem_type type)
{
}
diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
index fa7cb0c87c03..95f288169a38 100644
--- a/mm/bootmem_info.c
+++ b/mm/bootmem_info.c
@@ -14,23 +14,24 @@
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>
-void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
+void get_page_bootmem(unsigned long info, struct page *page,
+ enum bootmem_type type)
{
- page->index = type;
+ BUG_ON(type > 0xf);
+ BUG_ON(info > (ULONG_MAX >> 4));
SetPagePrivate(page);
- set_page_private(page, info);
+ set_page_private(page, info << 4 | type);
page_ref_inc(page);
}
void put_page_bootmem(struct page *page)
{
- unsigned long type = page->index;
+ enum bootmem_type type = bootmem_type(page);
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
if (page_ref_dec_return(page) == 1) {
- page->index = 0;
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
diff --git a/mm/sparse.c b/mm/sparse.c
index dc38539f8560..6ba5354cf2e1 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -720,19 +720,19 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
static void free_map_bootmem(struct page *memmap)
{
unsigned long maps_section_nr, removing_section_nr, i;
- unsigned long magic, nr_pages;
+ unsigned long type, nr_pages;
struct page *page = virt_to_page(memmap);
nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
>> PAGE_SHIFT;
for (i = 0; i < nr_pages; i++, page++) {
- magic = page->index;
+ type = bootmem_type(page);
- BUG_ON(magic == NODE_INFO);
+ BUG_ON(type == NODE_INFO);
maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
- removing_section_nr = page_private(page);
+ removing_section_nr = bootmem_info(page);
/*
* When this function is called, the removing section is
--
2.43.0
* [PATCH v2 6/7] mm: Remove references to page->index in huge_memory.c
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
` (4 preceding siblings ...)
2024-10-05 20:01 ` [PATCH v2 5/7] bootmem: Stop using page->index Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
2024-10-05 20:01 ` [PATCH v2 7/7] mm: Use page->private instead of page->index in percpu Matthew Wilcox (Oracle)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
We already have folios in all these places; it's just a matter of
using them instead of the pages.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/huge_memory.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3ca89e0279a7..812287dd6221 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3135,8 +3135,8 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
/* ->mapping in first and second tail page is replaced by other uses */
VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
page_tail);
- page_tail->mapping = head->mapping;
- page_tail->index = head->index + tail;
+ new_folio->mapping = folio->mapping;
+ new_folio->index = folio->index + tail;
/*
* page->private should not be set in tail pages. Fix up and warn once
@@ -3212,11 +3212,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageHasHWPoisoned(head);
for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
+ struct folio *tail;
__split_huge_page_tail(folio, i, lruvec, list, new_order);
+ tail = page_folio(head + i);
/* Some pages can be beyond EOF: drop them from page cache */
- if (head[i].index >= end) {
- struct folio *tail = page_folio(head + i);
-
+ if (tail->index >= end) {
if (shmem_mapping(folio->mapping))
nr_dropped++;
else if (folio_test_clear_dirty(tail))
@@ -3224,12 +3224,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
inode_to_wb(folio->mapping->host));
__filemap_remove_folio(tail, NULL);
folio_put(tail);
- } else if (!PageAnon(page)) {
- __xa_store(&folio->mapping->i_pages, head[i].index,
- head + i, 0);
+ } else if (!folio_test_anon(folio)) {
+ __xa_store(&folio->mapping->i_pages, tail->index,
+ tail, 0);
} else if (swap_cache) {
__xa_store(&swap_cache->i_pages, offset + i,
- head + i, 0);
+ tail, 0);
}
}
--
2.43.0
* [PATCH v2 7/7] mm: Use page->private instead of page->index in percpu
2024-10-05 20:01 [PATCH v2 0/7] page->index removals in mm Matthew Wilcox (Oracle)
` (5 preceding siblings ...)
2024-10-05 20:01 ` [PATCH v2 6/7] mm: Remove references to page->index in huge_memory.c Matthew Wilcox (Oracle)
@ 2024-10-05 20:01 ` Matthew Wilcox (Oracle)
6 siblings, 0 replies; 11+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-10-05 20:01 UTC (permalink / raw)
To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm
The percpu allocator only uses one field in struct page; just change
it from page->index to page->private.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/percpu.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index da21680ff294..0d3e6b76e873 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -253,13 +253,13 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
- page->index = (unsigned long)pcpu;
+ page->private = (unsigned long)pcpu;
}
/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
- return (struct pcpu_chunk *)page->index;
+ return (struct pcpu_chunk *)page->private;
}
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
--
2.43.0
* Re: [PATCH v2 5/7] bootmem: Stop using page->index
2024-10-05 20:01 ` [PATCH v2 5/7] bootmem: Stop using page->index Matthew Wilcox (Oracle)
@ 2024-10-08 18:48 ` kernel test robot
2024-10-08 19:29 ` kernel test robot
1 sibling, 0 replies; 11+ messages in thread
From: kernel test robot @ 2024-10-08 18:48 UTC (permalink / raw)
To: Matthew Wilcox (Oracle), Andrew Morton
Cc: oe-kbuild-all, Linux Memory Management List,
Matthew Wilcox (Oracle)
Hi Matthew,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on tip/locking/core tip/x86/mm dennis-percpu/for-next linus/master v6.12-rc2 next-20241008]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Convert-page_to_pgoff-to-page_pgoff/20241006-040239
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20241005200121.3231142-6-willy%40infradead.org
patch subject: [PATCH v2 5/7] bootmem: Stop using page->index
config: x86_64-randconfig-011-20241008 (https://download.01.org/0day-ci/archive/20241009/202410090224.gxQY18rm-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241009/202410090224.gxQY18rm-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410090224.gxQY18rm-lkp@intel.com/
All errors (new ones prefixed by >>):
arch/x86/mm/init_64.c: In function 'free_pagetable':
>> arch/x86/mm/init_64.c:992:42: error: implicit declaration of function 'bootmem_type' [-Werror=implicit-function-declaration]
992 | enum bootmem_type type = bootmem_type(page);
| ^~~~~~~~~~~~
cc1: some warnings being treated as errors
vim +/bootmem_type +992 arch/x86/mm/init_64.c
987
988 static void __meminit free_pagetable(struct page *page, int order)
989 {
990 /* bootmem page has reserved flag */
991 if (PageReserved(page)) {
> 992 enum bootmem_type type = bootmem_type(page);
993 unsigned long nr_pages = 1 << order;
994
995 if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
996 while (nr_pages--)
997 put_page_bootmem(page++);
998 } else
999 while (nr_pages--)
1000 free_reserved_page(page++);
1001 } else
1002 free_pages((unsigned long)page_address(page), order);
1003 }
1004
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v2 5/7] bootmem: Stop using page->index
2024-10-05 20:01 ` [PATCH v2 5/7] bootmem: Stop using page->index Matthew Wilcox (Oracle)
2024-10-08 18:48 ` kernel test robot
@ 2024-10-08 19:29 ` kernel test robot
2024-10-09 21:34 ` Andrew Morton
1 sibling, 1 reply; 11+ messages in thread
From: kernel test robot @ 2024-10-08 19:29 UTC (permalink / raw)
To: Matthew Wilcox (Oracle), Andrew Morton
Cc: llvm, oe-kbuild-all, Linux Memory Management List,
Matthew Wilcox (Oracle)
Hi Matthew,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on tip/locking/core tip/x86/mm dennis-percpu/for-next linus/master v6.12-rc2 next-20241008]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Convert-page_to_pgoff-to-page_pgoff/20241006-040239
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20241005200121.3231142-6-willy%40infradead.org
patch subject: [PATCH v2 5/7] bootmem: Stop using page->index
config: x86_64-randconfig-014-20241008 (https://download.01.org/0day-ci/archive/20241009/202410090311.eaqcL7IZ-lkp@intel.com/config)
compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241009/202410090311.eaqcL7IZ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410090311.eaqcL7IZ-lkp@intel.com/
All errors (new ones prefixed by >>):
>> arch/x86/mm/init_64.c:992:28: error: call to undeclared function 'bootmem_type'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
992 | enum bootmem_type type = bootmem_type(page);
| ^
1 error generated.
vim +/bootmem_type +992 arch/x86/mm/init_64.c
987
988 static void __meminit free_pagetable(struct page *page, int order)
989 {
990 /* bootmem page has reserved flag */
991 if (PageReserved(page)) {
> 992 enum bootmem_type type = bootmem_type(page);
993 unsigned long nr_pages = 1 << order;
994
995 if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
996 while (nr_pages--)
997 put_page_bootmem(page++);
998 } else
999 while (nr_pages--)
1000 free_reserved_page(page++);
1001 } else
1002 free_pages((unsigned long)page_address(page), order);
1003 }
1004
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v2 5/7] bootmem: Stop using page->index
2024-10-08 19:29 ` kernel test robot
@ 2024-10-09 21:34 ` Andrew Morton
0 siblings, 0 replies; 11+ messages in thread
From: Andrew Morton @ 2024-10-09 21:34 UTC (permalink / raw)
To: kernel test robot
Cc: Matthew Wilcox (Oracle), llvm, oe-kbuild-all,
Linux Memory Management List
On Wed, 9 Oct 2024 03:29:37 +0800 kernel test robot <lkp@intel.com> wrote:
> Hi Matthew,
>
> kernel test robot noticed the following build errors:
>
> [auto build test ERROR on akpm-mm/mm-everything]
> [also build test ERROR on tip/locking/core tip/x86/mm dennis-percpu/for-next linus/master v6.12-rc2 next-20241008]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting patch, we suggest to use '--base' as documented in
> https://git-scm.com/docs/git-format-patch#_base_tree_information]
>
> url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Convert-page_to_pgoff-to-page_pgoff/20241006-040239
> base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> patch link: https://lore.kernel.org/r/20241005200121.3231142-6-willy%40infradead.org
> patch subject: [PATCH v2 5/7] bootmem: Stop using page->index
> config: x86_64-randconfig-014-20241008 (https://download.01.org/0day-ci/archive/20241009/202410090311.eaqcL7IZ-lkp@intel.com/config)
> compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff)
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241009/202410090311.eaqcL7IZ-lkp@intel.com/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202410090311.eaqcL7IZ-lkp@intel.com/
>
> All errors (new ones prefixed by >>):
>
> >> arch/x86/mm/init_64.c:992:28: error: call to undeclared function 'bootmem_type'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
> 992 | enum bootmem_type type = bootmem_type(page);
> | ^
> 1 error generated.
>
Thanks, I did the below.
I do wonder whether we should be using free_bootmem_page() in the
CONFIG_HAVE_BOOTMEM_INFO_NODE=n case.
And in the CONFIG_HAVE_BOOTMEM_INFO_NODE=y case, the logic in
free_pagetable() looks quite duplicative of the logic in
free_bootmem_page() (which seems too large to be inlined).
My new free_reserved_pages() should be moved elsewhere so riscv and
powerpc (at least) can use it.
From: Andrew Morton <akpm@linux-foundation.org>
Subject: bootmem-stop-using-page-index-fix
Date: Wed Oct 9 02:16:16 PM PDT 2024
fix arch/x86/mm/init_64.c build with !CONFIG_HAVE_BOOTMEM_INFO_NODE
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202410090311.eaqcL7IZ-lkp@intel.com/
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
arch/x86/mm/init_64.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
--- a/arch/x86/mm/init_64.c~bootmem-stop-using-page-index-fix
+++ a/arch/x86/mm/init_64.c
@@ -985,21 +985,32 @@ int arch_add_memory(int nid, u64 start,
return add_pages(nid, start_pfn, nr_pages, params);
}
+static void free_reserved_pages(struct page *page, unsigned long nr_pages)
+{
+ while (nr_pages--)
+ free_reserved_page(page++);
+}
+
static void __meminit free_pagetable(struct page *page, int order)
{
/* bootmem page has reserved flag */
if (PageReserved(page)) {
- enum bootmem_type type = bootmem_type(page);
unsigned long nr_pages = 1 << order;
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+ enum bootmem_type type = bootmem_type(page);
if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
while (nr_pages--)
put_page_bootmem(page++);
- } else
- while (nr_pages--)
- free_reserved_page(page++);
- } else
+ } else {
+ free_reserved_pages(page, nr_pages);
+ }
+#else
+ free_reserved_pages(page, nr_pages);
+#endif
+ } else {
free_pages((unsigned long)page_address(page), order);
+ }
}
static void __meminit free_hugepage_table(struct page *page,
_