From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH 4/8] mm: Add __dump_folio()
Date: Tue, 27 Feb 2024 19:23:31 +0000
Message-ID: <20240227192337.757313-5-willy@infradead.org>
In-Reply-To: <20240227192337.757313-1-willy@infradead.org>
Turn __dump_page() into a wrapper around __dump_folio().  Snapshot the
page and folio into stack variables so we don't hit a BUG_ON() if the
allocation is freed under us and what was a folio pointer becomes a
pointer to a tail page.
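
As a rough illustration of the approach (a simplified sketch, not the
exact code in the diff below): copy the struct page to the stack first,
then derive the folio from that copy, so a concurrent free or
reallocation cannot change the fields we base decisions on:

	struct page precise;	/* stable snapshot of *page */
	struct folio *foliop;

	memcpy(&precise, page, sizeof(precise));
	/*
	 * page_folio() reads compound_head from the snapshot, so the
	 * head/tail decision is made on a consistent value even if the
	 * live page changes under us.  The real code below additionally
	 * snapshots the folio and retries if the copy looks inconsistent.
	 */
	foliop = page_folio(&precise);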
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/debug.c | 120 +++++++++++++++++++++++++++++------------------------
1 file changed, 66 insertions(+), 54 deletions(-)
diff --git a/mm/debug.c b/mm/debug.c
index ee533a5ceb79..96555fc78f1a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -51,84 +51,96 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
-static void __dump_page(struct page *page)
+static void __dump_folio(struct folio *folio, struct page *page,
+ unsigned long pfn, unsigned long idx)
{
- struct folio *folio = page_folio(page);
- struct page *head = &folio->page;
- struct address_space *mapping;
- bool compound = PageCompound(page);
- /*
- * Accessing the pageblock without the zone lock. It could change to
- * "isolate" again in the meantime, but since we are just dumping the
- * state for debugging, it should be fine to accept a bit of
- * inaccuracy here due to racing.
- */
- bool page_cma = is_migrate_cma_page(page);
- int mapcount;
+ struct address_space *mapping = folio_mapping(folio);
+ bool page_cma;
+ int mapcount = 0;
char *type = "";
- if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
- /*
- * Corrupt page, so we cannot call page_mapping. Instead, do a
- * safe subset of the steps that page_mapping() does. Caution:
- * this will be misleading for tail pages, PageSwapCache pages,
- * and potentially other situations. (See the page_mapping()
- * implementation for what's missing here.)
- */
- unsigned long tmp = (unsigned long)page->mapping;
-
- if (tmp & PAGE_MAPPING_ANON)
- mapping = NULL;
- else
- mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
- head = page;
- folio = (struct folio *)page;
- compound = false;
- } else {
- mapping = page_mapping(page);
- }
-
/*
- * Avoid VM_BUG_ON() in page_mapcount().
- * page->_mapcount space in struct page is used by sl[aou]b pages to
- * encode own info.
+ * page->_mapcount space in struct page is used by slab pages to
+ * encode own info, and we must avoid calling page_folio() again.
*/
- mapcount = PageSlab(head) ? 0 : page_mapcount(page);
-
- pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
- page, page_ref_count(head), mapcount, mapping,
- page_to_pgoff(page), page_to_pfn(page));
- if (compound) {
- pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
- head, compound_order(head),
+ if (!folio_test_slab(folio)) {
+ mapcount = atomic_read(&page->_mapcount) + 1;
+ if (folio_test_large(folio))
+ mapcount += folio_entire_mapcount(folio);
+ }
+
+ pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
+ folio_ref_count(folio), mapcount, mapping,
+ folio->index + idx, pfn);
+ if (folio_test_large(folio)) {
+ pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+ folio_order(folio),
folio_entire_mapcount(folio),
folio_nr_pages_mapped(folio),
atomic_read(&folio->_pincount));
}
#ifdef CONFIG_MEMCG
- if (head->memcg_data)
- pr_warn("memcg:%lx\n", head->memcg_data);
+ if (folio->memcg_data)
+ pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
- if (PageKsm(page))
+ if (folio_test_ksm(folio))
type = "ksm ";
- else if (PageAnon(page))
+ else if (folio_test_anon(folio))
type = "anon ";
else if (mapping)
dump_mapping(mapping);
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
- pr_warn("%sflags: %pGp%s\n", type, &head->flags,
+ /*
+ * Accessing the pageblock without the zone lock. It could change to
+ * "isolate" again in the meantime, but since we are just dumping the
+ * state for debugging, it should be fine to accept a bit of
+ * inaccuracy here due to racing.
+ */
+ page_cma = is_migrate_cma_page(page);
+ pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
page_cma ? " CMA" : "");
- pr_warn("page_type: %pGt\n", &head->page_type);
+ pr_warn("page_type: %pGt\n", &folio->page.page_type);
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
- if (head != page)
+ if (folio_test_large(folio))
print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
- sizeof(unsigned long), head,
- sizeof(struct page), false);
+ sizeof(unsigned long), folio,
+ 2 * sizeof(struct page), false);
+}
+
+static void __dump_page(const struct page *page)
+{
+ struct folio *foliop, folio;
+ struct page precise;
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long idx, nr_pages = 1;
+ int loops = 5;
+
+again:
+ memcpy(&precise, page, sizeof(*page));
+ foliop = page_folio(&precise);
+ idx = folio_page_idx(foliop, page);
+ if (idx != 0) {
+ if (idx < (1UL << PUD_ORDER)) {
+ memcpy(&folio, foliop, 2 * sizeof(struct page));
+ nr_pages = folio_nr_pages(&folio);
+ }
+
+ if (idx > nr_pages) {
+ if (loops-- > 0)
+ goto again;
+ printk("page does not match folio\n");
+ precise.compound_head &= ~1UL;
+ foliop = (struct folio *)&precise;
+ idx = 0;
+ }
+ }
+
+ __dump_folio(foliop, &precise, pfn, idx);
}
void dump_page(struct page *page, const char *reason)
--
2.43.0
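For context (not part of the patch): __dump_page() is the worker behind
dump_page(), which callers typically reach through VM_BUG_ON_PAGE() and
similar debug macros.  A sketch of a direct call, assuming a caller that
wants to report an unexpected tail page:

	if (unlikely(PageTail(page)))
		dump_page(page, "unexpected tail page");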
Thread overview: 25+ messages
2024-02-27 19:23 [PATCH 0/8] PageFlags cleanups Matthew Wilcox (Oracle)
2024-02-27 19:23 ` [PATCH 1/8] mm: Separate out FOLIO_FLAGS from PAGEFLAGS Matthew Wilcox (Oracle)
2024-03-01 11:23 ` David Hildenbrand
2024-02-27 19:23 ` [PATCH 2/8] mm: Remove PageWaiters, PageSetWaiters and PageClearWaiters Matthew Wilcox (Oracle)
2024-03-01 11:24 ` David Hildenbrand
2024-02-27 19:23 ` [PATCH 3/8] mm: Remove PageYoung and PageIdle definitions Matthew Wilcox (Oracle)
2024-03-01 11:25 ` David Hildenbrand
2024-02-27 19:23 ` Matthew Wilcox (Oracle) [this message]
2024-02-28 21:34 ` [PATCH 4/8] mm: Add __dump_folio() SeongJae Park
2024-02-29 4:37 ` Matthew Wilcox
2024-02-29 5:05 ` SeongJae Park
2024-03-01 10:21 ` Ryan Roberts
2024-03-01 21:32 ` Matthew Wilcox
2024-03-04 19:02 ` Matthew Wilcox
2024-05-14 4:33 ` Kees Cook
2024-05-14 4:53 ` Matthew Wilcox
2024-05-14 14:25 ` Matthew Wilcox
2024-02-27 19:23 ` [PATCH 5/8] mm: Make dump_page() take a const argument Matthew Wilcox (Oracle)
2024-03-01 11:26 ` David Hildenbrand
2024-02-27 19:23 ` [PATCH 6/8] mm: Constify testing page/folio flags Matthew Wilcox (Oracle)
2024-03-01 11:28 ` David Hildenbrand
2024-02-27 19:23 ` [PATCH 7/8] mm: Constify more page/folio tests Matthew Wilcox (Oracle)
2024-03-01 11:28 ` David Hildenbrand
2024-02-27 19:23 ` [PATCH 8/8] mm: Remove cast from page_to_nid() Matthew Wilcox (Oracle)
2024-03-01 11:27 ` David Hildenbrand