From: Brendan Jackman <jackmanb@google.com>
To: Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Vlastimil Babka <vbabka@kernel.org>, Wei Xu <weixugc@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>,
Lorenzo Stoakes <ljs@kernel.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
rppt@kernel.org, Sumit Garg <sumit.garg@oss.qualcomm.com>,
derkling@google.com, reijiw@google.com,
Will Deacon <will@kernel.org>,
rientjes@google.com, "Kalyazin, Nikita" <kalyazin@amazon.co.uk>,
patrick.roy@linux.dev, "Itazuri, Takahiro" <itazur@amazon.co.uk>,
Andy Lutomirski <luto@kernel.org>,
David Kaplan <david.kaplan@amd.com>,
Thomas Gleixner <tglx@kernel.org>,
Brendan Jackman <jackmanb@google.com>,
Yosry Ahmed <yosry@kernel.org>
Subject: [PATCH v2 21/22] mm: Minimal KUnit tests for some new page_alloc logic
Date: Fri, 20 Mar 2026 18:23:45 +0000 [thread overview]
Message-ID: <20260320-page_alloc-unmapped-v2-21-28bf1bd54f41@google.com> (raw)
In-Reply-To: <20260320-page_alloc-unmapped-v2-0-28bf1bd54f41@google.com>
Add a simple smoke test for __GFP_UNMAPPED that tries to exercise
flipping pageblocks between mapped/unmapped state.
Also add some basic tests for some freelist-indexing helpers.
Simplest way to run these on x86:
tools/testing/kunit/kunit.py run --arch=x86_64 "page_alloc.*" \
--kconfig_add CONFIG_MERMAP=y --kconfig_add CONFIG_PAGE_ALLOC_UNMAPPED=y
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
kernel/panic.c | 2 +
mm/Kconfig | 2 +-
mm/Makefile | 1 +
mm/init-mm.c | 3 +
mm/internal.h | 6 ++
mm/page_alloc.c | 11 +-
mm/tests/page_alloc_kunit.c | 250 ++++++++++++++++++++++++++++++++++++++++++++
7 files changed, 271 insertions(+), 4 deletions(-)
diff --git a/kernel/panic.c b/kernel/panic.c
index 20feada5319d4..1301444a0447a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -39,6 +39,7 @@
#include <linux/sys_info.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>
+#include <kunit/visibility.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -941,6 +942,7 @@ unsigned long get_taint(void)
{
return tainted_mask;
}
+EXPORT_SYMBOL_IF_KUNIT(get_taint);
/**
* add_taint: add a taint flag if not already set.
diff --git a/mm/Kconfig b/mm/Kconfig
index 05b2bb841d0e0..2021f52a0c422 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1509,7 +1509,7 @@ config PAGE_ALLOC_UNMAPPED
bool "Support allocating pages that aren't in the direct map"
depends on MERMAP
-config PAGE_ALLOC_KUNIT_TESTS
+config PAGE_ALLOC_KUNIT_TEST
tristate "KUnit tests for the page allocator" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
diff --git a/mm/Makefile b/mm/Makefile
index 93a1756303cf9..11849162b6f5a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -152,3 +152,4 @@ obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
obj-$(CONFIG_LAZY_MMU_MODE_KUNIT_TEST) += tests/lazy_mmu_mode_kunit.o
obj-$(CONFIG_MERMAP) += mermap.o
obj-$(CONFIG_MERMAP_KUNIT_TEST) += tests/mermap_kunit.o
+obj-$(CONFIG_PAGE_ALLOC_KUNIT_TEST) += tests/page_alloc_kunit.o
diff --git a/mm/init-mm.c b/mm/init-mm.c
index c5556bb9d5f01..31103356da654 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -13,6 +13,8 @@
#include <linux/iommu.h>
#include <asm/mmu.h>
+#include <kunit/visibility.h>
+
#ifndef INIT_MM_CONTEXT
#define INIT_MM_CONTEXT(name)
#endif
@@ -50,6 +52,7 @@ struct mm_struct init_mm = {
.flexible_array = MM_STRUCT_FLEXIBLE_ARRAY_INIT,
INIT_MM_CONTEXT(init_mm)
};
+EXPORT_SYMBOL_IF_KUNIT(init_mm);
void setup_initial_init_mm(void *start_code, void *end_code,
void *end_data, void *brk)
diff --git a/mm/internal.h b/mm/internal.h
index 865991aca06ea..6c652148bc906 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1908,4 +1908,10 @@ int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn,
void *data, unsigned int flags);
+#if IS_ENABLED(CONFIG_KUNIT)
+unsigned int order_to_pindex(freetype_t freetype, int order);
+int pindex_to_order(unsigned int pindex);
+bool pcp_allowed_order(unsigned int order);
+#endif /* IS_ENABLED(CONFIG_KUNIT) */
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7c91dcbe32576..291ba32f1f1ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
#include <linux/pgalloc_tag.h>
#include <linux/mmzone_lock.h>
#include <asm/div64.h>
+#include <kunit/visibility.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
@@ -461,6 +462,7 @@ get_pfnblock_freetype(const struct page *page, unsigned long pfn)
{
return __get_pfnblock_freetype(page, pfn, 0);
}
+EXPORT_SYMBOL_IF_KUNIT(get_pfnblock_freetype);
/**
@@ -696,7 +698,7 @@ static void bad_page(struct page *page, const char *reason)
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
-static inline unsigned int order_to_pindex(freetype_t freetype, int order)
+VISIBLE_IF_KUNIT inline unsigned int order_to_pindex(freetype_t freetype, int order)
{
int migratetype = free_to_migratetype(freetype);
@@ -724,8 +726,9 @@ static inline unsigned int order_to_pindex(freetype_t freetype, int order)
return (MIGRATE_PCPTYPES * order) + migratetype;
}
+EXPORT_SYMBOL_IF_KUNIT(order_to_pindex);
-static inline int pindex_to_order(unsigned int pindex)
+VISIBLE_IF_KUNIT int pindex_to_order(unsigned int pindex)
{
unsigned int unmapped_base = NR_LOWORDER_PCP_LISTS + NR_PCP_THP;
int order;
@@ -748,8 +751,9 @@ static inline int pindex_to_order(unsigned int pindex)
return order;
}
+EXPORT_SYMBOL_IF_KUNIT(pindex_to_order);
-static inline bool pcp_allowed_order(unsigned int order)
+VISIBLE_IF_KUNIT inline bool pcp_allowed_order(unsigned int order)
{
if (order <= PAGE_ALLOC_COSTLY_ORDER)
return true;
@@ -759,6 +763,7 @@ static inline bool pcp_allowed_order(unsigned int order)
#endif
return false;
}
+EXPORT_SYMBOL_IF_KUNIT(pcp_allowed_order);
/*
* Higher-order pages are called "compound pages". They are structured thusly:
diff --git a/mm/tests/page_alloc_kunit.c b/mm/tests/page_alloc_kunit.c
new file mode 100644
index 0000000000000..bd55d0bc35ac9
--- /dev/null
+++ b/mm/tests/page_alloc_kunit.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mermap.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/pgtable.h>
+#include <linux/set_memory.h>
+#include <linux/sched/mm.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+
+#include "internal.h"
+
+struct free_pages_ctx {
+ unsigned int order;
+ struct list_head pages;
+};
+
+static inline void action_many__free_pages(void *context)
+{
+ struct free_pages_ctx *ctx = context;
+ struct page *page, *tmp;
+
+ list_for_each_entry_safe(page, tmp, &ctx->pages, lru)
+ __free_pages(page, ctx->order);
+}
+
+/*
+ * Allocate a bunch of pages with the same order and GFP flags, transparently
+ * take care of error handling and cleanup. Does this all via a single KUnit
+ * resource, i.e. has a fixed memory overhead.
+ */
+static inline struct free_pages_ctx *
+do_many_alloc_pages(struct kunit *test, gfp_t gfp,
+ unsigned int order, unsigned int count)
+{
+ struct free_pages_ctx *ctx = kunit_kzalloc(
+ test, sizeof(struct free_pages_ctx), GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctx);
+ INIT_LIST_HEAD(&ctx->pages);
+ ctx->order = order;
+
+ for (int i = 0; i < count; i++) {
+ struct page *page = alloc_pages(gfp, order);
+
+ if (!page) {
+ struct page *page, *tmp;
+
+ list_for_each_entry_safe(page, tmp, &ctx->pages, lru)
+ __free_pages(page, order);
+
+ KUNIT_FAIL_AND_ABORT(test,
+ "Failed to alloc order %d page (GFP %pGg) iter %d",
+ order, &gfp, i);
+ }
+ list_add(&page->lru, &ctx->pages);
+ }
+
+ KUNIT_ASSERT_EQ(test,
+ kunit_add_action_or_reset(test, action_many__free_pages, ctx), 0);
+ return ctx;
+}
+
+#ifdef CONFIG_PAGE_ALLOC_UNMAPPED
+
+static const gfp_t gfp_params_array[] = {
+ 0,
+ __GFP_ZERO,
+};
+
+static void gfp_param_get_desc(const gfp_t *gfp, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%pGg", gfp);
+}
+
+KUNIT_ARRAY_PARAM(gfp, gfp_params_array, gfp_param_get_desc);
+
+/* Do some allocations that force the allocator to map/unmap some blocks. */
+static void test_alloc_map_unmap(struct kunit *test)
+{
+ unsigned long page_majority;
+ struct free_pages_ctx *ctx;
+ const gfp_t *gfp_extra = test->param_value;
+ gfp_t gfp = GFP_KERNEL | __GFP_THISNODE | __GFP_UNMAPPED | *gfp_extra;
+ struct page *page;
+
+ kunit_attach_mm();
+ mermap_mm_prepare(current->mm);
+
+ /* No cleanup here - assuming kthread "belongs" to this test. */
+ set_cpus_allowed_ptr(current, cpumask_of_node(numa_node_id()));
+
+ /*
+ * First allocate more than half of the memory in the node as
+ * unmapped. Assuming the memory starts out mapped, this should
+ * exercise the unmap.
+ */
+ page_majority = (node_present_pages(numa_node_id()) / 2) + 1;
+ ctx = do_many_alloc_pages(test, gfp, 0, page_majority);
+
+ /* Check pages are unmapped */
+ list_for_each_entry(page, &ctx->pages, lru) {
+ freetype_t ft = get_pfnblock_freetype(page, page_to_pfn(page));
+
+ /*
+ * Logically it should be an EXPECT, but that would
+ * cause heavy log spam on failure so use ASSERT for
+ * concision.
+ */
+ KUNIT_ASSERT_FALSE(test, kernel_page_present(page));
+ KUNIT_ASSERT_TRUE(test, freetype_flags(ft) & FREETYPE_UNMAPPED);
+ }
+
+ /*
+ * Now free them again and allocate the same amount without
+ * __GFP_UNMAPPED. This will exercise the mapping logic.
+ */
+ kunit_release_action(test, action_many__free_pages, ctx);
+ gfp &= ~__GFP_UNMAPPED;
+ ctx = do_many_alloc_pages(test, gfp, 0, page_majority);
+
+ /* Check pages are mapped. */
+ list_for_each_entry(page, &ctx->pages, lru)
+ KUNIT_ASSERT_TRUE(test, kernel_page_present(page));
+}
+
+#endif /* CONFIG_PAGE_ALLOC_UNMAPPED */
+
+static void __test_pindex_helpers(struct kunit *test, unsigned long *bitmap,
+ int mt, unsigned int ftflags, unsigned int order)
+{
+ freetype_t ft = migrate_to_freetype(mt, ftflags);
+ unsigned int pindex;
+ int got_order;
+
+ if (!pcp_allowed_order(order))
+ return;
+
+ if (mt >= MIGRATE_PCPTYPES)
+ return;
+
+ if (freetype_idx(ft) < 0)
+ return;
+
+ pindex = order_to_pindex(ft, order);
+
+ KUNIT_ASSERT_LT_MSG(test, pindex, NR_PCP_LISTS,
+ "invalid pindex %d (order %d mt %d flags %#x)",
+ pindex, order, mt, ftflags);
+ KUNIT_EXPECT_TRUE_MSG(test, test_bit(pindex, bitmap),
+ "pindex %d reused (order %d mt %d flags %#x)",
+ pindex, order, mt, ftflags);
+
+ /*
+ * For THP, two migratetypes map to the same pindex,
+ * just manually exclude one of those cases.
+ */
+ if (!(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ order == HPAGE_PMD_ORDER &&
+ mt == min(MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE)))
+ clear_bit(pindex, bitmap);
+
+ got_order = pindex_to_order(pindex);
+ KUNIT_EXPECT_EQ_MSG(test, order, got_order,
+ "roundtrip failed, got %d want %d (pindex %d mt %d flags %#x)",
+ got_order, order, pindex, mt, ftflags);
+}
+
+/* This just checks for basic arithmetic errors. */
+static void test_pindex_helpers(struct kunit *test)
+{
+ DECLARE_BITMAP(bitmap, NR_PCP_LISTS);
+
+ /* Bit means "pindex not yet used". */
+ bitmap_fill(bitmap, NR_PCP_LISTS);
+
+ for (unsigned int order = 0; order < NR_PAGE_ORDERS; order++) {
+ for (int mt = 0; mt < MIGRATE_TYPES; mt++) {
+ __test_pindex_helpers(test, bitmap, mt, 0, order);
+ if (FREETYPE_UNMAPPED)
+ __test_pindex_helpers(test, bitmap, mt,
+ FREETYPE_UNMAPPED, order);
+ }
+ }
+
+ KUNIT_EXPECT_TRUE_MSG(test, bitmap_empty(bitmap, NR_PCP_LISTS),
+ "unused pindices: %*pbl", NR_PCP_LISTS, bitmap);
+}
+
+static void __test_freetype_idx(struct kunit *test, unsigned int order,
+ int migratetype, unsigned int ftflags,
+ unsigned long *bitmap)
+{
+ freetype_t ft = migrate_to_freetype(migratetype, ftflags);
+ int idx = freetype_idx(ft);
+
+ if (idx == -1)
+ return;
+ KUNIT_ASSERT_GE(test, idx, 0);
+ KUNIT_ASSERT_LT(test, idx, NR_FREETYPE_IDXS);
+
+ KUNIT_EXPECT_LT_MSG(test, idx, NR_FREETYPE_IDXS,
+ "invalid idx %d (order %d mt %d flags %#x)",
+ idx, order, migratetype, ftflags);
+ clear_bit(idx, bitmap);
+}
+
+static void test_freetype_idx(struct kunit *test)
+{
+ DECLARE_BITMAP(bitmap, NR_FREETYPE_IDXS);
+
+ /* Bit means "freetype idx not yet used". */
+ bitmap_fill(bitmap, NR_FREETYPE_IDXS);
+
+ for (unsigned int order = 0; order < NR_PAGE_ORDERS; order++) {
+ for (int mt = 0; mt < MIGRATE_TYPES; mt++) {
+ __test_freetype_idx(test, order, mt, 0, bitmap);
+ if (FREETYPE_UNMAPPED)
+ __test_freetype_idx(test, order, mt,
+ FREETYPE_UNMAPPED, bitmap);
+ }
+ }
+
+ KUNIT_EXPECT_TRUE_MSG(test, bitmap_empty(bitmap, NR_FREETYPE_IDXS),
+ "unused idxs: %*pbl", NR_FREETYPE_IDXS, bitmap);
+}
+
+static struct kunit_case test_cases[] = {
+#ifdef CONFIG_PAGE_ALLOC_UNMAPPED
+ KUNIT_CASE_PARAM(test_alloc_map_unmap, gfp_gen_params),
+#endif
+ KUNIT_CASE(test_pindex_helpers),
+ KUNIT_CASE(test_freetype_idx),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "page_alloc",
+ .test_cases = test_cases,
+};
+
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
--
2.51.2
next prev parent reply other threads:[~2026-03-20 18:24 UTC|newest]
Thread overview: 64+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-20 18:23 [PATCH v2 00/22] mm: Add __GFP_UNMAPPED Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 01/22] x86/mm: split out preallocate_sub_pgd() Brendan Jackman
2026-03-20 19:42 ` Dave Hansen
2026-03-23 11:01 ` Brendan Jackman
2026-03-24 15:27 ` Borislav Petkov
2026-03-25 13:28 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 02/22] x86/mm: Generalize LDT remap into "mm-local region" Brendan Jackman
2026-03-20 19:47 ` Dave Hansen
2026-03-23 12:01 ` Brendan Jackman
2026-03-23 12:57 ` Brendan Jackman
2026-03-25 14:23 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 03/22] x86/tlb: Expose some flush function declarations to modules Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 04/22] mm: Create flags arg for __apply_to_page_range() Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 05/22] mm: Add more flags " Brendan Jackman
2026-03-26 16:14 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 06/22] x86/mm: introduce the mermap Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 07/22] mm: KUnit tests for " Brendan Jackman
2026-03-24 8:00 ` kernel test robot
2026-03-20 18:23 ` [PATCH v2 08/22] mm: introduce for_each_free_list() Brendan Jackman
2026-05-11 13:46 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 09/22] mm/page_alloc: don't overload migratetype in find_suitable_fallback() Brendan Jackman
2026-05-11 13:51 ` Vlastimil Babka (SUSE)
2026-05-11 16:44 ` Brendan Jackman
2026-05-11 16:53 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 10/22] mm: introduce freetype_t Brendan Jackman
2026-05-11 15:34 ` Vlastimil Babka (SUSE)
2026-05-11 16:49 ` Brendan Jackman
2026-05-11 16:58 ` Vlastimil Babka (SUSE)
2026-05-11 18:17 ` Vlastimil Babka (SUSE)
2026-05-11 18:26 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 11/22] mm: move migratetype definitions to freetype.h Brendan Jackman
2026-05-11 15:35 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 12/22] mm: add definitions for allocating unmapped pages Brendan Jackman
2026-05-11 18:01 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 13/22] mm: rejig pageblock mask definitions Brendan Jackman
2026-05-11 18:07 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 14/22] mm: encode freetype flags in pageblock flags Brendan Jackman
2026-05-11 18:29 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 15/22] mm/page_alloc: remove ifdefs from pindex helpers Brendan Jackman
2026-05-11 18:30 ` Vlastimil Babka (SUSE)
2026-05-12 9:49 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 16/22] mm/page_alloc: separate pcplists by freetype flags Brendan Jackman
2026-05-13 8:46 ` Vlastimil Babka (SUSE)
2026-03-20 18:23 ` [PATCH v2 17/22] mm/page_alloc: rename ALLOC_NON_BLOCK back to _HARDER Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 18/22] mm/page_alloc: introduce ALLOC_NOBLOCK Brendan Jackman
2026-05-13 9:43 ` Vlastimil Babka (SUSE)
2026-05-15 13:36 ` Brendan Jackman
2026-05-15 15:52 ` Gregory Price
2026-03-20 18:23 ` [PATCH v2 19/22] mm/page_alloc: implement __GFP_UNMAPPED allocations Brendan Jackman
2026-05-13 15:43 ` Vlastimil Babka (SUSE)
2026-05-15 16:46 ` Brendan Jackman
2026-03-20 18:23 ` [PATCH v2 20/22] mm/page_alloc: implement __GFP_UNMAPPED|__GFP_ZERO allocations Brendan Jackman
2026-05-13 17:00 ` Vlastimil Babka (SUSE)
2026-05-15 16:50 ` Brendan Jackman
2026-03-20 18:23 ` Brendan Jackman [this message]
2026-03-20 18:23 ` [PATCH v2 22/22] mm/secretmem: Use __GFP_UNMAPPED when available Brendan Jackman
2026-03-31 14:40 ` Brendan Jackman
2026-05-13 16:17 ` [PATCH v2 00/22] mm: Add __GFP_UNMAPPED Gregory Price
2026-05-13 17:14 ` Brendan Jackman
2026-05-13 17:28 ` Gregory Price
2026-05-13 17:38 ` Vlastimil Babka (SUSE)
2026-05-13 17:59 ` Gregory Price
2026-05-15 9:31 ` Brendan Jackman
2026-05-15 16:04 ` Gregory Price
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260320-page_alloc-unmapped-v2-21-28bf1bd54f41@google.com \
--to=jackmanb@google.com \
--cc=akpm@linux-foundation.org \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=david.kaplan@amd.com \
--cc=david@kernel.org \
--cc=derkling@google.com \
--cc=hannes@cmpxchg.org \
--cc=itazur@amazon.co.uk \
--cc=kalyazin@amazon.co.uk \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ljs@kernel.org \
--cc=luto@kernel.org \
--cc=patrick.roy@linux.dev \
--cc=peterz@infradead.org \
--cc=reijiw@google.com \
--cc=rientjes@google.com \
--cc=rppt@kernel.org \
--cc=sumit.garg@oss.qualcomm.com \
--cc=tglx@kernel.org \
--cc=vbabka@kernel.org \
--cc=weixugc@google.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yosry@kernel.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.