From: Alexander Potapenko <glider@google.com>
To: glider@google.com
Cc: Alexander Viro <viro@zeniv.linux.org.uk>,
Andrew Morton <akpm@linux-foundation.org>,
Andrey Konovalov <andreyknvl@google.com>,
Andy Lutomirski <luto@kernel.org>, Arnd Bergmann <arnd@arndb.de>,
Borislav Petkov <bp@alien8.de>, Christoph Hellwig <hch@lst.de>,
Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Dmitry Vyukov <dvyukov@google.com>,
Eric Dumazet <edumazet@google.com>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Herbert Xu <herbert@gondor.apana.org.au>,
Ilya Leoshkevich <iii@linux.ibm.com>,
Ingo Molnar <mingo@redhat.com>, Jens Axboe <axboe@kernel.dk>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Kees Cook <keescook@chromium.org>, Marco Elver <elver@google.com>,
Mark Rutland <mark.rutland@arm.com>,
Matthew Wilcox <willy@infradead.org>,
"Michael S. Tsirkin" <mst@redhat.com>,
Pekka Enberg <penberg@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Petr Mladek <pmladek@suse.com>,
Steven Rostedt <rostedt@goodmis.org>,
Thomas Gleixner <tglx@linutronix.de>,
Vasily Gorbik <gor@linux.ibm.com>,
Vegard Nossum <vegard.nossum@oracle.com>,
Vlastimil Babka <vbabka@suse.cz>,
linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: [PATCH v2 17/48] kmsan: mm: maintain KMSAN metadata for page operations
Date: Tue, 29 Mar 2022 14:39:46 +0200
Message-ID: <20220329124017.737571-18-glider@google.com>
In-Reply-To: <20220329124017.737571-1-glider@google.com>

Insert KMSAN hooks that make the necessary bookkeeping changes:
- poison page shadow and origins in alloc_pages()/free_page();
- clear page shadow and origins in clear_page(), copy_user_highpage();
- copy page metadata in copy_highpage(), wp_page_copy();
- handle vmap()/vunmap()/iounmap().

Signed-off-by: Alexander Potapenko <glider@google.com>
---
v2:
-- move the implementation of the page metadata hooks into this patch
-- remove the call to kmsan_memblock_free_pages()
Link: https://linux-review.googlesource.com/id/I6d4f53a0e7eab46fa29f0348f3095d9f2e326850
---
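A quick illustration for reviewers (editorial note, not part of the patch):
how the two allocator hooks pair up. The caller below is hypothetical; the
behavior described in the comments follows the kernel-doc added to
include/linux/kmsan.h in this patch.

  /* Hypothetical caller, shown only to illustrate hook pairing. */
  struct page *page = alloc_pages(GFP_KERNEL, 2);
  /*
   * __alloc_pages() has already called kmsan_alloc_page(page, 2,
   * GFP_KERNEL): all four pages are marked uninitialized and their
   * origin slots record the allocation stack. With __GFP_ZERO in the
   * flags the metadata would have been zeroed instead.
   */
  __free_pages(page, 2);
  /*
   * free_pages_prepare() called kmsan_free_page(page, 2): the range is
   * poisoned again, so later reads of the stale contents are reported.
   */
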
arch/x86/include/asm/page_64.h | 13 ++++
arch/x86/mm/ioremap.c | 3 +
include/linux/highmem.h | 3 +
include/linux/kmsan.h | 123 +++++++++++++++++++++++++++++++++
mm/internal.h | 6 ++
mm/kmsan/hooks.c | 87 +++++++++++++++++++++++
mm/kmsan/shadow.c | 114 ++++++++++++++++++++++++++++++
mm/memory.c | 2 +
mm/page_alloc.c | 11 +++
mm/vmalloc.c | 20 +++++-
10 files changed, 380 insertions(+), 2 deletions(-)
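
Also for reviewers (editorial note): a small worked example of the origin
fill performed by kmsan_alloc_page() in mm/kmsan/shadow.c below, for an
order-2 allocation without __GFP_ZERO, assuming 4 KiB pages and a 4-byte
depot_stack_handle_t:

  pages           = 1 << 2                 = 4
  origin bytes    = PAGE_SIZE * pages      = 16384
  handles written = 16384 / sizeof(handle) = 4096

Every 4-byte origin slot of the block initially refers to the same
allocation stack; later stores overwrite only the slots they touch.
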
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index e9c86299b8351..36e270a8ea9a4 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -45,14 +45,27 @@ void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
+/* This is an assembly header, avoid including too much of kmsan.h */
+#ifdef CONFIG_KMSAN
+void kmsan_unpoison_memory(const void *addr, size_t size);
+#endif
+__no_sanitize_memory
static inline void clear_page(void *page)
{
+#ifdef CONFIG_KMSAN
+ /* alternative_call_2() changes @page. */
+ void *page_copy = page;
+#endif
alternative_call_2(clear_page_orig,
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
"=D" (page),
"0" (page)
: "cc", "memory", "rax", "rcx");
+#ifdef CONFIG_KMSAN
+ /* Clear KMSAN shadow for the pages that have it. */
+ kmsan_unpoison_memory(page_copy, PAGE_SIZE);
+#endif
}
void copy_page(void *to, void *from);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 17a492c273069..0da8608778221 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -17,6 +17,7 @@
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
+#include <linux/kmsan.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -474,6 +475,8 @@ void iounmap(volatile void __iomem *addr)
return;
}
+ kmsan_iounmap_page_range((unsigned long)addr,
+ (unsigned long)addr + get_vm_area_size(p));
memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 39bb9b47fa9cd..3e1898a44d7e3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -277,6 +278,7 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
kunmap_local(vto);
kunmap_local(vfrom);
}
@@ -292,6 +294,7 @@ static inline void copy_highpage(struct page *to, struct page *from)
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
kunmap_local(vto);
kunmap_local(vfrom);
}
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 4e35f43eceaa9..da41850b46cbd 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -42,6 +42,129 @@ struct kmsan_ctx {
bool allow_reporting;
};
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages().
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contains __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift argument passed to vmap_pages_range_noflush().
+ *
+ * KMSAN maps the shadow and origin pages of @pages into contiguous ranges
+ * within the vmalloc metadata region.
+ */
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into
+ * virtual memory.
+ */
+void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike
+ * kmsan_vunmap_range_noflush(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+#else
+
+static inline void kmsan_alloc_page(struct page *page,
+				     unsigned int order,
+				     gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
#endif
#endif /* _LINUX_KMSAN_H */
diff --git a/mm/internal.h b/mm/internal.h
index d80300392a194..2d8ca0ae4774f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -713,8 +713,14 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
}
#endif
+int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
void vunmap_range_noflush(unsigned long start, unsigned long end);
+void __vunmap_range_noflush(unsigned long start, unsigned long end);
+
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid, int *flags);
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 4ac62fa67a02a..5d886df57adca 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -26,6 +26,93 @@
* skipping effects of functions like memset() inside instrumented code.
*/
+static unsigned long vmalloc_shadow(unsigned long addr)
+{
+ return (unsigned long)kmsan_get_metadata((void *)addr,
+ KMSAN_META_SHADOW);
+}
+
+static unsigned long vmalloc_origin(unsigned long addr)
+{
+ return (unsigned long)kmsan_get_metadata((void *)addr,
+ KMSAN_META_ORIGIN);
+}
+
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
+{
+ __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
+ __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+}
+EXPORT_SYMBOL(kmsan_vunmap_range_noflush);
+
+/*
+ * This function creates new shadow/origin pages for the physical pages mapped
+ * into virtual memory. Any shadow/origin pages the physical pages may
+ * already have are ignored.
+ */
+void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift)
+{
+ gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ struct page *shadow, *origin;
+ unsigned long off = 0;
+ int i, nr;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ kmsan_enter_runtime();
+ for (i = 0; i < nr; i++, off += PAGE_SIZE) {
+ shadow = alloc_pages(gfp_mask, 1);
+ origin = alloc_pages(gfp_mask, 1);
+ __vmap_pages_range_noflush(
+ vmalloc_shadow(start + off),
+ vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
+ page_shift);
+ __vmap_pages_range_noflush(
+ vmalloc_origin(start + off),
+ vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
+ page_shift);
+ }
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_ioremap_page_range);
+
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long v_shadow, v_origin;
+ struct page *shadow, *origin;
+ int i, nr;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ kmsan_enter_runtime();
+ v_shadow = (unsigned long)vmalloc_shadow(start);
+ v_origin = (unsigned long)vmalloc_origin(start);
+ for (i = 0; i < nr; i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
+ shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
+ origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
+ __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
+ __vunmap_range_noflush(v_origin, vmalloc_origin(end));
+ if (shadow)
+ __free_pages(shadow, 1);
+ if (origin)
+ __free_pages(origin, 1);
+ }
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_iounmap_page_range);
+
/* Functions from kmsan-checks.h follow. */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index de58cfbc55b9d..8fe6a5ed05e67 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -184,3 +184,117 @@ void *kmsan_get_metadata(void *address, bool is_origin)
ret = (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
return ret;
}
+
+void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ if (!dst || !page_has_metadata(dst))
+ return;
+ if (!src || !page_has_metadata(src)) {
+ kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
+ /*checked*/ false);
+ return;
+ }
+
+ kmsan_enter_runtime();
+ __memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
+ __memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
+ kmsan_leave_runtime();
+}
+
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
+{
+ bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
+ struct page *shadow, *origin;
+ depot_stack_handle_t handle;
+ int pages = 1 << order;
+ int i;
+
+ if (!page)
+ return;
+
+ shadow = shadow_page_for(page);
+ origin = origin_page_for(page);
+
+ if (initialized) {
+ __memset(page_address(shadow), 0, PAGE_SIZE * pages);
+ __memset(page_address(origin), 0, PAGE_SIZE * pages);
+ return;
+ }
+
+ /* Zero pages allocated by the runtime should also be initialized. */
+ if (kmsan_in_runtime())
+ return;
+
+ __memset(page_address(shadow), -1, PAGE_SIZE * pages);
+ kmsan_enter_runtime();
+ handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
+ kmsan_leave_runtime();
+ /*
+ * Addresses are page-aligned, pages are contiguous, so it's ok
+ * to just fill the origin pages with |handle|.
+ */
+ for (i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
+ ((depot_stack_handle_t *)page_address(origin))[i] = handle;
+}
+
+void kmsan_free_page(struct page *page, unsigned int order)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ kmsan_internal_poison_memory(page_address(page),
+ PAGE_SIZE << compound_order(page),
+ GFP_KERNEL,
+ KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+ kmsan_leave_runtime();
+}
+
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ unsigned long shadow_start, origin_start, shadow_end, origin_end;
+ struct page **s_pages, **o_pages;
+ int nr, i, mapped;
+
+ if (!kmsan_enabled)
+ return;
+
+ shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
+ shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
+ if (!shadow_start)
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ s_pages = kcalloc(nr, sizeof(struct page *), GFP_KERNEL);
+ o_pages = kcalloc(nr, sizeof(struct page *), GFP_KERNEL);
+ if (!s_pages || !o_pages)
+ goto ret;
+ for (i = 0; i < nr; i++) {
+ s_pages[i] = shadow_page_for(pages[i]);
+ o_pages[i] = origin_page_for(pages[i]);
+ }
+	/* KMSAN metadata is always mapped with the default kernel protections. */
+ prot = PAGE_KERNEL;
+
+ origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
+ origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
+ kmsan_enter_runtime();
+ mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
+ s_pages, page_shift);
+ KMSAN_WARN_ON(mapped);
+ mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
+ o_pages, page_shift);
+ KMSAN_WARN_ON(mapped);
+ kmsan_leave_runtime();
+ flush_tlb_kernel_range(shadow_start, shadow_end);
+ flush_tlb_kernel_range(origin_start, origin_end);
+ flush_cache_vmap(shadow_start, shadow_end);
+ flush_cache_vmap(origin_start, origin_end);
+
+ret:
+ kfree(s_pages);
+ kfree(o_pages);
+}
diff --git a/mm/memory.c b/mm/memory.c
index c125c4969913a..7465eb43e6d3e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -52,6 +52,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
+#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
@@ -3026,6 +3027,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
put_page(old_page);
return 0;
}
+ kmsan_copy_page_meta(new_page, old_page);
}
if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3589febc6d319..98a066c0a9f63 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -27,6 +27,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
+#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -1301,6 +1302,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
+ kmsan_free_page(page, order);
if (unlikely(PageHWPoison(page)) && !order) {
/*
@@ -3679,6 +3681,14 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
+
+/*
+ * Do not instrument rmqueue() with KMSAN. This function may call
+ * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
+ * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
+ * may call rmqueue() again, which will result in a deadlock.
+ */
+__no_sanitize_memory
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
@@ -5409,6 +5419,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
}
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
+ kmsan_alloc_page(page, order, alloc_gfp);
return page;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4165304d35471..7bcbf7a08597a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,6 +321,9 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
ioremap_max_page_shift);
flush_cache_vmap(addr, end);
+ if (!err)
+ kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+ ioremap_max_page_shift);
return err;
}
@@ -420,7 +423,7 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
*
* This is an internal function only. Do not use outside mm/.
*/
-void vunmap_range_noflush(unsigned long start, unsigned long end)
+void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
@@ -442,6 +445,12 @@ void vunmap_range_noflush(unsigned long start, unsigned long end)
arch_sync_kernel_mappings(start, end);
}
+void vunmap_range_noflush(unsigned long start, unsigned long end)
+{
+ kmsan_vunmap_range_noflush(start, end);
+ __vunmap_range_noflush(start, end);
+}
+
/**
* vunmap_range - unmap kernel virtual addresses
* @addr: start of the VM area to unmap
@@ -576,7 +585,7 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
*
* This is an internal function only. Do not use outside mm/.
*/
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
@@ -602,6 +611,13 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
return 0;
}
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages, unsigned int page_shift)
+{
+ kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+ return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+}
+
/**
* vmap_pages_range - map pages to a kernel virtual address
* @addr: start of the VM area to map
--
2.35.1.1021.g381101b075-goog
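
Editorial postscript: a minimal sketch of what the vmalloc wrappers above
guarantee to a vmap() user. The caller is entirely hypothetical and the
variable names are illustrative.

  struct page *pages[16];
  void *va;

  /* ... pages[] filled from alloc_pages() ... */
  va = vmap(pages, 16, VM_MAP, PAGE_KERNEL);
  /*
   * vmap() ends up in vmap_pages_range_noflush(), whose new wrapper calls
   * kmsan_vmap_pages_range_noflush(): the shadow and origin pages of
   * pages[0..15] get mapped contiguously in the vmalloc metadata region,
   * so accesses through va find their metadata by plain address
   * arithmetic.
   */
  vunmap(va);
  /*
   * vunmap() reaches vunmap_range_noflush(), whose wrapper calls
   * kmsan_vunmap_range_noflush() to unmap the metadata as well. The
   * metadata pages themselves stay allocated, unlike in the iounmap
   * path, which also frees them.
   */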