From: glider@google.com
To: Jens Axboe <axboe@kernel.dk>, Andy Lutomirski <luto@kernel.org>,
Wolfram Sang <wsa@the-dreams.de>, Christoph Hellwig <hch@lst.de>,
Vegard Nossum <vegard.nossum@oracle.com>,
Dmitry Vyukov <dvyukov@google.com>,
Marco Elver <elver@google.com>,
Andrey Konovalov <andreyknvl@google.com>,
linux-mm@kvack.org
Cc: glider@google.com, viro@zeniv.linux.org.uk,
adilger.kernel@dilger.ca, akpm@linux-foundation.org,
aryabinin@virtuozzo.com, ard.biesheuvel@linaro.org,
arnd@arndb.de, hch@infradead.org, darrick.wong@oracle.com,
davem@davemloft.net, dmitry.torokhov@gmail.com,
ebiggers@google.com, edumazet@google.com, ericvh@gmail.com,
gregkh@linuxfoundation.org, harry.wentland@amd.com,
herbert@gondor.apana.org.au, iii@linux.ibm.com, mingo@elte.hu,
jasowang@redhat.com, m.szyprowski@samsung.com,
mark.rutland@arm.com, martin.petersen@oracle.com,
schwidefsky@de.ibm.com, willy@infradead.org, mst@redhat.com,
mhocko@suse.com, monstr@monstr.eu, pmladek@suse.com, cai@lca.pw,
rdunlap@infradead.org, robin.murphy@arm.com,
sergey.senozhatsky@gmail.com, rostedt@goodmis.org,
tiwai@suse.com, tytso@mit.edu, tglx@linutronix.de,
gor@linux.ibm.com
Subject: [PATCH RFC v4 10/42] kmsan: KMSAN compiler API implementation
Date: Fri, 20 Dec 2019 19:49:23 +0100
Message-ID: <20191220184955.223741-11-glider@google.com>
In-Reply-To: <20191220184955.223741-1-glider@google.com>
kmsan_instr.c contains the functions called from code instrumented by the KMSAN compiler pass.
These include functions that:
- return shadow/origin pointers for memory accesses;
- poison and unpoison local variables;
- provide KMSAN context state to pass metadata for function arguments;
- perform string operations (mem*) on metadata;
- tell KMSAN to report an error.
This patch has been split away from the rest of the KMSAN runtime to
simplify the review process.
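To give an idea of how instrumented code uses this API, below is a rough,
hand-written sketch of what the compiler pass conceptually emits for a
4-byte store "*p = v;". This is not actual Clang output: the pass operates
on LLVM IR, the struct shadow_origin_ptr field names (.shadow/.origin) and
the shadow_v/origin_v temporaries are purely illustrative, and the exact
point at which origins are chained may differ.

  /*
   * Illustrative sketch only; all names are hypothetical.
   * Propagate the metadata of v to the metadata of *p, then do the store.
   */
  struct shadow_origin_ptr sop = __msan_metadata_ptr_for_store_4(p);

  *(u32 *)sop.shadow = shadow_v;  /* copy v's shadow to the shadow of *p */
  if (shadow_v)                   /* origins only matter for poisoned data */
          *(u32 *)sop.origin = __msan_chain_origin(origin_v);
  *p = v;                         /* the original store */

The _n variants serve accesses whose size is only known at run time,
__msan_instrument_asm_store() conservatively unpoisons memory written by
inline assembly, and __msan_get_context_state() returns the per-task area
through which the metadata of function arguments and return values is
expected to be passed between instrumented callers and callees.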
Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: linux-mm@kvack.org
---
v4:
- split this patch away as requested by Andrey Konovalov
- removed redundant address checks when copying shadow
- fix __msan_memmove prototype
Change-Id: I826272ed2ebe8ab8ef61a9d4cccdcf07c7b6b499
---
mm/kmsan/kmsan_instr.c | 229 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 229 insertions(+)
create mode 100644 mm/kmsan/kmsan_instr.c
diff --git a/mm/kmsan/kmsan_instr.c b/mm/kmsan/kmsan_instr.c
new file mode 100644
index 000000000000..0de8aafac510
--- /dev/null
+++ b/mm/kmsan/kmsan_instr.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN compiler API.
+ *
+ * Copyright (C) 2017-2019 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "kmsan.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+static bool is_bad_asm_addr(void *addr, u64 size, bool is_store)
+{
+	if ((u64)addr < TASK_SIZE)
+		return true;
+	if (!kmsan_get_metadata(addr, size, META_SHADOW))
+		return true;
+	return false;
+}
+
+struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr, u64 size)
+{
+	return kmsan_get_shadow_origin_ptr(addr, size, /*store*/false);
+}
+EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);
+
+struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr, u64 size)
+{
+	return kmsan_get_shadow_origin_ptr(addr, size, /*store*/true);
+}
+EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);
+
+#define DECLARE_METADATA_PTR_GETTER(size) \
+struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size(void *addr) \
+{ \
+	return kmsan_get_shadow_origin_ptr(addr, size, /*store*/false); \
+} \
+EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size); \
+ \
+struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size(void *addr) \
+{ \
+	return kmsan_get_shadow_origin_ptr(addr, size, /*store*/true); \
+} \
+EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)
+
+DECLARE_METADATA_PTR_GETTER(1);
+DECLARE_METADATA_PTR_GETTER(2);
+DECLARE_METADATA_PTR_GETTER(4);
+DECLARE_METADATA_PTR_GETTER(8);
+
+void __msan_instrument_asm_store(void *addr, u64 size)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	/*
+	 * Most of the accesses are below 32 bytes. The two exceptions so far
+	 * are clwb() (64 bytes) and FPU state (512 bytes).
+	 * It's unlikely that the assembly will touch more than 512 bytes.
+	 */
+	if (size > 512) {
+		WARN_ONCE(1, "assembly store size too big: %llu\n", size);
+		size = 8;
+	}
+	if (is_bad_asm_addr(addr, size, /*is_store*/true))
+		return;
+	irq_flags = kmsan_enter_runtime();
+	/* Unpoison the memory on a best-effort basis. */
+	kmsan_internal_unpoison_shadow(addr, size, /*checked*/false);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(__msan_instrument_asm_store);
+
+void *__msan_memmove(void *dst, const void *src, size_t n)
+{
+	void *result;
+
+	result = __memmove(dst, src, n);
+	if (!n)
+		/* Some people call memmove() with zero length. */
+		return result;
+	if (!kmsan_ready || kmsan_in_runtime())
+		return result;
+
+	kmsan_memmove_metadata(dst, (void *)src, n);
+
+	return result;
+}
+EXPORT_SYMBOL(__msan_memmove);
+
+void *__msan_memmove_nosanitize(void *dst, void *src, u64 n)
+{
+	return __memmove(dst, src, n);
+}
+EXPORT_SYMBOL(__msan_memmove_nosanitize);
+
+void *__msan_memcpy(void *dst, const void *src, u64 n)
+{
+	void *result;
+
+	result = __memcpy(dst, src, n);
+	if (!n)
+		/* Some people call memcpy() with zero length. */
+		return result;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return result;
+
+	kmsan_memcpy_metadata(dst, (void *)src, n);
+
+	return result;
+}
+EXPORT_SYMBOL(__msan_memcpy);
+
+void *__msan_memcpy_nosanitize(void *dst, void *src, u64 n)
+{
+	return __memcpy(dst, src, n);
+}
+EXPORT_SYMBOL(__msan_memcpy_nosanitize);
+
+void *__msan_memset(void *dst, int c, size_t n)
+{
+	void *result;
+	unsigned long irq_flags;
+
+	result = __memset(dst, c, n);
+	if (!kmsan_ready || kmsan_in_runtime())
+		return result;
+
+	irq_flags = kmsan_enter_runtime();
+	/*
+	 * Clang doesn't pass parameter metadata here, so it is impossible to
+	 * use the shadow of @c to set up the shadow for @dst.
+	 */
+	kmsan_internal_unpoison_shadow(dst, n, /*checked*/false);
+	kmsan_leave_runtime(irq_flags);
+
+	return result;
+}
+EXPORT_SYMBOL(__msan_memset);
+
+void *__msan_memset_nosanitize(void *dst, int c, size_t n)
+{
+	return __memset(dst, c, n);
+}
+EXPORT_SYMBOL(__msan_memset_nosanitize);
+
+depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
+{
+	depot_stack_handle_t ret = 0;
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return ret;
+
+	/* Creating new origins may allocate memory. */
+	irq_flags = kmsan_enter_runtime();
+	ret = kmsan_internal_chain_origin(origin);
+	kmsan_leave_runtime(irq_flags);
+	return ret;
+}
+EXPORT_SYMBOL(__msan_chain_origin);
+
+void __msan_poison_alloca(void *address, u64 size, char *descr)
+{
+	depot_stack_handle_t handle;
+	unsigned long entries[4];
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	kmsan_internal_memset_shadow(address, -1, size, /*checked*/true);
+
+	entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
+	entries[1] = (u64)descr;
+	entries[2] = (u64)__builtin_return_address(0);
+	entries[3] = (u64)kmsan_internal_return_address(1);
+
+	/* stack_depot_save() may allocate memory. */
+	irq_flags = kmsan_enter_runtime();
+	handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC);
+	kmsan_leave_runtime(irq_flags);
+	kmsan_internal_set_origin(address, size, handle);
+}
+EXPORT_SYMBOL(__msan_poison_alloca);
+
+void __msan_unpoison_alloca(void *address, u64 size)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	irq_flags = kmsan_enter_runtime();
+	kmsan_internal_unpoison_shadow(address, size, /*checked*/true);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(__msan_unpoison_alloca);
+
+void __msan_warning(u32 origin)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	irq_flags = kmsan_enter_runtime();
+	kmsan_report(origin, /*address*/0, /*size*/0,
+		     /*off_first*/0, /*off_last*/0, /*user_addr*/0, REASON_ANY);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(__msan_warning);
+
+struct kmsan_context_state *__msan_get_context_state(void)
+{
+	struct kmsan_context_state *ret;
+
+	ret = kmsan_task_context_state();
+	BUG_ON(!ret);
+	return ret;
+}
+EXPORT_SYMBOL(__msan_get_context_state);
--
2.24.1.735.g03f4e72817-goog