From: Marco Elver <elver@google.com>
To: elver@google.com
Cc: "Paul E. McKenney" <paulmck@kernel.org>,
Alexander Potapenko <glider@google.com>,
Bart Van Assche <bvanassche@acm.org>,
Bill Wendling <morbo@google.com>,
Boqun Feng <boqun.feng@gmail.com>,
Dmitry Vyukov <dvyukov@google.com>,
Frederic Weisbecker <frederic@kernel.org>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Ingo Molnar <mingo@kernel.org>, Jann Horn <jannh@google.com>,
Joel Fernandes <joel@joelfernandes.org>,
Jonathan Corbet <corbet@lwn.net>,
Josh Triplett <josh@joshtriplett.org>,
Justin Stitt <justinstitt@google.com>,
Kees Cook <kees@kernel.org>, Mark Rutland <mark.rutland@arm.com>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Miguel Ojeda <ojeda@kernel.org>,
Nathan Chancellor <nathan@kernel.org>,
Neeraj Upadhyay <neeraj.upadhyay@kernel.org>,
Nick Desaulniers <ndesaulniers@google.com>,
Peter Zijlstra <peterz@infradead.org>,
Steven Rostedt <rostedt@goodmis.org>,
Thomas Gleixner <tglx@linutronix.de>,
Uladzislau Rezki <urezki@gmail.com>,
Waiman Long <longman@redhat.com>, Will Deacon <will@kernel.org>,
kasan-dev@googlegroups.com, linux-kernel@vger.kernel.org,
llvm@lists.linux.dev, rcu@vger.kernel.org,
linux-crypto@vger.kernel.org
Subject: [PATCH RFC 21/24] kfence: Enable capability analysis
Date: Thu, 6 Feb 2025 19:10:15 +0100 [thread overview]
Message-ID: <20250206181711.1902989-22-elver@google.com> (raw)
In-Reply-To: <20250206181711.1902989-1-elver@google.com>
Enable capability analysis for the KFENCE subsystem.
Notably, kfence_handle_page_fault() required a minor restructure, which
also fixed a subtle race; arguably that function is more readable now.
Signed-off-by: Marco Elver <elver@google.com>
---
mm/kfence/Makefile | 2 ++
mm/kfence/core.c | 24 +++++++++++++++++-------
mm/kfence/kfence.h | 18 ++++++++++++------
mm/kfence/kfence_test.c | 4 ++++
mm/kfence/report.c | 8 ++++++--
5 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile
index 2de2a58d11a1..b3640bdc3c69 100644
--- a/mm/kfence/Makefile
+++ b/mm/kfence/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+CAPABILITY_ANALYSIS := y
+
obj-y := core.o report.o
CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 102048821c22..c2d1ffd20a1f 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -7,6 +7,8 @@
#define pr_fmt(fmt) "kfence: " fmt
+disable_capability_analysis();
+
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
@@ -34,6 +36,8 @@
#include <asm/kfence.h>
+enable_capability_analysis();
+
#include "kfence.h"
/* Disables KFENCE on the first warning assuming an irrecoverable error. */
@@ -132,8 +136,8 @@ struct kfence_metadata *kfence_metadata __read_mostly;
static struct kfence_metadata *kfence_metadata_init __read_mostly;
/* Freelist with available objects. */
-static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
-static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
+static struct list_head kfence_freelist __var_guarded_by(&kfence_freelist_lock) = LIST_HEAD_INIT(kfence_freelist);
/*
* The static key to set up a KFENCE allocation; or if static keys are not used
@@ -253,6 +257,7 @@ static bool kfence_unprotect(unsigned long addr)
}
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+ __must_hold(&meta->lock)
{
unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
@@ -288,6 +293,7 @@ static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
unsigned long *stack_entries, size_t num_stack_entries)
+ __must_hold(&meta->lock)
{
struct kfence_track *track =
next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
@@ -485,7 +491,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
alloc_covered_add(alloc_stack_hash, 1);
/* Set required slab fields. */
- slab = virt_to_slab((void *)meta->addr);
+ slab = virt_to_slab(addr);
slab->slab_cache = cache;
slab->objects = 1;
@@ -514,6 +520,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
struct kcsan_scoped_access assert_page_exclusive;
+ u32 alloc_stack_hash;
unsigned long flags;
bool init;
@@ -546,9 +553,10 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
/* Mark the object as freed. */
metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
init = slab_want_init_on_free(meta->cache);
+ alloc_stack_hash = meta->alloc_stack_hash;
raw_spin_unlock_irqrestore(&meta->lock, flags);
- alloc_covered_add(meta->alloc_stack_hash, -1);
+ alloc_covered_add(alloc_stack_hash, -1);
/* Check canary bytes for memory corruption. */
check_canary(meta);
@@ -593,6 +601,7 @@ static void rcu_guarded_free(struct rcu_head *h)
* which partial initialization succeeded.
*/
static unsigned long kfence_init_pool(void)
+ __no_capability_analysis
{
unsigned long addr;
struct page *pages;
@@ -1192,6 +1201,7 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
{
const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
struct kfence_metadata *to_report = NULL;
+ unsigned long unprotected_page = 0;
enum kfence_error_type error_type;
unsigned long flags;
@@ -1225,9 +1235,8 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
if (!to_report)
goto out;
- raw_spin_lock_irqsave(&to_report->lock, flags);
- to_report->unprotected_page = addr;
error_type = KFENCE_ERROR_OOB;
+ unprotected_page = addr;
/*
* If the object was freed before we took the look we can still
@@ -1239,7 +1248,6 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
if (!to_report)
goto out;
- raw_spin_lock_irqsave(&to_report->lock, flags);
error_type = KFENCE_ERROR_UAF;
/*
* We may race with __kfence_alloc(), and it is possible that a
@@ -1251,6 +1259,8 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
out:
if (to_report) {
+ raw_spin_lock_irqsave(&to_report->lock, flags);
+ to_report->unprotected_page = unprotected_page;
kfence_report_error(addr, is_write, regs, to_report, error_type);
raw_spin_unlock_irqrestore(&to_report->lock, flags);
} else {
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index dfba5ea06b01..27829d70baf6 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -9,6 +9,8 @@
#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H
+disable_capability_analysis();
+
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -16,6 +18,8 @@
#include "../slab.h" /* for struct kmem_cache */
+enable_capability_analysis();
+
/*
* Get the canary byte pattern for @addr. Use a pattern that varies based on the
* lower 3 bits of the address, to detect memory corruptions with higher
@@ -34,6 +38,8 @@
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64
+extern raw_spinlock_t kfence_freelist_lock;
+
/* KFENCE object states. */
enum kfence_object_state {
KFENCE_OBJECT_UNUSED, /* Object is unused. */
@@ -53,7 +59,7 @@ struct kfence_track {
/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
- struct list_head list; /* Freelist node; access under kfence_freelist_lock. */
+ struct list_head list __var_guarded_by(&kfence_freelist_lock); /* Freelist node. */
struct rcu_head rcu_head; /* For delayed freeing. */
/*
@@ -91,13 +97,13 @@ struct kfence_metadata {
* In case of an invalid access, the page that was unprotected; we
* optimistically only store one address.
*/
- unsigned long unprotected_page;
+ unsigned long unprotected_page __var_guarded_by(&lock);
/* Allocation and free stack information. */
- struct kfence_track alloc_track;
- struct kfence_track free_track;
+ struct kfence_track alloc_track __var_guarded_by(&lock);
+ struct kfence_track free_track __var_guarded_by(&lock);
/* For updating alloc_covered on frees. */
- u32 alloc_stack_hash;
+ u32 alloc_stack_hash __var_guarded_by(&lock);
#ifdef CONFIG_MEMCG
struct slabobj_ext obj_exts;
#endif
@@ -141,6 +147,6 @@ enum kfence_error_type {
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
const struct kfence_metadata *meta, enum kfence_error_type type);
-void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
+void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) __must_hold(&meta->lock);
#endif /* MM_KFENCE_KFENCE_H */
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 00034e37bc9f..67eca6e9a8de 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -11,6 +11,8 @@
* Marco Elver <elver@google.com>
*/
+disable_capability_analysis();
+
#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -26,6 +28,8 @@
#include <asm/kfence.h>
+enable_capability_analysis();
+
#include "kfence.h"
/* May be overridden by <asm/kfence.h>. */
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 10e6802a2edf..bbee90d0034d 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -5,6 +5,8 @@
* Copyright (C) 2020, Google LLC.
*/
+disable_capability_analysis();
+
#include <linux/stdarg.h>
#include <linux/kernel.h>
@@ -22,6 +24,8 @@
#include <asm/kfence.h>
+enable_capability_analysis();
+
#include "kfence.h"
/* May be overridden by <asm/kfence.h>. */
@@ -106,6 +110,7 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
bool show_alloc)
+ __must_hold(&meta->lock)
{
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
@@ -207,8 +212,6 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
return;
- if (meta)
- lockdep_assert_held(&meta->lock);
/*
* Because we may generate reports in printk-unfriendly parts of the
* kernel, such as scheduler code, the use of printk() could deadlock.
@@ -263,6 +266,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);
if (meta) {
+ lockdep_assert_held(&meta->lock);
pr_err("\n");
kfence_print_object(NULL, meta);
}
--
2.48.1.502.g6dc24dfdaf-goog
next prev parent reply other threads:[~2025-02-06 18:18 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-06 18:09 [PATCH RFC 00/24] Compiler-Based Capability- and Locking-Analysis Marco Elver
2025-02-06 18:09 ` [PATCH RFC 01/24] compiler_types: Move lock checking attributes to compiler-capability-analysis.h Marco Elver
2025-02-06 18:40 ` Bart Van Assche
2025-02-06 18:48 ` Marco Elver
2025-02-07 8:33 ` Peter Zijlstra
2025-02-06 18:09 ` [PATCH RFC 02/24] compiler-capability-analysis: Rename __cond_lock() to __cond_acquire() Marco Elver
2025-02-07 8:28 ` Peter Zijlstra
2025-02-07 9:32 ` Marco Elver
2025-02-07 9:41 ` Peter Zijlstra
2025-02-07 9:50 ` Marco Elver
2025-02-06 18:09 ` [PATCH RFC 03/24] compiler-capability-analysis: Add infrastructure for Clang's capability analysis Marco Elver
2025-02-06 18:09 ` [PATCH RFC 04/24] compiler-capability-analysis: Add test stub Marco Elver
2025-02-06 18:09 ` [PATCH RFC 05/24] Documentation: Add documentation for Compiler-Based Capability Analysis Marco Elver
2025-02-06 18:10 ` [PATCH RFC 06/24] checkpatch: Warn about capability_unsafe() without comment Marco Elver
2025-02-06 18:10 ` [PATCH RFC 07/24] cleanup: Basic compatibility with capability analysis Marco Elver
2025-02-06 21:29 ` Bart Van Assche
2025-02-06 22:01 ` Marco Elver
2025-02-06 18:10 ` [PATCH RFC 08/24] lockdep: Annotate lockdep assertions for " Marco Elver
2025-02-10 18:09 ` Bart Van Assche
2025-02-10 18:23 ` Marco Elver
2025-02-10 18:53 ` Bart Van Assche
2025-02-11 13:55 ` Marco Elver
2025-02-06 18:10 ` [PATCH RFC 09/24] locking/rwlock, spinlock: Support Clang's " Marco Elver
2025-02-06 18:10 ` [PATCH RFC 10/24] compiler-capability-analysis: Change __cond_acquires to take return value Marco Elver
2025-02-06 18:10 ` [PATCH RFC 11/24] locking/mutex: Support Clang's capability analysis Marco Elver
2025-02-07 8:31 ` Peter Zijlstra
2025-02-07 20:58 ` Bart Van Assche
2025-02-06 18:10 ` [PATCH RFC 12/24] locking/seqlock: " Marco Elver
2025-02-06 18:10 ` [PATCH RFC 13/24] bit_spinlock: Include missing <asm/processor.h> Marco Elver
2025-02-06 18:10 ` [PATCH RFC 14/24] bit_spinlock: Support Clang's capability analysis Marco Elver
2025-02-06 18:10 ` [PATCH RFC 15/24] rcu: " Marco Elver
2025-02-20 22:00 ` Paul E. McKenney
2025-02-20 22:11 ` Marco Elver
2025-02-20 22:36 ` Paul E. McKenney
2025-02-21 0:16 ` Marco Elver
2025-02-21 1:26 ` Paul E. McKenney
2025-02-21 17:10 ` Marco Elver
2025-02-21 18:08 ` Paul E. McKenney
2025-02-21 18:52 ` Peter Zijlstra
2025-02-21 19:46 ` Marco Elver
2025-02-21 19:57 ` Peter Zijlstra
2025-02-06 18:10 ` [PATCH RFC 16/24] srcu: " Marco Elver
2025-02-06 18:10 ` [PATCH RFC 17/24] kref: Add capability-analysis annotations Marco Elver
2025-02-06 18:10 ` [PATCH RFC 18/24] locking/rwsem: Support Clang's capability analysis Marco Elver
2025-02-06 18:10 ` [PATCH RFC 19/24] locking/local_lock: " Marco Elver
2025-02-06 18:10 ` [PATCH RFC 20/24] debugfs: Make debugfs_cancellation a capability struct Marco Elver
2025-02-06 18:10 ` Marco Elver [this message]
2025-02-06 18:10 ` [PATCH RFC 22/24] kcov: Enable capability analysis Marco Elver
2025-02-06 18:10 ` [PATCH RFC 23/24] stackdepot: " Marco Elver
2025-02-06 18:10 ` [PATCH RFC 24/24] rhashtable: " Marco Elver
2025-02-27 7:00 ` [PATCH RFC 00/24] Compiler-Based Capability- and Locking-Analysis Marco Elver
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250206181711.1902989-22-elver@google.com \
--to=elver@google.com \
--cc=boqun.feng@gmail.com \
--cc=bvanassche@acm.org \
--cc=corbet@lwn.net \
--cc=dvyukov@google.com \
--cc=frederic@kernel.org \
--cc=glider@google.com \
--cc=gregkh@linuxfoundation.org \
--cc=jannh@google.com \
--cc=joel@joelfernandes.org \
--cc=josh@joshtriplett.org \
--cc=justinstitt@google.com \
--cc=kasan-dev@googlegroups.com \
--cc=kees@kernel.org \
--cc=linux-crypto@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=llvm@lists.linux.dev \
--cc=longman@redhat.com \
--cc=mark.rutland@arm.com \
--cc=mathieu.desnoyers@efficios.com \
--cc=mingo@kernel.org \
--cc=morbo@google.com \
--cc=nathan@kernel.org \
--cc=ndesaulniers@google.com \
--cc=neeraj.upadhyay@kernel.org \
--cc=ojeda@kernel.org \
--cc=paulmck@kernel.org \
--cc=peterz@infradead.org \
--cc=rcu@vger.kernel.org \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=urezki@gmail.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox