From: Feng Tang <feng.tang@intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Christoph Lameter <cl@linux.com>,
	Pekka Enberg <penberg@kernel.org>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	Hyeonggon Yoo <42.hyeyoo@gmail.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Dave Hansen <dave.hansen@intel.com>,
	Robin Murphy <robin.murphy@arm.com>,
	John Garry <john.garry@huawei.com>,
	Kefeng Wang <wangkefeng.wang@huawei.com>,
	Feng Tang <feng.tang@intel.com>
Subject: [PATCH v3 3/3] mm/slub: extend redzone check to cover extra allocated kmalloc space than requested
Date: Wed, 27 Jul 2022 15:10:42 +0800	[thread overview]
Message-ID: <20220727071042.8796-4-feng.tang@intel.com> (raw)
In-Reply-To: <20220727071042.8796-1-feng.tang@intel.com>

kmalloc will round up the request size to a fixed size (mostly a power
of 2), so there can be extra space beyond what was requested, whose
size is the actual buffer size minus the original request size.
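
As a rough illustration (the sizes below are only an example and depend
on the configured kmalloc caches):

	/*
	 * A 52-byte request is served from the kmalloc-64 cache, so the
	 * object carries 12 extra bytes the caller never asked for.
	 * A write past byte 51 stays inside the 64-byte object and is
	 * not caught by the existing right redzone.
	 */
	char *buf = kmalloc(52, GFP_KERNEL);

	buf[60] = 0xaa;		/* in-object, but beyond the requested 52
				 * bytes; with redzone debugging enabled,
				 * the new check reports this as a
				 * "kmalloc Redzone" overwrite */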

To better detect out-of-bounds accesses or abuse of this space, add a
redzone sanity check for it.
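
With the check in place, the object layout (left redzone and tracking
metadata omitted) is roughly:

	| requested bytes | kmalloc redzone | right redzone |
	0                 orig_size         object_size      inuse

The new check covers [orig_size, object_size); if the free pointer is
stored inside the object, the zone start is moved up past it.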

In the current kernel, some kmalloc users already know about this extra
space and make use of it after calling 'ksize()' to learn the real size
of the allocated buffer. So skip the sanity check for objects on which
ksize() has been called, treating them as legitimate users.
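
The kind of usage this exception covers looks roughly like the
following sketch (simplified, not quoted from any particular caller):

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * The caller asks how big the buffer really is and from then
	 * on treats all of it as usable, so the extra-space redzone
	 * check must not fire for this object.
	 */
	len = ksize(buf);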

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Feng Tang <feng.tang@intel.com>
---
 mm/slub.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 3 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 946919066a4b..added2653bb0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -836,6 +836,11 @@ static inline void set_orig_size(struct kmem_cache *s,
 	*(unsigned int *)p = orig_size;
 }
 
+static inline void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+	set_orig_size(s, (void *)object, s->object_size);
+}
+
 static unsigned int get_orig_size(struct kmem_cache *s, void *object)
 {
 	void *p = kasan_reset_tag(object);
@@ -967,13 +972,35 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
+	unsigned int orig_size = s->object_size;
 
-	if (s->flags & SLAB_RED_ZONE)
+	if (s->flags & SLAB_RED_ZONE) {
 		memset(p - s->red_left_pad, val, s->red_left_pad);
 
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			unsigned int zone_start;
+
+			orig_size = get_orig_size(s, object);
+			zone_start = orig_size;
+
+			if (!freeptr_outside_object(s))
+				zone_start = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+
+			/*
+			 * Redzone the extra allocated space by kmalloc
+			 * than requested.
+			 */
+			if (zone_start < s->object_size)
+				memset(p + zone_start, val,
+					s->object_size - zone_start);
+		}
+
+	}
+
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->object_size - 1);
-		p[s->object_size - 1] = POISON_END;
+		memset(p, POISON_FREE, orig_size - 1);
+		p[orig_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -1120,6 +1147,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 {
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
+	unsigned int orig_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1129,6 +1157,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
+
+		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+			orig_size = get_orig_size(s, object);
+
+			if (!freeptr_outside_object(s))
+				orig_size = max_t(unsigned int, orig_size,
+						s->offset + sizeof(void *));
+			if (s->object_size > orig_size  &&
+				!check_bytes_and_report(s, slab, object,
+					"kmalloc Redzone", p + orig_size,
+					val, s->object_size - orig_size)) {
+				return 0;
+			}
+		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, slab, p, "Alignment padding",
@@ -4588,6 +4630,10 @@ size_t __ksize(const void *object)
 	if (unlikely(!folio_test_slab(folio)))
 		return folio_size(folio);
 
+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 EXPORT_SYMBOL(__ksize);
-- 
2.27.0


