public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] kmemcheck: lazy checking for MOVS instructions
@ 2008-09-11 15:46 Vegard Nossum
  2008-09-12  9:06 ` Ingo Molnar
  0 siblings, 1 reply; 4+ messages in thread
From: Vegard Nossum @ 2008-09-11 15:46 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Pekka Enberg, Andrew Morton

Comments, anyone?

>From 36190b27a77b8ff5bbea09cf765c1f335c3920e4 Mon Sep 17 00:00:00 2001
From: Vegard Nossum <vegard.nossum@gmail.com>
Date: Thu, 11 Sep 2008 17:31:07 +0200
Subject: [PATCH] kmemcheck: lazy checking for MOVS instructions

This patch adds the support for lazy (as opposed to eager) checking
for [REP] MOVS instructions (mostly used in memcpy()). This means
that if both the source and destination addresses are tracked by
kmemcheck, we copy the shadow memory instead of checking that it is
initialized.

In this way, we get rid of a few more false positives.

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
---
 arch/x86/mm/kmemcheck/kmemcheck.c |  121 +++++++++++++++++++++++++++++++++++--
 1 files changed, 115 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index eef8c6a..640f8bb 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -391,6 +391,118 @@ static void kmemcheck_write(struct pt_regs *regs,
 	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
 }
 
+/*
+ * Copying is hard. We have two addresses, each of which may be split across
+ * a page (and each page will have different shadow addresses).
+ */
+static void kmemcheck_copy(struct pt_regs *regs,
+	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
+{
+	uint8_t shadow[8];
+	enum kmemcheck_shadow status;
+
+	unsigned long page;
+	unsigned long next_addr;
+	unsigned long next_page;
+
+	uint8_t *x;
+	unsigned int i;
+	unsigned int n;
+
+	BUG_ON(size > sizeof(shadow));
+
+	page = src_addr & PAGE_MASK;
+	next_addr = src_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		kmemcheck_save_addr(src_addr);
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			for (i = 0; i < size; ++i)
+				shadow[i] = x[i];
+		} else {
+			for (i = 0; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	} else {
+		n = next_page - src_addr;
+
+		/* First page */
+		kmemcheck_save_addr(src_addr);
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			for (i = 0; i < n; ++i)
+				shadow[i] = x[i];
+		} else {
+			/* Not tracked */
+			for (i = 0; i < n; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+
+		/* Second page; x is relative to next_page, so re-base */
+		kmemcheck_save_addr(next_page);
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			for (i = n; i < size; ++i)
+				shadow[i] = x[i - n];
+		} else {
+			/* Not tracked */
+			for (i = n; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	}
+
+	page = dst_addr & PAGE_MASK;
+	next_addr = dst_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		kmemcheck_save_addr(dst_addr);
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			for (i = 0; i < size; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	} else {
+		n = next_page - dst_addr;
+
+		/* First page */
+		kmemcheck_save_addr(dst_addr);
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			for (i = 0; i < n; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+
+		/* Second page; x is relative to next_page, so re-base */
+		kmemcheck_save_addr(next_page);
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			for (i = n; i < size; ++i) {
+				x[i - n] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	}
+
+	status = kmemcheck_shadow_test(shadow, size);
+	if (status == KMEMCHECK_SHADOW_INITIALIZED)
+		return;
+
+	if (kmemcheck_enabled)
+		kmemcheck_error_save(status, src_addr, size, regs);
+
+	if (kmemcheck_enabled == 2)
+		kmemcheck_enabled = 0;
+}
+
 enum kmemcheck_method {
 	KMEMCHECK_READ,
 	KMEMCHECK_WRITE,
@@ -446,8 +558,7 @@ static void kmemcheck_access(struct pt_regs *regs,
 		case 0xa5:
 			BUG_ON(regs->ip != (unsigned long) rep_prefix);
 
-			kmemcheck_read(regs, regs->si, size);
-			kmemcheck_write(regs, regs->di, size);
+			kmemcheck_copy(regs, regs->si, regs->di, size);
 			data->rep = rep_prefix;
 			data->rex = rex_prefix;
 			data->insn = insn_primary;
@@ -514,8 +625,7 @@ static void kmemcheck_access(struct pt_regs *regs,
 		 * These instructions are special because they take two
 		 * addresses, but we only get one page fault.
 		 */
-		kmemcheck_read(regs, regs->si, size);
-		kmemcheck_write(regs, regs->di, size);
+		kmemcheck_copy(regs, regs->si, regs->di, size);
 		goto out;
 
 		/* CMPS, CMPSB, CMPSW, CMPSD */
@@ -607,8 +717,7 @@ bool kmemcheck_trap(struct pt_regs *regs)
 		switch (data->insn[0]) {
 		case 0xa4:
 		case 0xa5:
-			kmemcheck_read(regs, regs->si, data->size);
-			kmemcheck_write(regs, regs->di, data->size);
+			kmemcheck_copy(regs, regs->si, regs->di, data->size);
 			break;
 		case 0xaa:
 		case 0xab:
-- 
1.5.5.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] kmemcheck: lazy checking for MOVS instructions
  2008-09-11 15:46 [PATCH] kmemcheck: lazy checking for MOVS instructions Vegard Nossum
@ 2008-09-12  9:06 ` Ingo Molnar
  2008-09-12 17:09   ` Vegard Nossum
  0 siblings, 1 reply; 4+ messages in thread
From: Ingo Molnar @ 2008-09-12  9:06 UTC (permalink / raw)
  To: Vegard Nossum; +Cc: linux-kernel, Pekka Enberg, Andrew Morton


* Vegard Nossum <vegard.nossum@gmail.com> wrote:

> Comments, anyone?
> 
> >From 36190b27a77b8ff5bbea09cf765c1f335c3920e4 Mon Sep 17 00:00:00 2001
> From: Vegard Nossum <vegard.nossum@gmail.com>
> Date: Thu, 11 Sep 2008 17:31:07 +0200
> Subject: [PATCH] kmemcheck: lazy checking for MOVS instructions
> 
> This patch adds the support for lazy (as opposed to eager) checking 
> for [REP] MOVS instructions (mostly used in memcpy()). This means that 
> if both the source and destination addresses are tracked by kmemcheck, 
> we copy the shadow memory instead of checking that it is initialized.
> 
> In this way, we get rid of a few more false positives.

looks good to me. I've applied it to tip/kmemcheck - but can zap it and 
pull your for-tip branch as well.

	Ingo

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] kmemcheck: lazy checking for MOVS instructions
  2008-09-12  9:06 ` Ingo Molnar
@ 2008-09-12 17:09   ` Vegard Nossum
  2008-09-14 15:21     ` Ingo Molnar
  0 siblings, 1 reply; 4+ messages in thread
From: Vegard Nossum @ 2008-09-12 17:09 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: linux-kernel, Pekka Enberg, Andrew Morton

On Fri, Sep 12, 2008 at 11:06 AM, Ingo Molnar <mingo@elte.hu> wrote:
>> From: Vegard Nossum <vegard.nossum@gmail.com>
>> Date: Thu, 11 Sep 2008 17:31:07 +0200
>> Subject: [PATCH] kmemcheck: lazy checking for MOVS instructions
>>
>> This patch adds the support for lazy (as opposed to eager) checking
>> for [REP] MOVS instructions (mostly used in memcpy()). This means that
>> if both the source and destination addresses are tracked by kmemcheck,
>> we copy the shadow memory instead of checking that it is initialized.
>>
>> In this way, we get rid of a few more false positives.
>
> looks good to me. I've applied it to tip/kmemcheck - but can zap it and
> pull your for-tip branch as well.

Please zap, I believe it contains an error :-)

In short, when reading/writing the shadow memory of the second page in
a page-boundary-crossing memory access, the offsets into the second
shadow page will be wrong (off by up to 8 bytes). It's a pretty
obscure case, but it would be nice to have it fixed. Will send a pull
request later.

Thanks,


Vegard

-- 
"The animistic metaphor of the bug that maliciously sneaked in while
the programmer was not looking is intellectually dishonest as it
disguises that the error is the programmer's own creation."
	-- E. W. Dijkstra, EWD1036

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] kmemcheck: lazy checking for MOVS instructions
  2008-09-12 17:09   ` Vegard Nossum
@ 2008-09-14 15:21     ` Ingo Molnar
  0 siblings, 0 replies; 4+ messages in thread
From: Ingo Molnar @ 2008-09-14 15:21 UTC (permalink / raw)
  To: Vegard Nossum; +Cc: linux-kernel, Pekka Enberg, Andrew Morton


* Vegard Nossum <vegard.nossum@gmail.com> wrote:

> On Fri, Sep 12, 2008 at 11:06 AM, Ingo Molnar <mingo@elte.hu> wrote:
> >> From: Vegard Nossum <vegard.nossum@gmail.com>
> >> Date: Thu, 11 Sep 2008 17:31:07 +0200
> >> Subject: [PATCH] kmemcheck: lazy checking for MOVS instructions
> >>
> >> This patch adds the support for lazy (as opposed to eager) checking
> >> for [REP] MOVS instructions (mostly used in memcpy()). This means that
> >> if both the source and destination addresses are tracked by kmemcheck,
> >> we copy the shadow memory instead of checking that it is initialized.
> >>
> >> In this way, we get rid of a few more false positives.
> >
> > looks good to me. I've applied it to tip/kmemcheck - but can zap it and
> > pull your for-tip branch as well.
> 
> Please zap, I believe it contains an error :-)
> 
> In short, when reading/writing the shadow memory of the second page in 
> a page-boundary-crossing memory access, the offsets into the second 
> shadow page will be wrong (off by up to 8 bytes). It's a pretty 
> obscure case, but it would be nice to have it fixed. Will send a pull 
> request later.

ok, zapped it.

	Ingo

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2008-09-14 15:21 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-09-11 15:46 [PATCH] kmemcheck: lazy checking for MOVS instructions Vegard Nossum
2008-09-12  9:06 ` Ingo Molnar
2008-09-12 17:09   ` Vegard Nossum
2008-09-14 15:21     ` Ingo Molnar

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox