linux-pm.vger.kernel.org archive mirror
From: Yinghai Lu <yinghai@kernel.org>
To: Thomas Garnier <thgarnie@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H . Peter Anvin" <hpa@zytor.com>,
	Kees Cook <keescook@chromium.org>,
	"Rafael J . Wysocki" <rjw@rjwysocki.net>,
	Pavel Machek <pavel@ucw.cz>,
	the arch/x86 maintainers <x86@kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Linux PM list <linux-pm@vger.kernel.org>,
	"kernel-hardening@lists.openwall.com"
	<kernel-hardening@lists.openwall.com>
Subject: Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping
Date: Tue, 2 Aug 2016 12:55:44 -0700
Message-ID: <CAE9FiQVtD+eBoHEeNWGUnNAtJkDCEsq2JLg0RC9grSG7Eje3tQ@mail.gmail.com>
In-Reply-To: <CAJcbSZE1kPpL_hWLK70fGhgjP3xRBaZ=r32E9q1SBwhCbqOp5Q@mail.gmail.com>

[-- Attachment #1: Type: text/plain, Size: 296 bytes --]

On Tue, Aug 2, 2016 at 10:48 AM, Thomas Garnier <thgarnie@google.com> wrote:
> On Tue, Aug 2, 2016 at 10:36 AM, Yinghai Lu <yinghai@kernel.org> wrote:
>>
>> Looks like we need to change the loop from phys address to virtual
>> address instead, to avoid the overflow.

Something like the attached patch.
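
For reference, why iterating on the physical address breaks down: pgd_index()
only keeps the index bits of its argument, so computing pgd_index(addr) and
then adding a fixed pgd_index(__PAGE_OFFSET) loses the carry out of the low
bits once the direct-mapping base is not PGDIR-aligned (as it can be with
KASLR memory randomization), and the summed index can also run past the end
of the PGD page.  A minimal user-space sketch of the effect (illustration
only, not part of the patch; the base address and the physical address below
are made-up values):

	#include <stdio.h>

	#define PGDIR_SHIFT	39
	#define PTRS_PER_PGD	512UL
	#define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

	int main(void)
	{
		/* hypothetical randomized base: PUD-aligned, not PGDIR-aligned */
		unsigned long page_offset = 0xffff880040000000UL;
		/* physical address near the top of a 512 GB PGD span */
		unsigned long phys = 0x7fc0000000UL;

		/* index from phys plus a fixed offset: the carry is lost */
		unsigned long split = pgd_index(phys) + pgd_index(page_offset);
		/* index taken from the virtual address directly */
		unsigned long joint = pgd_index(phys + page_offset);

		printf("split %lu joint %lu\n", split, joint);	/* 272 vs 273 */
		return 0;
	}

Walking vaddr = addr + off through the loops and subtracting off again only
when writing the entry keeps the physical address in the page-table entry
while every *_index() lookup follows the virtual address, so the carry is
handled naturally.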

[-- Attachment #2: fix_ident_off.patch --]
[-- Type: text/x-patch, Size: 3570 bytes --]

---
 arch/x86/mm/ident_map.c |   54 ++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

Index: linux-2.6/arch/x86/mm/ident_map.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/ident_map.c
+++ linux-2.6/arch/x86/mm/ident_map.c
@@ -3,40 +3,47 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+
+	vaddr &= PMD_MASK;
+	for (; vaddr < vend; vaddr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(vaddr);
 
 		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+			set_pmd(pmd, __pmd((vaddr - off) | info->pmd_flag));
 	}
 }
 
 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			  unsigned long addr, unsigned long end)
 {
-	unsigned long next;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; vaddr < vend; vaddr = vnext) {
+		pud_t *pud = pud_page + pud_index(vaddr);
 		pmd_t *pmd;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PUD_MASK) + PUD_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -46,21 +53,24 @@ static int ident_pud_init(struct x86_map
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 			      unsigned long addr, unsigned long end)
 {
-	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+	for (; vaddr < vend; vaddr = vnext) {
+		pgd_t *pgd = pgd_page + pgd_index(vaddr);
 		pud_t *pud;
 
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pgd_present(*pgd)) {
 			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
+			result = ident_pud_init(info, pud, vaddr - off,
+						vnext - off);
 			if (result)
 				return result;
 			continue;
@@ -69,7 +79,7 @@ int kernel_ident_mapping_init(struct x86
 		pud = (pud_t *)info->alloc_pgt_page(info->context);
 		if (!pud)
 			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
+		result = ident_pud_init(info, pud, vaddr - off, vnext - off);
 		if (result)
 			return result;
 		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
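
For context on the kernel_mapping flag this all hinges on, here is a rough
sketch of the caller side (assumption: the hibernation usage in
set_up_temporary_mappings() in arch/x86/power/hibernate_64.c, trimmed; not
part of this patch).  With .kernel_mapping set, kernel_ident_mapping_init()
is asked to place each physical range at addr + __PAGE_OFFSET rather than at
the identity address:

	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,	/* caller's safe-page allocator */
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping	= true,			/* index via addr + __PAGE_OFFSET */
	};

	/* rebuild the direct mapping for each mapped physical range */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}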


Thread overview: 30+ messages
2016-08-01 17:07 [PATCH v1 0/2] x86/power/64: Make KASLR memory randomization compatible with hibernation Thomas Garnier
2016-08-01 17:07 ` [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping Thomas Garnier
2016-08-02  0:36   ` Rafael J. Wysocki
2016-08-02 18:01     ` Yinghai Lu
2016-08-02 17:36   ` Yinghai Lu
2016-08-02 17:48     ` Thomas Garnier
2016-08-02 19:55       ` Yinghai Lu [this message]
2016-08-03 15:29         ` Thomas Garnier
2016-08-03 18:23         ` [PATCH v2] " Yinghai Lu
2016-08-03 21:28           ` Rafael J. Wysocki
2016-08-07  1:03             ` Rafael J. Wysocki
2016-08-07  4:53               ` Yinghai Lu
2016-08-07 23:23                 ` Rafael J. Wysocki
2016-08-08  7:06                   ` Yinghai Lu
2016-08-08  7:23                     ` Yinghai Lu
2016-08-08 13:16                       ` Rafael J. Wysocki
2016-08-01 17:08 ` [PATCH v1 2/2] x86/power/64: Fix __PAGE_OFFSET usage on restore Thomas Garnier
2016-08-02  0:38   ` Rafael J. Wysocki
2016-08-02 14:34     ` Thomas Garnier
2016-08-02 20:47       ` Rafael J. Wysocki
2016-08-02 20:59         ` Thomas Garnier
2016-08-02 21:08           ` Rafael J. Wysocki
2016-08-02 23:19             ` [PATCH] x86/power/64: Do not refer to __PAGE_OFFSET from assembly code Rafael J. Wysocki
2016-08-05 10:37               ` Pavel Machek
2016-08-05 14:44                 ` Rafael J. Wysocki
2016-08-05 15:21                   ` Thomas Garnier
2016-08-05 23:12                     ` Rafael J. Wysocki
2016-08-06 19:41                       ` Pavel Machek
2016-08-01 23:48 ` [PATCH v1 0/2] x86/power/64: Make KASLR memory randomization compatible with hibernation Rafael J. Wysocki
2016-08-02  0:47   ` Rafael J. Wysocki
