* [patch 0/3] x86, CPA: Introduce new APIs set_pages_array[uc|wb]
@ 2009-03-19 21:51 venkatesh.pallipadi
2009-03-19 21:51 ` [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr venkatesh.pallipadi
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-19 21:51 UTC (permalink / raw)
To: mingo, tglx, hpa, airlied; +Cc: arjan, eric, linux-kernel, Venkatesh Pallipadi
AGP is moving over to using struct page pointer array in place of
virtual address array. So, it needs an equivalent of set_memory_array_[uc|wb]
which will operate on struct page pointer array instead.
The patch series adds two new interfaces in x86 CPA code,
set_pages_array_uc
set_pages_array_wb
which will change the memory type of a set of pages referred to by struct page
pointers, which need not be contiguous.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
--
^ permalink raw reply [flat|nested] 7+ messages in thread
* [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr
2009-03-19 21:51 [patch 0/3] x86, CPA: Introduce new APIs set_pages_array[uc|wb] venkatesh.pallipadi
@ 2009-03-19 21:51 ` venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] x86, CPA: Add a flag parameter to cpa set_clr() venkatesh.pallipadi
2009-03-19 21:51 ` [patch 2/3] x86, PAT: Add support for struct page pointer array in cpa set_clr venkatesh.pallipadi
2009-03-19 21:51 ` [patch 3/3] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb venkatesh.pallipadi
2 siblings, 1 reply; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-19 21:51 UTC (permalink / raw)
To: mingo, tglx, hpa, airlied; +Cc: arjan, eric, linux-kernel, Venkatesh Pallipadi
[-- Attachment #1: cpa_array_param_to_flag.patch --]
[-- Type: text/plain, Size: 1874 bytes --]
Change change_page_attr_set_clr() array parameter to a flag. This helps
following patches which add an interface to change attr to uc/wb over a
set of pages referred by struct page.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---
arch/x86/mm/pageattr.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
Index: tip/arch/x86/mm/pageattr.c
===================================================================
--- tip.orig/arch/x86/mm/pageattr.c 2009-03-13 11:51:31.000000000 -0700
+++ tip/arch/x86/mm/pageattr.c 2009-03-17 11:03:31.000000000 -0700
@@ -786,7 +786,7 @@ static inline int cache_attr(pgprot_t at
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
- int force_split, int array)
+ int force_split, int in_flag)
{
struct cpa_data cpa;
int ret, cache, checkalias;
@@ -801,7 +801,7 @@ static int change_page_attr_set_clr(unsi
return 0;
/* Ensure we are PAGE_SIZE aligned */
- if (!array) {
+ if (!(in_flag & CPA_ARRAY)) {
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
/*
@@ -839,7 +839,7 @@ static int change_page_attr_set_clr(unsi
cpa.curpage = 0;
cpa.force_split = force_split;
- if (array)
+ if (in_flag & CPA_ARRAY)
cpa.flags |= CPA_ARRAY;
/* No alias checking for _NX bit modifications */
@@ -888,14 +888,14 @@ static inline int change_page_attr_set(u
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
- array);
+ (array ? CPA_ARRAY : 0));
}
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
- array);
+ (array ? CPA_ARRAY : 0));
}
int _set_memory_uc(unsigned long addr, int numpages)
--
^ permalink raw reply [flat|nested] 7+ messages in thread
* [patch 2/3] x86, PAT: Add support for struct page pointer array in cpa set_clr
2009-03-19 21:51 [patch 0/3] x86, CPA: Introduce new APIs set_pages_array[uc|wb] venkatesh.pallipadi
2009-03-19 21:51 ` [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr venkatesh.pallipadi
@ 2009-03-19 21:51 ` venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] " venkatesh.pallipadi
2009-03-19 21:51 ` [patch 3/3] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb venkatesh.pallipadi
2 siblings, 1 reply; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-19 21:51 UTC (permalink / raw)
To: mingo, tglx, hpa, airlied; +Cc: arjan, eric, linux-kernel, Venkatesh Pallipadi
[-- Attachment #1: change_page_attr_set_clr_struct_page.patch --]
[-- Type: text/plain, Size: 7213 bytes --]
Add struct page array pointer to cpa struct and CPA_PAGES_ARRAY.
With that we can add a parameter to change_page_attr_set_clr() to pass a
struct page array pointer, and that can be handled by the underlying cpa code.
cpa_flush_array() is also changed to support both addr array or struct page
pointer array, depending on the flag.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---
arch/x86/mm/pageattr.c | 79 +++++++++++++++++++++++++++++++------------------
1 file changed, 50 insertions(+), 29 deletions(-)
Index: tip/arch/x86/mm/pageattr.c
===================================================================
--- tip.orig/arch/x86/mm/pageattr.c 2009-03-17 11:03:31.000000000 -0700
+++ tip/arch/x86/mm/pageattr.c 2009-03-17 11:03:51.000000000 -0700
@@ -33,6 +33,7 @@ struct cpa_data {
unsigned long pfn;
unsigned force_split : 1;
int curpage;
+ struct page **pages;
};
/*
@@ -45,6 +46,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
+#define CPA_PAGES_ARRAY 4
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -201,10 +203,10 @@ static void cpa_flush_range(unsigned lon
}
}
-static void cpa_flush_array(unsigned long *start, int numpages, int cache)
+static void cpa_flush_array(unsigned long *start, int numpages, int cache,
+ int in_flags, struct page **pages)
{
unsigned int i, level;
- unsigned long *addr;
BUG_ON(irqs_disabled());
@@ -225,14 +227,22 @@ static void cpa_flush_array(unsigned lon
* will cause all other CPUs to flush the same
* cachelines:
*/
- for (i = 0, addr = start; i < numpages; i++, addr++) {
- pte_t *pte = lookup_address(*addr, &level);
+ for (i = 0; i < numpages; i++) {
+ unsigned long addr;
+ pte_t *pte;
+
+ if (in_flags & CPA_PAGES_ARRAY)
+ addr = (unsigned long)page_address(pages[i]);
+ else
+ addr = start[i];
+
+ pte = lookup_address(addr, &level);
/*
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range((void *) *addr, PAGE_SIZE);
+ clflush_cache_range((void *)addr, PAGE_SIZE);
}
}
@@ -584,7 +594,9 @@ static int __change_page_attr(struct cpa
unsigned int level;
pte_t *kpte, old_pte;
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & CPA_PAGES_ARRAY)
+ address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+ else if (cpa->flags & CPA_ARRAY)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
@@ -687,7 +699,9 @@ static int cpa_process_alias(struct cpa_
* No need to redo, when the primary call touched the direct
* mapping already:
*/
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & CPA_PAGES_ARRAY)
+ vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+ else if (cpa->flags & CPA_ARRAY)
vaddr = cpa->vaddr[cpa->curpage];
else
vaddr = *cpa->vaddr;
@@ -698,7 +712,7 @@ static int cpa_process_alias(struct cpa_
alias_cpa = *cpa;
temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~CPA_ARRAY;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
ret = __change_page_attr_set_clr(&alias_cpa, 0);
@@ -724,7 +738,7 @@ static int cpa_process_alias(struct cpa_
alias_cpa = *cpa;
temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~CPA_ARRAY;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
/*
* The high mapping range is imprecise, so ignore the return value.
@@ -745,7 +759,7 @@ static int __change_page_attr_set_clr(st
*/
cpa->numpages = numpages;
/* for array changes, we can't use large page */
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
cpa->numpages = 1;
if (!debug_pagealloc)
@@ -769,7 +783,7 @@ static int __change_page_attr_set_clr(st
*/
BUG_ON(cpa->numpages > numpages);
numpages -= cpa->numpages;
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
cpa->curpage++;
else
*cpa->vaddr += cpa->numpages * PAGE_SIZE;
@@ -786,7 +800,8 @@ static inline int cache_attr(pgprot_t at
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
- int force_split, int in_flag)
+ int force_split, int in_flag,
+ struct page **pages)
{
struct cpa_data cpa;
int ret, cache, checkalias;
@@ -801,15 +816,7 @@ static int change_page_attr_set_clr(unsi
return 0;
/* Ensure we are PAGE_SIZE aligned */
- if (!(in_flag & CPA_ARRAY)) {
- if (*addr & ~PAGE_MASK) {
- *addr &= PAGE_MASK;
- /*
- * People should not be passing in unaligned addresses:
- */
- WARN_ON_ONCE(1);
- }
- } else {
+ if (in_flag & CPA_ARRAY) {
int i;
for (i = 0; i < numpages; i++) {
if (addr[i] & ~PAGE_MASK) {
@@ -817,6 +824,18 @@ static int change_page_attr_set_clr(unsi
WARN_ON_ONCE(1);
}
}
+ } else if (!(in_flag & CPA_PAGES_ARRAY)) {
+ /*
+ * in_flag of CPA_PAGES_ARRAY implies it is aligned.
+ * No need to check in that case
+ */
+ if (*addr & ~PAGE_MASK) {
+ *addr &= PAGE_MASK;
+ /*
+ * People should not be passing in unaligned addresses:
+ */
+ WARN_ON_ONCE(1);
+ }
}
/* Must avoid aliasing mappings in the highmem code */
@@ -832,6 +851,7 @@ static int change_page_attr_set_clr(unsi
arch_flush_lazy_mmu_mode();
cpa.vaddr = addr;
+ cpa.pages = pages;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
cpa.mask_clr = mask_clr;
@@ -839,8 +859,8 @@ static int change_page_attr_set_clr(unsi
cpa.curpage = 0;
cpa.force_split = force_split;
- if (in_flag & CPA_ARRAY)
- cpa.flags |= CPA_ARRAY;
+ if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
+ cpa.flags |= in_flag;
/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
@@ -866,9 +886,10 @@ static int change_page_attr_set_clr(unsi
* wbindv):
*/
if (!ret && cpu_has_clflush) {
- if (cpa.flags & CPA_ARRAY)
- cpa_flush_array(addr, numpages, cache);
- else
+ if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+ cpa_flush_array(addr, numpages, cache,
+ cpa.flags, pages);
+ } else
cpa_flush_range(*addr, numpages, cache);
} else
cpa_flush_all(cache);
@@ -888,14 +909,14 @@ static inline int change_page_attr_set(u
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
- (array ? CPA_ARRAY : 0));
+ (array ? CPA_ARRAY : 0), NULL);
}
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
- (array ? CPA_ARRAY : 0));
+ (array ? CPA_ARRAY : 0), NULL);
}
int _set_memory_uc(unsigned long addr, int numpages)
@@ -1043,7 +1064,7 @@ int set_memory_np(unsigned long addr, in
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
- __pgprot(0), 1, 0);
+ __pgprot(0), 1, 0, NULL);
}
int set_pages_uc(struct page *page, int numpages)
--
^ permalink raw reply [flat|nested] 7+ messages in thread
* [patch 3/3] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb
2009-03-19 21:51 [patch 0/3] x86, CPA: Introduce new APIs set_pages_array[uc|wb] venkatesh.pallipadi
2009-03-19 21:51 ` [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr venkatesh.pallipadi
2009-03-19 21:51 ` [patch 2/3] x86, PAT: Add support for struct page pointer array in cpa set_clr venkatesh.pallipadi
@ 2009-03-19 21:51 ` venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] " venkatesh.pallipadi
2 siblings, 1 reply; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-19 21:51 UTC (permalink / raw)
To: mingo, tglx, hpa, airlied; +Cc: arjan, eric, linux-kernel, Venkatesh Pallipadi
[-- Attachment #1: set_pages_array_modified.patch --]
[-- Type: text/plain, Size: 3581 bytes --]
Add new interfaces
set_pages_array_uc()
set_pages_array_wb()
that can be used to change the page attribute for a bunch of pages with flush etc
done once at the end of all the changes. These interfaces are similar to
existing set_memory_array_uc() and set_memory_array_wb().
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---
arch/x86/include/asm/cacheflush.h | 3 +
arch/x86/mm/pageattr.c | 63 ++++++++++++++++++++++++++++++++++++++
2 files changed, 66 insertions(+)
Index: tip/arch/x86/mm/pageattr.c
===================================================================
--- tip.orig/arch/x86/mm/pageattr.c 2009-03-17 11:03:51.000000000 -0700
+++ tip/arch/x86/mm/pageattr.c 2009-03-17 11:04:08.000000000 -0700
@@ -919,6 +919,20 @@ static inline int change_page_attr_clear
(array ? CPA_ARRAY : 0), NULL);
}
+static inline int cpa_set_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
+static inline int cpa_clear_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
int _set_memory_uc(unsigned long addr, int numpages)
{
/*
@@ -1075,6 +1089,35 @@ int set_pages_uc(struct page *page, int
}
EXPORT_SYMBOL(set_pages_uc);
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ unsigned long start;
+ unsigned long end;
+ int i;
+ int free_idx;
+
+ for (i = 0; i < addrinarray; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+ goto err_out;
+ }
+
+ if (cpa_set_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
+ return 0; /* Success */
+ }
+err_out:
+ free_idx = i;
+ for (i = 0; i < free_idx; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(set_pages_array_uc);
+
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
@@ -1083,6 +1126,26 @@ int set_pages_wb(struct page *page, int
}
EXPORT_SYMBOL(set_pages_wb);
+int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int retval;
+ unsigned long start;
+ unsigned long end;
+ int i;
+
+ retval = cpa_clear_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_MASK));
+
+ for (i = 0; i < addrinarray; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(set_pages_array_wb);
+
int set_pages_x(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
Index: tip/arch/x86/include/asm/cacheflush.h
===================================================================
--- tip.orig/arch/x86/include/asm/cacheflush.h 2009-03-13 11:51:31.000000000 -0700
+++ tip/arch/x86/include/asm/cacheflush.h 2009-03-17 11:04:08.000000000 -0700
@@ -90,6 +90,9 @@ int set_memory_4k(unsigned long addr, in
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
--
^ permalink raw reply [flat|nested] 7+ messages in thread
* [tip:x86/mm] x86, CPA: Add a flag parameter to cpa set_clr()
2009-03-19 21:51 ` [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr venkatesh.pallipadi
@ 2009-03-20 10:24 ` venkatesh.pallipadi
0 siblings, 0 replies; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-20 10:24 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, venkatesh.pallipadi, tglx, mingo
Commit-ID: 728c9518873de0bbb92b66daa1943b12e5b9f80f
Gitweb: http://git.kernel.org/tip/728c9518873de0bbb92b66daa1943b12e5b9f80f
Author: venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
AuthorDate: Thu, 19 Mar 2009 14:51:13 -0700
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 20 Mar 2009 10:34:47 +0100
x86, CPA: Add a flag parameter to cpa set_clr()
Change change_page_attr_set_clr() array parameter to a flag. This helps
following patches which add an interface to change attr to uc/wb over a
set of pages referred by struct page.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: arjan@infradead.org
Cc: eric@anholt.net
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: airlied@redhat.com
LKML-Reference: <20090319215358.611346000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
arch/x86/mm/pageattr.c | 10 +++++-----
1 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1280565..69009af 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -787,7 +787,7 @@ static inline int cache_attr(pgprot_t attr)
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
- int force_split, int array)
+ int force_split, int in_flag)
{
struct cpa_data cpa;
int ret, cache, checkalias;
@@ -802,7 +802,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
return 0;
/* Ensure we are PAGE_SIZE aligned */
- if (!array) {
+ if (!(in_flag & CPA_ARRAY)) {
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
/*
@@ -840,7 +840,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
cpa.curpage = 0;
cpa.force_split = force_split;
- if (array)
+ if (in_flag & CPA_ARRAY)
cpa.flags |= CPA_ARRAY;
/* No alias checking for _NX bit modifications */
@@ -889,14 +889,14 @@ static inline int change_page_attr_set(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
- array);
+ (array ? CPA_ARRAY : 0));
}
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
- array);
+ (array ? CPA_ARRAY : 0));
}
int _set_memory_uc(unsigned long addr, int numpages)
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [tip:x86/mm] x86, PAT: Add support for struct page pointer array in cpa set_clr
2009-03-19 21:51 ` [patch 2/3] x86, PAT: Add support for struct page pointer array in cpa set_clr venkatesh.pallipadi
@ 2009-03-20 10:24 ` venkatesh.pallipadi
0 siblings, 0 replies; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-20 10:24 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, venkatesh.pallipadi, tglx, mingo
Commit-ID: 9ae2847591c857bed44bc094b908b412bfa1b244
Gitweb: http://git.kernel.org/tip/9ae2847591c857bed44bc094b908b412bfa1b244
Author: venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
AuthorDate: Thu, 19 Mar 2009 14:51:14 -0700
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 20 Mar 2009 10:34:48 +0100
x86, PAT: Add support for struct page pointer array in cpa set_clr
Add struct page array pointer to cpa struct and CPA_PAGES_ARRAY.
With that we can add a parameter to change_page_attr_set_clr() to pass
a struct page array pointer, and that can be handled by the underlying
cpa code.
cpa_flush_array() is also changed to support both addr array or
struct page pointer array, depending on the flag.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: arjan@infradead.org
Cc: eric@anholt.net
Cc: airlied@redhat.com
LKML-Reference: <20090319215358.758513000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
arch/x86/mm/pageattr.c | 79 ++++++++++++++++++++++++++++++-----------------
1 files changed, 50 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 69009af..e5c257f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -34,6 +34,7 @@ struct cpa_data {
unsigned long pfn;
unsigned force_split : 1;
int curpage;
+ struct page **pages;
};
/*
@@ -46,6 +47,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
+#define CPA_PAGES_ARRAY 4
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -202,10 +204,10 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
}
}
-static void cpa_flush_array(unsigned long *start, int numpages, int cache)
+static void cpa_flush_array(unsigned long *start, int numpages, int cache,
+ int in_flags, struct page **pages)
{
unsigned int i, level;
- unsigned long *addr;
BUG_ON(irqs_disabled());
@@ -226,14 +228,22 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache)
* will cause all other CPUs to flush the same
* cachelines:
*/
- for (i = 0, addr = start; i < numpages; i++, addr++) {
- pte_t *pte = lookup_address(*addr, &level);
+ for (i = 0; i < numpages; i++) {
+ unsigned long addr;
+ pte_t *pte;
+
+ if (in_flags & CPA_PAGES_ARRAY)
+ addr = (unsigned long)page_address(pages[i]);
+ else
+ addr = start[i];
+
+ pte = lookup_address(addr, &level);
/*
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range((void *) *addr, PAGE_SIZE);
+ clflush_cache_range((void *)addr, PAGE_SIZE);
}
}
@@ -585,7 +595,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
unsigned int level;
pte_t *kpte, old_pte;
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & CPA_PAGES_ARRAY)
+ address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+ else if (cpa->flags & CPA_ARRAY)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
@@ -688,7 +700,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
* No need to redo, when the primary call touched the direct
* mapping already:
*/
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & CPA_PAGES_ARRAY)
+ vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
+ else if (cpa->flags & CPA_ARRAY)
vaddr = cpa->vaddr[cpa->curpage];
else
vaddr = *cpa->vaddr;
@@ -699,7 +713,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
alias_cpa = *cpa;
temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~CPA_ARRAY;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
ret = __change_page_attr_set_clr(&alias_cpa, 0);
@@ -725,7 +739,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
alias_cpa = *cpa;
temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~CPA_ARRAY;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
/*
* The high mapping range is imprecise, so ignore the return value.
@@ -746,7 +760,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
*/
cpa->numpages = numpages;
/* for array changes, we can't use large page */
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
cpa->numpages = 1;
if (!debug_pagealloc)
@@ -770,7 +784,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
*/
BUG_ON(cpa->numpages > numpages);
numpages -= cpa->numpages;
- if (cpa->flags & CPA_ARRAY)
+ if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
cpa->curpage++;
else
*cpa->vaddr += cpa->numpages * PAGE_SIZE;
@@ -787,7 +801,8 @@ static inline int cache_attr(pgprot_t attr)
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
- int force_split, int in_flag)
+ int force_split, int in_flag,
+ struct page **pages)
{
struct cpa_data cpa;
int ret, cache, checkalias;
@@ -802,15 +817,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
return 0;
/* Ensure we are PAGE_SIZE aligned */
- if (!(in_flag & CPA_ARRAY)) {
- if (*addr & ~PAGE_MASK) {
- *addr &= PAGE_MASK;
- /*
- * People should not be passing in unaligned addresses:
- */
- WARN_ON_ONCE(1);
- }
- } else {
+ if (in_flag & CPA_ARRAY) {
int i;
for (i = 0; i < numpages; i++) {
if (addr[i] & ~PAGE_MASK) {
@@ -818,6 +825,18 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
WARN_ON_ONCE(1);
}
}
+ } else if (!(in_flag & CPA_PAGES_ARRAY)) {
+ /*
+ * in_flag of CPA_PAGES_ARRAY implies it is aligned.
+ * No need to check in that case
+ */
+ if (*addr & ~PAGE_MASK) {
+ *addr &= PAGE_MASK;
+ /*
+ * People should not be passing in unaligned addresses:
+ */
+ WARN_ON_ONCE(1);
+ }
}
/* Must avoid aliasing mappings in the highmem code */
@@ -833,6 +852,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
arch_flush_lazy_mmu_mode();
cpa.vaddr = addr;
+ cpa.pages = pages;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
cpa.mask_clr = mask_clr;
@@ -840,8 +860,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
cpa.curpage = 0;
cpa.force_split = force_split;
- if (in_flag & CPA_ARRAY)
- cpa.flags |= CPA_ARRAY;
+ if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
+ cpa.flags |= in_flag;
/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
@@ -867,9 +887,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
* wbindv):
*/
if (!ret && cpu_has_clflush) {
- if (cpa.flags & CPA_ARRAY)
- cpa_flush_array(addr, numpages, cache);
- else
+ if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+ cpa_flush_array(addr, numpages, cache,
+ cpa.flags, pages);
+ } else
cpa_flush_range(*addr, numpages, cache);
} else
cpa_flush_all(cache);
@@ -889,14 +910,14 @@ static inline int change_page_attr_set(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
- (array ? CPA_ARRAY : 0));
+ (array ? CPA_ARRAY : 0), NULL);
}
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
- (array ? CPA_ARRAY : 0));
+ (array ? CPA_ARRAY : 0), NULL);
}
int _set_memory_uc(unsigned long addr, int numpages)
@@ -1044,7 +1065,7 @@ int set_memory_np(unsigned long addr, int numpages)
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
- __pgprot(0), 1, 0);
+ __pgprot(0), 1, 0, NULL);
}
int set_pages_uc(struct page *page, int numpages)
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [tip:x86/mm] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb
2009-03-19 21:51 ` [patch 3/3] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb venkatesh.pallipadi
@ 2009-03-20 10:24 ` venkatesh.pallipadi
0 siblings, 0 replies; 7+ messages in thread
From: venkatesh.pallipadi @ 2009-03-20 10:24 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, venkatesh.pallipadi, tglx, mingo
Commit-ID: 0f3507555f6fa4acbc85a646d6e8766230db38fc
Gitweb: http://git.kernel.org/tip/0f3507555f6fa4acbc85a646d6e8766230db38fc
Author: venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
AuthorDate: Thu, 19 Mar 2009 14:51:15 -0700
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 20 Mar 2009 10:34:49 +0100
x86, CPA: Add set_pages_arrayuc and set_pages_array_wb
Add new interfaces:
set_pages_array_uc()
set_pages_array_wb()
that can be used to change the page attribute for a bunch of pages with
flush etc done once at the end of all the changes. These interfaces
are similar to existing set_memory_array_uc() and set_memory_array_wb().
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: arjan@infradead.org
Cc: eric@anholt.net
Cc: airlied@redhat.com
LKML-Reference: <20090319215358.901545000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
arch/x86/include/asm/cacheflush.h | 3 ++
arch/x86/mm/pageattr.c | 63 +++++++++++++++++++++++++++++++++++++
2 files changed, 66 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 5b301b7..b3894bf 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -90,6 +90,9 @@ int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e5c257f..d71e1b6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -920,6 +920,20 @@ static inline int change_page_attr_clear(unsigned long *addr, int numpages,
(array ? CPA_ARRAY : 0), NULL);
}
+static inline int cpa_set_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
+static inline int cpa_clear_pages_array(struct page **pages, int numpages,
+ pgprot_t mask)
+{
+ return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
+ CPA_PAGES_ARRAY, pages);
+}
+
int _set_memory_uc(unsigned long addr, int numpages)
{
/*
@@ -1076,6 +1090,35 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ unsigned long start;
+ unsigned long end;
+ int i;
+ int free_idx;
+
+ for (i = 0; i < addrinarray; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+ goto err_out;
+ }
+
+ if (cpa_set_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
+ return 0; /* Success */
+ }
+err_out:
+ free_idx = i;
+ for (i = 0; i < free_idx; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(set_pages_array_uc);
+
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
@@ -1084,6 +1127,26 @@ int set_pages_wb(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_wb);
+int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int retval;
+ unsigned long start;
+ unsigned long end;
+ int i;
+
+ retval = cpa_clear_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_MASK));
+
+ for (i = 0; i < addrinarray; i++) {
+ start = (unsigned long)page_address(pages[i]);
+ end = start + PAGE_SIZE;
+ free_memtype(start, end);
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(set_pages_array_wb);
+
int set_pages_x(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
^ permalink raw reply related [flat|nested] 7+ messages in thread
end of thread, other threads:[~2009-03-20 10:26 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-03-19 21:51 [patch 0/3] x86, CPA: Introduce new APIs set_pages_array[uc|wb] venkatesh.pallipadi
2009-03-19 21:51 ` [patch 1/3] x86, CPA: Add a flag parameter to cpa set_clr venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] x86, CPA: Add a flag parameter to cpa set_clr() venkatesh.pallipadi
2009-03-19 21:51 ` [patch 2/3] x86, PAT: Add support for struct page pointer array in cpa set_clr venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] " venkatesh.pallipadi
2009-03-19 21:51 ` [patch 3/3] x86, CPA: Add set_pages_arrayuc and set_pages_array_wb venkatesh.pallipadi
2009-03-20 10:24 ` [tip:x86/mm] " venkatesh.pallipadi
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox