From: "Jan Beulich" <JBeulich@novell.com>
To: xen-devel@lists.xensource.com
Subject: [PATCH] x86: add support for domain initiated global cache flush
Date: Thu, 29 Apr 2010 16:59:49 +0100 [thread overview]
Message-ID: <4BD9C9150200007800000B14@vpn.id2.novell.com> (raw)
[-- Attachment #1: Type: text/plain, Size: 2857 bytes --]
The AGP code in newer Linux kernels wants to flush caches on all CPUs
under certain circumstances. Since doing this on all vCPUs of the
domain in question doesn't yield the intended effect, the flush needs
to be done in the hypervisor. Add a new MMUEXT operation for this.
While looking at the pre-existing flush implementation, I also noticed
an off-by-one error in the cache flush portion of flush_area_local().
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- 2010-04-22.orig/xen/arch/x86/flushtlb.c 2008-10-14 19:44:12.000000000 +0200
+++ 2010-04-22/xen/arch/x86/flushtlb.c 2010-04-29 17:47:22.000000000 +0200
@@ -149,7 +149,7 @@ void flush_area_local(const void *va, un
{
unsigned long i, sz = 0;
- if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
+ if ( order < (BITS_PER_LONG - PAGE_SHIFT) )
sz = 1UL << (order + PAGE_SHIFT);
if ( c->x86_clflush_size && c->x86_cache_size && sz &&
--- 2010-04-22.orig/xen/arch/x86/mm.c 2010-04-29 17:35:20.000000000 +0200
+++ 2010-04-22/xen/arch/x86/mm.c 2010-04-29 17:51:08.000000000 +0200
@@ -2889,6 +2889,27 @@ int do_mmuext_op(
}
break;
+ case MMUEXT_FLUSH_CACHE_GLOBAL:
+ if ( unlikely(foreigndom != DOMID_SELF) )
+ okay = 0;
+ else if ( likely(cache_flush_permitted(d)) )
+ {
+ unsigned int cpu;
+ cpumask_t mask = CPU_MASK_NONE;
+
+ for_each_online_cpu(cpu)
+ if ( !cpus_intersects(mask,
+ per_cpu(cpu_sibling_map, cpu)) )
+ cpu_set(cpu, mask);
+ flush_mask(&mask, FLUSH_CACHE);
+ }
+ else
+ {
+ MEM_LOG("Non-physdev domain tried to FLUSH_CACHE_GLOBAL");
+ okay = 0;
+ }
+ break;
+
case MMUEXT_SET_LDT:
{
unsigned long ptr = op.arg1.linear_addr;
--- 2010-04-22.orig/xen/include/public/xen.h 2010-04-29 17:35:20.000000000 +0200
+++ 2010-04-22/xen/include/public/xen.h 2010-04-29 17:09:30.000000000 +0200
@@ -239,6 +239,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
*
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
*
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
@@ -268,6 +272,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_NEW_USER_BASEPTR 15
#define MMUEXT_CLEAR_PAGE 16
#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#ifndef __ASSEMBLY__
struct mmuext_op {
[-- Attachment #2: x86-cache-flush-global.patch --]
[-- Type: text/plain, Size: 2851 bytes --]
The AGP code in newer Linux kernels wants to flush caches on all CPUs
under certain circumstances. Since doing this on all vCPUs of the
domain in question doesn't yield the intended effect, the flush needs
to be done in the hypervisor. Add a new MMUEXT operation for this.
While looking at the pre-existing flush implementation, I also noticed
an off-by-one error in the cache flush portion of flush_area_local().
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- 2010-04-22.orig/xen/arch/x86/flushtlb.c 2008-10-14 19:44:12.000000000 +0200
+++ 2010-04-22/xen/arch/x86/flushtlb.c 2010-04-29 17:47:22.000000000 +0200
@@ -149,7 +149,7 @@ void flush_area_local(const void *va, un
{
unsigned long i, sz = 0;
- if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
+ if ( order < (BITS_PER_LONG - PAGE_SHIFT) )
sz = 1UL << (order + PAGE_SHIFT);
if ( c->x86_clflush_size && c->x86_cache_size && sz &&
--- 2010-04-22.orig/xen/arch/x86/mm.c 2010-04-29 17:35:20.000000000 +0200
+++ 2010-04-22/xen/arch/x86/mm.c 2010-04-29 17:51:08.000000000 +0200
@@ -2889,6 +2889,27 @@ int do_mmuext_op(
}
break;
+ case MMUEXT_FLUSH_CACHE_GLOBAL:
+ if ( unlikely(foreigndom != DOMID_SELF) )
+ okay = 0;
+ else if ( likely(cache_flush_permitted(d)) )
+ {
+ unsigned int cpu;
+ cpumask_t mask = CPU_MASK_NONE;
+
+ for_each_online_cpu(cpu)
+ if ( !cpus_intersects(mask,
+ per_cpu(cpu_sibling_map, cpu)) )
+ cpu_set(cpu, mask);
+ flush_mask(&mask, FLUSH_CACHE);
+ }
+ else
+ {
+ MEM_LOG("Non-physdev domain tried to FLUSH_CACHE_GLOBAL");
+ okay = 0;
+ }
+ break;
+
case MMUEXT_SET_LDT:
{
unsigned long ptr = op.arg1.linear_addr;
--- 2010-04-22.orig/xen/include/public/xen.h 2010-04-29 17:35:20.000000000 +0200
+++ 2010-04-22/xen/include/public/xen.h 2010-04-29 17:09:30.000000000 +0200
@@ -239,6 +239,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
*
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
*
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
@@ -268,6 +272,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_NEW_USER_BASEPTR 15
#define MMUEXT_CLEAR_PAGE 16
#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#ifndef __ASSEMBLY__
struct mmuext_op {
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
next reply other threads:[~2010-04-29 15:59 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-04-29 15:59 Jan Beulich [this message]
2010-05-03 17:37 ` [PATCH] x86: add support for domain initiated global cache flush Konrad Rzeszutek Wilk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4BD9C9150200007800000B14@vpn.id2.novell.com \
--to=jbeulich@novell.com \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).