From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Wei Liu <wei.liu2@citrix.com>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH v5 12/23] x86/mm: move and rename map_ldt_shadow_page
Date: Thu, 14 Sep 2017 13:58:41 +0100 [thread overview]
Message-ID: <20170914125852.22129-13-wei.liu2@citrix.com> (raw)
In-Reply-To: <20170914125852.22129-1-wei.liu2@citrix.com>

Add the pv prefix to it, move it to pv/mm.c and fix up the call sites.
Take the chance to rename v to curr and d to currd.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
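Review note (not part of the commit message): the caller-visible change
is just the rename; a minimal before/after sketch of a call site,
mirroring the traps.c hunk below:

    /* Before: declared in asm-x86/mm.h */
    if ( likely(map_ldt_shadow_page(offset)) )
        /* copy the mapping into the shadow LDT */;

    /* After: declared in asm-x86/pv/mm.h; builds without PV support
     * (the #else branch there) get a static inline stub that simply
     * returns false. */
    if ( likely(pv_map_ldt_shadow_page(offset)) )
        /* unchanged behaviour for PV guests */;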
xen/arch/x86/mm.c | 73 -------------------------------------------
xen/arch/x86/pv/mm.c | 75 +++++++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/traps.c | 4 +--
xen/include/asm-x86/mm.h | 2 --
xen/include/asm-x86/pv/mm.h | 4 +++
5 files changed, 81 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index bfdba34468..8e25d15631 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -526,27 +526,6 @@ void update_cr3(struct vcpu *v)
make_cr3(v, cr3_mfn);
}
-/*
- * Read the guest's l1e that maps this address, from the kernel-mode
- * page tables.
- */
-static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
-{
- struct vcpu *curr = current;
- const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
- l1_pgentry_t l1e;
-
- if ( user_mode )
- toggle_guest_mode(curr);
-
- l1e = guest_get_eff_l1e(linear);
-
- if ( user_mode )
- toggle_guest_mode(curr);
-
- return l1e;
-}
-
static inline void page_set_tlbflush_timestamp(struct page_info *page)
{
/*
@@ -615,58 +594,6 @@ static int alloc_segdesc_page(struct page_info *page)
return i == 512 ? 0 : -EINVAL;
}
-
-/*
- * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
- * into Xen's virtual range. Returns true if the mapping changed, false
- * otherwise.
- */
-bool map_ldt_shadow_page(unsigned int offset)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- struct page_info *page;
- l1_pgentry_t gl1e, *pl1e;
- unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
-
- BUG_ON(unlikely(in_irq()));
-
- /*
- * Hardware limit checking should guarantee this property. NB. This is
- * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
- * current vcpu, and vcpu_reset() will block until this vcpu has been
- * descheduled before continuing.
- */
- ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);
-
- if ( is_pv_32bit_domain(d) )
- linear = (uint32_t)linear;
-
- gl1e = guest_get_eff_kern_l1e(linear);
- if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
- return false;
-
- page = get_page_from_gfn(d, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
- if ( unlikely(!page) )
- return false;
-
- if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
- {
- put_page(page);
- return false;
- }
-
- pl1e = &pv_ldt_ptes(v)[offset >> PAGE_SHIFT];
- l1e_add_flags(gl1e, _PAGE_RW);
-
- spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
- l1e_write(pl1e, gl1e);
- v->arch.pv_vcpu.shadow_ldt_mapcnt++;
- spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
- return true;
-}
-
static int get_page_and_type_from_mfn(
mfn_t mfn, unsigned long type, struct domain *d,
int partial, int preemptible)
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index 4bfa322788..6890e80efd 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -22,6 +22,9 @@
#include <xen/guest_access.h>
#include <asm/current.h>
+#include <asm/p2m.h>
+
+#include "mm.h"
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
@@ -58,6 +61,78 @@ l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)
return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);
}
+/*
+ * Read the guest's l1e that maps this address, from the kernel-mode
+ * page tables.
+ */
+static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
+{
+ struct vcpu *curr = current;
+ const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
+ l1_pgentry_t l1e;
+
+ if ( user_mode )
+ toggle_guest_mode(curr);
+
+ l1e = guest_get_eff_l1e(linear);
+
+ if ( user_mode )
+ toggle_guest_mode(curr);
+
+ return l1e;
+}
+
+/*
+ * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
+ * into Xen's virtual range. Returns true if the mapping changed, false
+ * otherwise.
+ */
+bool pv_map_ldt_shadow_page(unsigned int offset)
+{
+ struct vcpu *curr = current;
+ struct domain *currd = curr->domain;
+ struct page_info *page;
+ l1_pgentry_t gl1e, *pl1e;
+ unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;
+
+ BUG_ON(unlikely(in_irq()));
+
+ /*
+ * Hardware limit checking should guarantee this property. NB. This is
+ * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
+ * current vcpu, and vcpu_reset() will block until this vcpu has been
+ * descheduled before continuing.
+ */
+ ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);
+
+ if ( is_pv_32bit_domain(currd) )
+ linear = (uint32_t)linear;
+
+ gl1e = guest_get_eff_kern_l1e(linear);
+ if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
+ return false;
+
+ page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+ if ( unlikely(!page) )
+ return false;
+
+ if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
+ {
+ put_page(page);
+ return false;
+ }
+
+ pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
+ l1e_add_flags(gl1e, _PAGE_RW);
+
+ spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+ l1e_write(pl1e, gl1e);
+ curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
+ spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+
+ return true;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d84db4acda..d8feef2942 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1101,7 +1101,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
/*
* If the fault is in another vcpu's area, it cannot be due to
* a GDT/LDT descriptor load. Thus we can reasonably exit immediately, and
- * indeed we have to since map_ldt_shadow_page() works correctly only on
+ * indeed we have to since pv_map_ldt_shadow_page() works correctly only on
* accesses to a vcpu's own area.
*/
if ( vcpu_area != curr->vcpu_id )
@@ -1113,7 +1113,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
if ( likely(is_ldt_area) )
{
/* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
- if ( likely(map_ldt_shadow_page(offset)) )
+ if ( likely(pv_map_ldt_shadow_page(offset)) )
{
if ( guest_mode(regs) )
trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index a48d75d434..8a56bed454 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -562,8 +562,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
-bool map_ldt_shadow_page(unsigned int);
-
#define NIL(type) ((type *)-sizeof(type))
#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 3ca24cc70a..47223e38eb 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -28,6 +28,8 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs);
long pv_set_gdt(struct vcpu *d, unsigned long *frames, unsigned int entries);
void pv_destroy_gdt(struct vcpu *d);
+bool pv_map_ldt_shadow_page(unsigned int off);
+
#else
#include <xen/errno.h>
@@ -43,6 +45,8 @@ static inline long pv_set_gdt(struct vcpu *d, unsigned long *frames,
{ return -EINVAL; }
static inline void pv_destroy_gdt(struct vcpu *d) {}
+static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
+
#endif
#endif /* __X86_PV_MM_H__ */
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel