From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Kevin Tian <kevin.tian@intel.com>, Wei Liu <wei.liu2@citrix.com>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Jun Nakajima <jun.nakajima@intel.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Tim Deegan <tim@xen.org>,
George Dunlap <george.dunlap@eu.citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Brian Woods <brian.woods@amd.com>
Subject: [PATCH v3 01/16] x86: change name of parameter for various invlpg functions
Date: Tue, 4 Sep 2018 17:15:18 +0100 [thread overview]
Message-ID: <20180904161533.11575-2-wei.liu2@citrix.com> (raw)
In-Reply-To: <20180904161533.11575-1-wei.liu2@citrix.com>
They all incorrectly named a parameter "virtual address" when it should
have been "linear address".
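(For context, not part of the patch: on x86 the INVLPG/INVLPGA instructions
operate on linear addresses, i.e. the address produced after segmentation
(segment base plus the effective, or "virtual", address) but before paging.
A minimal sketch of that relationship, using made-up names rather than Xen
helpers:

    /*
     * Illustrative sketch only; to_linear(), seg_base and effective are
     * hypothetical names, not Xen APIs.  The value handed to invlpg-style
     * interfaces such as paging_invlpg() is this post-segmentation,
     * pre-paging address.
     */
    static inline unsigned long to_linear(unsigned long seg_base,
                                          unsigned long effective)
    {
        return seg_base + effective;
    }

In the common flat/64-bit case the segment base is zero, so the two values
coincide, which is presumably why the old "vaddr" naming went unnoticed.)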
Requested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
xen/arch/x86/hvm/svm/svm.c | 14 +++++++-------
xen/arch/x86/hvm/vmx/vmx.c | 12 ++++++------
xen/arch/x86/mm.c | 10 +++++-----
xen/arch/x86/mm/hap/hap.c | 2 +-
xen/arch/x86/mm/shadow/multi.c | 14 +++++++-------
xen/arch/x86/mm/shadow/none.c | 2 +-
xen/include/asm-x86/hvm/hvm.h | 6 +++---
xen/include/asm-x86/hvm/svm/asid.h | 4 ++--
xen/include/asm-x86/hvm/svm/svm.h | 4 ++--
xen/include/asm-x86/paging.h | 3 ++-
10 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 0b06e2ff11..34d55b4938 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2488,18 +2488,18 @@ static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
}
static void svm_invlpga_intercept(
- struct vcpu *v, unsigned long vaddr, uint32_t asid)
+ struct vcpu *v, unsigned long linear, uint32_t asid)
{
- svm_invlpga(vaddr,
+ svm_invlpga(linear,
(asid == 0)
? v->arch.hvm.n1asid.asid
: vcpu_nestedhvm(v).nv_n2asid.asid);
}
-static void svm_invlpg_intercept(unsigned long vaddr)
+static void svm_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
- paging_invlpg(current, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));
+ paging_invlpg(current, linear);
}
static bool is_invlpg(const struct x86_emulate_state *state,
@@ -2512,9 +2512,9 @@ static bool is_invlpg(const struct x86_emulate_state *state,
(ext & 7) == 7;
}
-static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
+static void svm_invlpg(struct vcpu *v, unsigned long linear)
{
- svm_asid_g_invlpg(v, vaddr);
+ svm_asid_g_invlpg(v, linear);
}
static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e926b0b28e..b2e1a28038 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -75,7 +75,7 @@ static void vmx_wbinvd_intercept(void);
static void vmx_fpu_dirty_intercept(void);
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
+static void vmx_invlpg(struct vcpu *v, unsigned long linear);
/* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
#define PI_CSW_FROM (1u << 0)
@@ -2595,16 +2595,16 @@ static void vmx_dr_access(unsigned long exit_qualification,
vmx_update_cpu_exec_control(v);
}
-static void vmx_invlpg_intercept(unsigned long vaddr)
+static void vmx_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
- paging_invlpg(current, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
+ paging_invlpg(current, linear);
}
-static void vmx_invlpg(struct vcpu *v, unsigned long vaddr)
+static void vmx_invlpg(struct vcpu *v, unsigned long linear)
{
if ( cpu_has_vmx_vpid )
- vpid_sync_vcpu_gva(v, vaddr);
+ vpid_sync_vcpu_gva(v, linear);
}
static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 84979f28d5..409814ce0a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5793,19 +5793,19 @@ const unsigned long *__init get_platform_badpages(unsigned int *array_size)
return bad_pages;
}
-void paging_invlpg(struct vcpu *v, unsigned long va)
+void paging_invlpg(struct vcpu *v, unsigned long linear)
{
- if ( !is_canonical_address(va) )
+ if ( !is_canonical_address(linear) )
return;
if ( paging_mode_enabled(v->domain) &&
- !paging_get_hostmode(v)->invlpg(v, va) )
+ !paging_get_hostmode(v)->invlpg(v, linear) )
return;
if ( is_pv_vcpu(v) )
- flush_tlb_one_local(va);
+ flush_tlb_one_local(linear);
else
- hvm_invlpg(v, va);
+ hvm_invlpg(v, linear);
}
/* Build a 32bit PSE page table using 4MB pages. */
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index c53d76cf69..3d651b94c3 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -650,7 +650,7 @@ static int hap_page_fault(struct vcpu *v, unsigned long va,
* should not be intercepting it. However, we need to correctly handle
* getting here from instruction emulation.
*/
-static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
+static bool_t hap_invlpg(struct vcpu *v, unsigned long linear)
{
/*
* Emulate INVLPGA:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7bb6f47155..bba573ae87 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3549,7 +3549,7 @@ propagate:
* instruction should be issued on the hardware, or false if it's safe not
* to do so.
*/
-static bool sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long linear)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
@@ -3572,14 +3572,14 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
{
shadow_l3e_t sl3e;
if ( !(shadow_l4e_get_flags(
- sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
+ sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
& _PAGE_PRESENT) )
return false;
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
- + shadow_l3_linear_offset(va)),
+ + shadow_l3_linear_offset(linear)),
sizeof (sl3e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
@@ -3589,7 +3589,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
return false;
}
#else /* SHADOW_PAGING_LEVELS == 3 */
- if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
+ if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(linear)])
& _PAGE_PRESENT) )
// no need to flush anything if there's no SL2...
return false;
@@ -3598,7 +3598,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
/* This must still be a copy-from-user because we don't have the shadow
* lock, and the higher-level shadows might disappear under our feet. */
if ( __copy_from_user(&sl2e,
- sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
+ sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
@@ -3642,7 +3642,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
* feet. */
if ( __copy_from_user(&sl2e,
sh_linear_l2_table(v)
- + shadow_l2_linear_offset(va),
+ + shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
@@ -3664,7 +3664,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long va)
&& page_is_out_of_sync(pg) ) )
{
shadow_l1e_t *sl1;
- sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(va);
+ sl1 = sh_linear_l1_table(v) + shadow_l1_linear_offset(linear);
/* Remove the shadow entry that maps this VA */
(void) shadow_set_l1e(d, sl1, shadow_l1e_empty(),
p2m_invalid, sl1mfn);
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index a8c9604cdf..4de645a433 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -37,7 +37,7 @@ static int _page_fault(struct vcpu *v, unsigned long va,
return 0;
}
-static bool _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long linear)
{
ASSERT_UNREACHABLE();
return true;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 132e62b4f6..6b0e088750 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -160,7 +160,7 @@ struct hvm_function_table {
int (*event_pending)(struct vcpu *v);
bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
- void (*invlpg)(struct vcpu *v, unsigned long vaddr);
+ void (*invlpg)(struct vcpu *v, unsigned long linear);
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
@@ -454,9 +454,9 @@ static inline int hvm_event_pending(struct vcpu *v)
return hvm_funcs.event_pending(v);
}
-static inline void hvm_invlpg(struct vcpu *v, unsigned long va)
+static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
- hvm_funcs.invlpg(v, va);
+ hvm_funcs.invlpg(v, linear);
}
/* These bits in CR4 are owned by the host. */
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index 60cbb7b881..0e5ec3ab78 100644
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -25,11 +25,11 @@
void svm_asid_init(const struct cpuinfo_x86 *c);
void svm_asid_handle_vmrun(void);
-static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
+static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_linear)
{
#if 0
/* Optimization? */
- svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
+ svm_invlpga(g_linear, v->arch.hvm.svm.vmcb->guest_asid);
#endif
/* Safe fallback. Take a new ASID. */
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 4e5e142910..8166046a6d 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -40,13 +40,13 @@ static inline void svm_vmsave_pa(paddr_t vmcb)
: : "a" (vmcb) : "memory" );
}
-static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+static inline void svm_invlpga(unsigned long linear, uint32_t asid)
{
asm volatile (
".byte 0x0f,0x01,0xdf"
: /* output */
: /* input */
- "a" (vaddr), "c" (asid));
+ "a" (linear), "c" (asid));
}
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index f440e3e53c..b51e1709d3 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -110,7 +110,8 @@ struct shadow_paging_mode {
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- bool (*invlpg )(struct vcpu *v, unsigned long va);
+ bool (*invlpg )(struct vcpu *v,
+ unsigned long linear);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Thread overview: 57+ messages
2018-09-04 16:15 [PATCH v3 00/16] Make CONFIG_HVM work Wei Liu
2018-09-04 16:15 ` Wei Liu [this message]
2018-09-06 11:12 ` [PATCH v3 01/16] x86: change name of parameter for various invlpg functions George Dunlap
2018-09-13 16:11 ` George Dunlap
2018-09-04 16:15 ` [PATCH v3 02/16] x86: introduce and use a set of internal emulation flags Wei Liu
2018-09-06 13:27 ` Jan Beulich
2018-09-06 13:47 ` Wei Liu
2018-09-04 16:15 ` [PATCH v3 03/16] x86: XENMEM_resource_ioreq_server is HVM only Wei Liu
2018-09-04 16:24 ` Paul Durrant
2018-09-04 16:42 ` Wei Liu
2018-09-06 13:29 ` Jan Beulich
2018-09-04 16:15 ` [PATCH v3 04/16] x86: monitor.o is currently HVM only Wei Liu
2018-09-04 16:35 ` Razvan Cojocaru
2018-09-04 16:15 ` [PATCH v3 05/16] x86: PIT emulation is common to both PV and HVM Wei Liu
2018-09-06 14:26 ` Jan Beulich
2018-09-04 16:15 ` [PATCH v3 06/16] libxl: don't set PoD target for PV guests Wei Liu
2018-09-07 13:44 ` Ian Jackson
2018-09-04 16:15 ` [PATCH v3 07/16] x86/p2m/pod: make it build with !CONFIG_HVM Wei Liu
2018-09-04 17:08 ` Razvan Cojocaru
2018-09-04 17:10 ` Razvan Cojocaru
2018-09-04 17:24 ` Julien Grall
2018-09-06 10:57 ` Wei Liu
2018-09-06 15:30 ` George Dunlap
2018-09-06 15:05 ` Jan Beulich
2018-09-06 16:06 ` George Dunlap
2018-09-04 16:15 ` [PATCH v3 08/16] x86/hvm: rearrange content of hvm.h Wei Liu
2018-09-07 6:52 ` Jan Beulich
2018-09-04 16:15 ` [PATCH v3 09/16] x86: provide stubs, declarations and macros in hvm.h Wei Liu
2018-09-07 7:02 ` Jan Beulich
2018-09-13 15:31 ` Wei Liu
2018-09-04 16:15 ` [PATCH v3 10/16] x86/mm: put nested p2m code under CONFIG_HVM Wei Liu
2018-09-06 16:20 ` George Dunlap
2018-09-13 15:46 ` Wei Liu
2018-09-13 16:01 ` George Dunlap
2018-09-07 7:06 ` Jan Beulich
2018-09-13 15:07 ` Wei Liu
2018-09-14 8:01 ` Jan Beulich
2018-09-04 16:15 ` [PATCH v3 11/16] x86/mm: put HVM only p2m code under CONFIG_HVM Wei Liu
2018-09-04 17:10 ` Razvan Cojocaru
2018-09-07 7:12 ` Jan Beulich
2018-09-07 21:27 ` Tamas K Lengyel
2018-09-13 15:43 ` Wei Liu
2018-09-13 16:31 ` Tamas K Lengyel
2018-09-04 16:15 ` [PATCH v3 12/16] x86/mm: put paging_update_nestedmode under CONFIG_HVM Wei Liu
2018-09-13 16:39 ` George Dunlap
2018-09-04 16:15 ` [PATCH v3 13/16] xen: connect guest creation with CONFIG_{HVM, PV} Wei Liu
2018-09-04 16:15 ` [PATCH v3 14/16] x86: expose CONFIG_HVM Wei Liu
2018-09-07 7:15 ` Jan Beulich
2018-09-13 16:01 ` Wei Liu
2018-09-14 8:07 ` Jan Beulich
2018-09-14 10:36 ` Wei Liu
2018-09-04 16:15 ` [PATCH v3 15/16] x86/pvshim: disable HVM for PV shim Wei Liu
2018-09-07 7:18 ` Jan Beulich
2018-09-07 7:46 ` Wei Liu
2018-09-07 7:48 ` Wei Liu
2018-09-04 16:15 ` [PATCH v3 16/16] xen: decouple HVM and IOMMU capabilities Wei Liu
2018-09-13 15:52 ` Ian Jackson