From: Paul Durrant <Paul.Durrant@citrix.com>
To: Jan Beulich <JBeulich@suse.com>,
xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Subject: Re: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
Date: Tue, 14 Jun 2016 14:50:22 +0000 [thread overview]
Message-ID: <4325e3ec5c4c4498b9dbd27a5703f1f6@AMSPEX02CL03.citrite.net> (raw)
In-Reply-To: <5760348B02000078000F4DC9@prv-mh.provo.novell.com>
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 14 June 2016 15:45
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant
> Subject: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
>
> ... to correctly reflect its purpose. To make things consistent also
> rename handle_mmio_with_translation()'s respective parameter (but don't
> touch sh_page_fault(), as renaming its parameter would require quite a
> few more changes there).
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
> if ( vio->mmio_access.gla_valid )
> return;
>
> - vio->mmio_gva = gla & PAGE_MASK;
> + vio->mmio_gla = gla & PAGE_MASK;
> vio->mmio_gpfn = PFN_DOWN(gpa);
> vio->mmio_access = (struct npfec){ .gla_valid = 1,
> .read_access = 1,
> @@ -782,7 +782,7 @@ static int __hvmemul_read(
> if ( ((access_type != hvm_access_insn_fetch
> ? vio->mmio_access.read_access
> : vio->mmio_access.insn_fetch)) &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) )
> + (vio->mmio_gla == (addr & PAGE_MASK)) )
> return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
>
> if ( (seg != x86_seg_none) &&
> @@ -889,7 +889,7 @@ static int hvmemul_write(
> return rc;
>
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) )
> + (vio->mmio_gla == (addr & PAGE_MASK)) )
> return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
>
> if ( (seg != x86_seg_none) &&
> @@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
>
> bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
> if ( vio->mmio_access.read_access &&
> - (vio->mmio_gva == (saddr & PAGE_MASK)) &&
> + (vio->mmio_gla == (saddr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
> @@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
>
> bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (daddr & PAGE_MASK)) &&
> + (vio->mmio_gla == (daddr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
> @@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
>
> bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) &&
> + (vio->mmio_gla == (addr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -114,7 +114,7 @@ int handle_mmio(void)
> return 1;
> }
>
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> struct npfec access)
> {
>     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
> @@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
> vio->mmio_access = access.gla_valid &&
> access.kind == npfec_kind_with_gla
> ? access : (struct npfec){};
> - vio->mmio_gva = gva & PAGE_MASK;
> + vio->mmio_gla = gla & PAGE_MASK;
> vio->mmio_gpfn = gpfn;
> return handle_mmio();
> }
> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -119,7 +119,7 @@ void relocate_portio_handler(
> void send_timeoffset_req(unsigned long timeoff);
> void send_invalidate_req(void);
> int handle_mmio(void);
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> struct npfec);
> int handle_pio(uint16_t port, unsigned int size, int dir);
> void hvm_interrupt_post(struct vcpu *v, int vector, int type);
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -60,12 +60,12 @@ struct hvm_vcpu_io {
>
> /*
> * HVM emulation:
> -     * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
> +     * Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
> * The latter is known to be an MMIO frame (not RAM).
> * This translation is only valid for accesses as per @mmio_access.
> */
> struct npfec mmio_access;
> - unsigned long mmio_gva;
> + unsigned long mmio_gla;
> unsigned long mmio_gpfn;
>
> /*
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
prev parent reply other threads:[~2016-06-14 15:06 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-14 14:44 [PATCH] x86/HVM: rename mmio_gva field to mmio_gla Jan Beulich
2016-06-14 14:48 ` Andrew Cooper
2016-06-14 14:50 ` Paul Durrant [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4325e3ec5c4c4498b9dbd27a5703f1f6@AMSPEX02CL03.citrite.net \
--to=paul.durrant@citrix.com \
--cc=Andrew.Cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).