From: Paul Durrant <Paul.Durrant@citrix.com>
To: 'Jan Beulich' <JBeulich@suse.com>,
xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>,
Wei Liu <wei.liu2@citrix.com>,
George Dunlap <George.Dunlap@citrix.com>
Subject: Re: [PATCH v2 2/4] x86/mm: use optional cache in guest_walk_tables()
Date: Tue, 11 Sep 2018 16:17:05 +0000
Message-ID: <fd20bd8ab3a54decbb014b36cbfb5a5d@AMSPEX02CL03.citrite.net>
In-Reply-To: <5B97BFC902000078001E740D@prv1-mh.provo.novell.com>
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 11 September 2018 14:15
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; Wei Liu <wei.liu2@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>
> Subject: [PATCH v2 2/4] x86/mm: use optional cache in guest_walk_tables()
>
> The caching isn't actually implemented here, this is just setting the
> stage.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Don't wrongly use top_gfn for non-root gpa calculation. Re-write
> cache entries after setting A/D bits (an alternative would be to
> suppress their setting upon cache hits).
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -2664,6 +2664,18 @@ void hvm_dump_emulation_state(const char
>             hvmemul_ctxt->insn_buf);
>  }
>
> +bool hvmemul_read_cache(const struct hvmemul_cache *cache, paddr_t gpa,
> +                        unsigned int level, void *buffer, unsigned int size)
> +{
> +    return false;
> +}
> +
> +void hvmemul_write_cache(struct hvmemul_cache *cache, paddr_t gpa,
> +                         unsigned int level, const void *buffer,
> +                         unsigned int size)
> +{
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -92,8 +92,13 @@ guest_walk_tables(struct vcpu *v, struct
>  #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
>      guest_l3e_t *l3p = NULL;
Shouldn't the above line be...
>      guest_l4e_t *l4p;
> +    paddr_t l4gpa;
> +#endif
> +#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
...here?
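I.e. (purely to illustrate the question - I haven't checked whether l3p
is referenced anywhere outside the 64-bit-only paths):

  #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
      guest_l4e_t *l4p;
      paddr_t l4gpa;
  #endif
  #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
      guest_l3e_t *l3p = NULL;
      paddr_t l3gpa;
  #endif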
> +    paddr_t l3gpa;
>  #endif
>      uint32_t gflags, rc;
> +    paddr_t l1gpa = 0, l2gpa = 0;
>      unsigned int leaf_level;
>      p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
>
> @@ -134,7 +139,15 @@ guest_walk_tables(struct vcpu *v, struct
>      /* Get the l4e from the top level table and check its flags*/
>      gw->l4mfn = top_mfn;
>      l4p = (guest_l4e_t *) top_map;
> -    gw->l4e = l4p[guest_l4_table_offset(gla)];
> +    l4gpa = gfn_to_gaddr(top_gfn) +
> +            guest_l4_table_offset(gla) * sizeof(gw->l4e);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e)) )
> +    {
> +        gw->l4e = l4p[guest_l4_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e));
No need to test cache here or below, since neither the read nor the write function (yet) dereferences it.
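I.e. each call site could, for now, simply be (an untested sketch; once
the caching is actually implemented the NULL check would of course need
to move into the functions themselves):

      if ( !hvmemul_read_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e)) )
      {
          gw->l4e = l4p[guest_l4_table_offset(gla)];
          hvmemul_write_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e));
      }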
Paul
> +    }
>      gflags = guest_l4e_get_flags(gw->l4e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -164,7 +177,15 @@ guest_walk_tables(struct vcpu *v, struct
>      }
>
>      /* Get the l3e and check its flags*/
> -    gw->l3e = l3p[guest_l3_table_offset(gla)];
> +    l3gpa = gfn_to_gaddr(guest_l4e_get_gfn(gw->l4e)) +
> +            guest_l3_table_offset(gla) * sizeof(gw->l3e);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
> +    {
> +        gw->l3e = l3p[guest_l3_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
> +    }
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -216,7 +237,16 @@ guest_walk_tables(struct vcpu *v, struct
>  #else /* PAE only... */
>
>      /* Get the l3e and check its flag */
> -    gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
> +    l3gpa = gfn_to_gaddr(top_gfn) + ((unsigned long)top_map & ~PAGE_MASK) +
> +            guest_l3_table_offset(gla) * sizeof(gw->l3e);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e)) )
> +    {
> +        gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
> +    }
> +
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -242,18 +272,26 @@ guest_walk_tables(struct vcpu *v, struct
>          goto out;
>      }
>
> -    /* Get the l2e */
> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
> +    l2gpa = gfn_to_gaddr(guest_l3e_get_gfn(gw->l3e));
>
>  #else /* 32-bit only... */
>
> -    /* Get l2e from the top level table */
>      gw->l2mfn = top_mfn;
>      l2p = (guest_l2e_t *) top_map;
> -    gw->l2e = l2p[guest_l2_table_offset(gla)];
> +    l2gpa = gfn_to_gaddr(top_gfn);
>
>  #endif /* All levels... */
>
> +    /* Get the l2e */
> +    l2gpa += guest_l2_table_offset(gla) * sizeof(gw->l2e);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e)) )
> +    {
> +        gw->l2e = l2p[guest_l2_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e));
> +    }
> +
>      /* Check the l2e flags. */
>      gflags = guest_l2e_get_flags(gw->l2e);
>      if ( !(gflags & _PAGE_PRESENT) )
> @@ -335,7 +373,17 @@ guest_walk_tables(struct vcpu *v, struct
>          gw->pfec |= rc & PFEC_synth_mask;
>          goto out;
>      }
> -    gw->l1e = l1p[guest_l1_table_offset(gla)];
> +
> +    l1gpa = gfn_to_gaddr(guest_l2e_get_gfn(gw->l2e)) +
> +            guest_l1_table_offset(gla) * sizeof(gw->l1e);
> +    if ( !cache ||
> +         !hvmemul_read_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e)) )
> +    {
> +        gw->l1e = l1p[guest_l1_table_offset(gla)];
> +        if ( cache )
> +            hvmemul_write_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e));
> +    }
> +
>      gflags = guest_l1e_get_flags(gw->l1e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -446,22 +494,38 @@ guest_walk_tables(struct vcpu *v, struct
>      case 1:
>          if ( set_ad_bits(&l1p[guest_l1_table_offset(gla)].l1, &gw->l1e.l1,
>                           (walk & PFEC_write_access)) )
> +        {
>              paging_mark_dirty(d, gw->l1mfn);
> +            if ( cache )
> +                hvmemul_write_cache(cache, l1gpa, 1, &gw->l1e, sizeof(gw->l1e));
> +        }
>          /* Fallthrough */
>      case 2:
>          if ( set_ad_bits(&l2p[guest_l2_table_offset(gla)].l2, &gw->l2e.l2,
>                           (walk & PFEC_write_access) && leaf_level == 2) )
> +        {
>              paging_mark_dirty(d, gw->l2mfn);
> +            if ( cache )
> +                hvmemul_write_cache(cache, l2gpa, 2, &gw->l2e, sizeof(gw->l2e));
> +        }
>          /* Fallthrough */
>  #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
>      case 3:
>          if ( set_ad_bits(&l3p[guest_l3_table_offset(gla)].l3, &gw->l3e.l3,
>                           (walk & PFEC_write_access) && leaf_level == 3) )
> +        {
>              paging_mark_dirty(d, gw->l3mfn);
> +            if ( cache )
> +                hvmemul_write_cache(cache, l3gpa, 3, &gw->l3e, sizeof(gw->l3e));
> +        }
>
>          if ( set_ad_bits(&l4p[guest_l4_table_offset(gla)].l4, &gw->l4e.l4,
>                           false) )
> +        {
>              paging_mark_dirty(d, gw->l4mfn);
> +            if ( cache )
> +                hvmemul_write_cache(cache, l4gpa, 4, &gw->l4e, sizeof(gw->l4e));
> +        }
>  #endif
>      }
>
> --- a/xen/include/asm-x86/hvm/emulate.h
> +++ b/xen/include/asm-x86/hvm/emulate.h
> @@ -98,6 +98,13 @@ int hvmemul_do_pio_buffer(uint16_t port,
>                            uint8_t dir,
>                            void *buffer);
>
> +struct hvmemul_cache;
> +bool hvmemul_read_cache(const struct hvmemul_cache *, paddr_t gpa,
> +                        unsigned int level, void *buffer, unsigned int size);
> +void hvmemul_write_cache(struct hvmemul_cache *, paddr_t gpa,
> +                         unsigned int level, const void *buffer,
> +                         unsigned int size);
> +
>  void hvm_dump_emulation_state(const char *loglvl, const char *prefix,
>                                struct hvm_emulate_ctxt *hvmemul_ctxt, int rc);
>
>