From: Paolo Bonzini <pbonzini@redhat.com>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v2 01/28] target/i386: Add tcg/access.[ch]
Date: Tue, 9 Apr 2024 09:09:48 +0200	[thread overview]
Message-ID: <c009e60a-1445-4064-943a-cd7eeb843132@redhat.com> (raw)
In-Reply-To: <20240409050302.1523277-2-richard.henderson@linaro.org>

On 4/9/24 07:02, Richard Henderson wrote:
> Provide a method to amortize page lookup across large blocks.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   target/i386/tcg/access.h    |  40 +++++++++
>   target/i386/tcg/access.c    | 160 ++++++++++++++++++++++++++++++++++++
>   target/i386/tcg/meson.build |   1 +
>   3 files changed, 201 insertions(+)
>   create mode 100644 target/i386/tcg/access.h
>   create mode 100644 target/i386/tcg/access.c
> 
> diff --git a/target/i386/tcg/access.h b/target/i386/tcg/access.h
> new file mode 100644
> index 0000000000..d70808a3a3
> --- /dev/null
> +++ b/target/i386/tcg/access.h
> @@ -0,0 +1,40 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/* Access guest memory in blocks. */
> +
> +#ifndef X86_TCG_ACCESS_H
> +#define X86_TCG_ACCESS_H
> +
> +/* An access covers at most sizeof(X86XSaveArea), at most 2 pages. */
> +typedef struct X86Access {
> +    target_ulong vaddr;
> +    void *haddr1;
> +    void *haddr2;
> +    uint16_t size;
> +    uint16_t size1;
> +    /*
> +     * If we can't access the host page directly, we'll have to do I/O access
> +     * via ld/st helpers. These are internal details, so we store the rest
> +     * to do the access here instead of passing it around in the helpers.
> +     */
> +    int mmu_idx;
> +    CPUX86State *env;
> +    uintptr_t ra;
> +} X86Access;
> +
> +void access_prepare_mmu(X86Access *ret, CPUX86State *env,
> +                        vaddr vaddr, unsigned size,
> +                        MMUAccessType type, int mmu_idx, uintptr_t ra);
> +void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
> +                    unsigned size, MMUAccessType type, uintptr_t ra);
> +
> +uint8_t  access_ldb(X86Access *ac, vaddr addr);
> +uint16_t access_ldw(X86Access *ac, vaddr addr);
> +uint32_t access_ldl(X86Access *ac, vaddr addr);
> +uint64_t access_ldq(X86Access *ac, vaddr addr);
> +
> +void access_stb(X86Access *ac, vaddr addr, uint8_t val);
> +void access_stw(X86Access *ac, vaddr addr, uint16_t val);
> +void access_stl(X86Access *ac, vaddr addr, uint32_t val);
> +void access_stq(X86Access *ac, vaddr addr, uint64_t val);
> +
> +#endif
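
The intended usage pattern, as I read it, is to prepare a block once and then
issue the individual loads and stores against it, e.g. something like this
(hypothetical caller, just to illustrate the declared API):

    X86Access ac;
    uint64_t lo, hi;

    access_prepare(&ac, env, ptr, 16, MMU_DATA_LOAD, GETPC());
    lo = access_ldq(&ac, ptr);
    hi = access_ldq(&ac, ptr + 8);

so that the page lookup is amortized over the whole block.
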
> diff --git a/target/i386/tcg/access.c b/target/i386/tcg/access.c
> new file mode 100644
> index 0000000000..8b70f3244b
> --- /dev/null
> +++ b/target/i386/tcg/access.c
> @@ -0,0 +1,160 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/* Access guest memory in blocks. */
> +
> +#include "qemu/osdep.h"
> +#include "cpu.h"
> +#include "exec/cpu_ldst.h"
> +#include "exec/exec-all.h"
> +#include "access.h"
> +
> +
> +void access_prepare_mmu(X86Access *ret, CPUX86State *env,
> +                        vaddr vaddr, unsigned size,
> +                        MMUAccessType type, int mmu_idx, uintptr_t ra)
> +{
> +    int size1, size2;
> +    void *haddr1, *haddr2;
> +
> +    assert(size > 0 && size <= TARGET_PAGE_SIZE);
> +
> +    size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
> +    size2 = size - size1;
> +
> +    memset(ret, 0, sizeof(*ret));
> +    ret->vaddr = vaddr;
> +    ret->size = size;
> +    ret->size1 = size1;
> +    ret->mmu_idx = mmu_idx;
> +    ret->env = env;
> +    ret->ra = ra;
> +
> +    haddr1 = probe_access(env, vaddr, size1, type, mmu_idx, ra);
> +    ret->haddr1 = haddr1;
> +
> +    if (unlikely(size2)) {
> +        haddr2 = probe_access(env, vaddr + size1, size2, type, mmu_idx, ra);
> +        if (haddr2 == haddr1 + size1) {
> +            ret->size1 = size;
> +        } else {
> +            ret->haddr2 = haddr2;
> +        }
> +    }

Should there be an assert(!ret->haddr2) here for the CONFIG_USER_ONLY 
case, or alternatively a g_assert_not_reached() in the "else" above?
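
For concreteness, the first option might look like this at the end of 
access_prepare_mmu() (just a sketch; it relies on user-only guest memory 
being contiguous in the host address space, so the second probe can never 
return a disjoint pointer):

    #ifdef CONFIG_USER_ONLY
        /* User-only guest memory is flat, so the block cannot be split. */
        assert(!ret->haddr2);
    #endif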

> +}
> +
> +void access_prepare(X86Access *ret, CPUX86State *env, vaddr vaddr,
> +                    unsigned size, MMUAccessType type, uintptr_t ra)
> +{
> +    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
> +    access_prepare_mmu(ret, env, vaddr, size, type, mmu_idx, ra);
> +}
> +
> +static void *access_ptr(X86Access *ac, vaddr addr, unsigned len)
> +{
> +    vaddr offset = addr - ac->vaddr;
> +
> +    assert(addr >= ac->vaddr);
> +
> +#ifdef CONFIG_USER_ONLY
> +    assert(offset <= ac->size1 - len);
> +    return ac->haddr1 + offset;
> +#else
> +    if (likely(offset <= ac->size1 - len)) {
> +        return ac->haddr1;
> +    }
> +    assert(offset <= ac->size - len);
> +    if (likely(offset >= ac->size1)) {
> +        return ac->haddr2;
> +    }

I think the returns should be (respectively) ac->haddr1 + offset and 
ac->haddr2 + (offset - ac->size1)?

Also I would add a comment above the second "if", like

     /*
      * If the address is not naturally aligned, it might span
      * both pages.  Only return ac->haddr2 if the area is
      * entirely within the second page, otherwise fall back
      * to slow accesses.
      */

Paolo

> +uint8_t access_ldb(X86Access *ac, vaddr addr)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint8_t));
> +
> +    if (test_ptr(p)) {
> +        return ldub_p(p);
> +    }
> +    return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
> +}
> +
> +uint16_t access_ldw(X86Access *ac, vaddr addr)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint16_t));
> +
> +    if (test_ptr(p)) {
> +        return lduw_le_p(p);
> +    }
> +    return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
> +}
> +
> +uint32_t access_ldl(X86Access *ac, vaddr addr)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint32_t));
> +
> +    if (test_ptr(p)) {
> +        return ldl_le_p(p);
> +    }
> +    return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
> +}
> +
> +uint64_t access_ldq(X86Access *ac, vaddr addr)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint64_t));
> +
> +    if (test_ptr(p)) {
> +        return ldq_le_p(p);
> +    }
> +    return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra);
> +}
> +
> +void access_stb(X86Access *ac, vaddr addr, uint8_t val)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint8_t));
> +
> +    if (test_ptr(p)) {
> +        stb_p(p, val);
> +    } else {
> +        cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
> +    }
> +}
> +
> +void access_stw(X86Access *ac, vaddr addr, uint16_t val)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint16_t));
> +
> +    if (test_ptr(p)) {
> +        stw_le_p(p, val);
> +    } else {
> +        cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
> +    }
> +}
> +
> +void access_stl(X86Access *ac, vaddr addr, uint32_t val)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint32_t));
> +
> +    if (test_ptr(p)) {
> +        stl_le_p(p, val);
> +    } else {
> +        cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
> +    }
> +}
> +
> +void access_stq(X86Access *ac, vaddr addr, uint64_t val)
> +{
> +    void *p = access_ptr(ac, addr, sizeof(uint64_t));
> +
> +    if (test_ptr(p)) {
> +        stq_le_p(p, val);
> +    } else {
> +        cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra);
> +    }
> +}
> diff --git a/target/i386/tcg/meson.build b/target/i386/tcg/meson.build
> index f9110e890c..1105b35d92 100644
> --- a/target/i386/tcg/meson.build
> +++ b/target/i386/tcg/meson.build
> @@ -1,4 +1,5 @@
>   i386_ss.add(when: 'CONFIG_TCG', if_true: files(
> +  'access.c',
>     'bpt_helper.c',
>     'cc_helper.c',
>     'excp_helper.c',


