From: Peter Xu <peterx@redhat.com>
To: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"David Hildenbrand" <david@redhat.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	qemu-devel@nongnu.org, linuxarm@huawei.com
Subject: Re: [PATCH 2/3] physmem: Factor out body of flatview_read/write_continue() loop
Date: Fri, 1 Mar 2024 13:29:04 +0800
Message-ID: <ZeFnoBLJ_fKhLD8r@x1n>
In-Reply-To: <20240215142817.1904-3-Jonathan.Cameron@huawei.com>

On Thu, Feb 15, 2024 at 02:28:16PM +0000, Jonathan Cameron wrote:
> This code will be reused for the address_space_cached accessors
> shortly.
> 
> Also reduce the scope of the result variable now that we aren't
> directly calling this in the loop.
> 
> Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> ---
>  system/physmem.c | 165 ++++++++++++++++++++++++++++-------------------
>  1 file changed, 98 insertions(+), 67 deletions(-)
> 
> diff --git a/system/physmem.c b/system/physmem.c
> index 39b5ac751e..74f92bb3b8 100644
> --- a/system/physmem.c
> +++ b/system/physmem.c
> @@ -2677,6 +2677,54 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
>      return false;
>  }
>  
> +static MemTxResult flatview_write_continue_step(hwaddr addr,
> +                                                MemTxAttrs attrs,
> +                                                const uint8_t *buf,
> +                                                hwaddr len, hwaddr addr1,
> +                                                hwaddr *l, MemoryRegion *mr)
> +{
> +    if (!flatview_access_allowed(mr, attrs, addr1, *l)) {
> +        return MEMTX_ACCESS_ERROR;
> +    }
> +
> +    if (!memory_access_is_direct(mr, true)) {
> +        uint64_t val;
> +        MemTxResult result;
> +        bool release_lock = prepare_mmio_access(mr);
> +
> +        *l = memory_access_size(mr, *l, addr1);
> +        /* XXX: could force current_cpu to NULL to avoid
> +           potential bugs */
> +
> +        /*
> +         * Assure Coverity (and ourselves) that we are not going to OVERRUN
> +         * the buffer by following ldn_he_p().
> +         */
> +#ifdef QEMU_STATIC_ANALYSIS
> +        assert((*l == 1 && len >= 1) ||
> +               (*l == 2 && len >= 2) ||
> +               (*l == 4 && len >= 4) ||
> +               (*l == 8 && len >= 8));
> +#endif
> +        val = ldn_he_p(buf, *l);
> +        result = memory_region_dispatch_write(mr, addr1, val,
> +                                              size_memop(*l), attrs);
> +        if (release_lock) {
> +            bql_unlock();
> +        }
> +
> +        return result;
> +    } else {
> +        /* RAM case */
> +        uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, l, false);
> +
> +        memmove(ram_ptr, buf, *l);
> +        invalidate_and_set_dirty(mr, addr1, *l);
> +
> +        return MEMTX_OK;
> +    }
> +}
> +
>  /* Called within RCU critical section.  */
>  static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
>                                             MemTxAttrs attrs,
> @@ -2688,42 +2736,9 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
>      const uint8_t *buf = ptr;
>  
>      for (;;) {
> -        if (!flatview_access_allowed(mr, attrs, addr1, l)) {
> -            result |= MEMTX_ACCESS_ERROR;
> -            /* Keep going. */
> -        } else if (!memory_access_is_direct(mr, true)) {
> -            uint64_t val;
> -            bool release_lock = prepare_mmio_access(mr);
> -
> -            l = memory_access_size(mr, l, addr1);
> -            /* XXX: could force current_cpu to NULL to avoid
> -               potential bugs */
> -
> -            /*
> -             * Assure Coverity (and ourselves) that we are not going to OVERRUN
> -             * the buffer by following ldn_he_p().
> -             */
> -#ifdef QEMU_STATIC_ANALYSIS
> -            assert((l == 1 && len >= 1) ||
> -                   (l == 2 && len >= 2) ||
> -                   (l == 4 && len >= 4) ||
> -                   (l == 8 && len >= 8));
> -#endif
> -            val = ldn_he_p(buf, l);
> -            result |= memory_region_dispatch_write(mr, addr1, val,
> -                                                   size_memop(l), attrs);
> -            if (release_lock) {
> -                bql_unlock();
> -            }
> -
>  
> -        } else {
> -            /* RAM case */
> -            uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l,
> -                                                   false);
> -            memmove(ram_ptr, buf, l);
> -            invalidate_and_set_dirty(mr, addr1, l);
> -        }
> +        result |= flatview_write_continue_step(addr, attrs, buf, len, addr1, &l,
> +                                               mr);
>  
>          len -= l;
>          buf += l;
> @@ -2757,6 +2772,52 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
>                                     addr1, l, mr);
>  }
>  
> +static MemTxResult flatview_read_continue_step(hwaddr addr,
> +                                               MemTxAttrs attrs, uint8_t *buf,
> +                                               hwaddr len, hwaddr addr1,
> +                                               hwaddr *l,
> +                                               MemoryRegion *mr)
> +{
> +    if (!flatview_access_allowed(mr, attrs, addr1, *l)) {
> +        return  MEMTX_ACCESS_ERROR;
                  |
                  ^ extra space

> +    }
> +
> +    if (!memory_access_is_direct(mr, false)) {
> +        /* I/O case */
> +        uint64_t val;
> +        MemTxResult result;
> +        bool release_lock = prepare_mmio_access(mr);
> +
> +        *l = memory_access_size(mr, *l, addr1);
> +        result = memory_region_dispatch_read(mr, addr1, &val,
> +                                                  size_memop(*l), attrs);

Please do proper indents.
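
For example (just a sketch, aligning the continuation with the opening
parenthesis, as the write path above already does):

  result = memory_region_dispatch_read(mr, addr1, &val,
                                       size_memop(*l), attrs);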

Other than that:

Reviewed-by: Peter Xu <peterx@redhat.com>

-- 
Peter Xu



