From: Fam Zheng <famz@redhat.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: borntraeger@de.ibm.com,
	Frederic Konrad <fred.konrad@greensocs.com>,
	qemu-devel@nongnu.org, Jan Kiszka <jan.kiszka@siemens.com>
Subject: Re: [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL
Date: Thu, 25 Jun 2015 13:11:36 +0800
Message-ID: <20150625051136.GJ17695@ad.nay.redhat.com>
In-Reply-To: <1435163110-2724-6-git-send-email-pbonzini@redhat.com>

On Wed, 06/24 18:25, Paolo Bonzini wrote:
> From: Jan Kiszka <jan.kiszka@siemens.com>
> 
> The MMIO case is further broken up into two cases: if the caller does
> not hold the BQL on invocation, the unlocked path takes or avoids the
> BQL depending on the locking strategy of the target memory region and
> its coalesced MMIO handling.  In this case, the caller should not hold
> _any_ lock (a friendly suggestion which is disregarded by
> virtio-scsi-dataplane).
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> Cc: Frederic Konrad <fred.konrad@greensocs.com>
> Message-Id: <1434646046-27150-6-git-send-email-pbonzini@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  exec.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 60 insertions(+), 9 deletions(-)
> 
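A side note for readers following the series: the mr->global_locking flag
tested in prepare_mmio_access() below is the per-region property added by
patch 3/9.  A device whose MMIO handlers do their own locking can opt out
of the BQL at init time, roughly like this (a sketch only; MyDevState,
my_dev_ops and the region name/size here are made up):

    /* sketch: in the device's init/realize function */
    static void my_dev_init_mmio(MyDevState *s)
    {
        memory_region_init_io(&s->io, OBJECT(s), &my_dev_ops, s,
                              "my-dev-mmio", 0x100);
        /* handlers are thread-safe: dispatch without taking the BQL */
        memory_region_clear_global_locking(&s->io);
    }

Patch 8/9 in this series does the same for the ACPI PM timer.
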
> diff --git a/exec.c b/exec.c
> index f2e6603..fd0401e 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -48,6 +48,7 @@
>  #endif
>  #include "exec/cpu-all.h"
>  #include "qemu/rcu_queue.h"
> +#include "qemu/main-loop.h"
>  #include "exec/cputlb.h"
>  #include "translate-all.h"
>  
> @@ -2316,11 +2317,27 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
>      return l;
>  }
>  
> -static void prepare_mmio_access(MemoryRegion *mr)
> +static bool prepare_mmio_access(MemoryRegion *mr)
>  {
> +    bool unlocked = !qemu_mutex_iothread_locked();
> +    bool release_lock = false;
> +
> +    if (unlocked && mr->global_locking) {
> +        qemu_mutex_lock_iothread();
> +        unlocked = false;
> +        release_lock = true;
> +    }
>      if (mr->flush_coalesced_mmio) {
> +        if (unlocked) {
> +            qemu_mutex_lock_iothread();
> +        }
>          qemu_flush_coalesced_mmio_buffer();
> +        if (unlocked) {
> +            qemu_mutex_unlock_iothread();
> +        }
>      }
> +
> +    return release_lock;
>  }
>  
>  MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
> @@ -2332,6 +2349,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>      hwaddr addr1;
>      MemoryRegion *mr;
>      MemTxResult result = MEMTX_OK;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      while (len > 0) {
> @@ -2340,7 +2358,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>  
>          if (is_write) {
>              if (!memory_access_is_direct(mr, is_write)) {
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  /* XXX: could force current_cpu to NULL to avoid
>                     potential bugs */
> @@ -2382,7 +2400,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>          } else {
>              if (!memory_access_is_direct(mr, is_write)) {
>                  /* I/O case */
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  switch (l) {
>                  case 8:
> @@ -2418,6 +2436,12 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>                  memcpy(buf, ptr, l);
>              }
>          }
> +
> +        if (release_lock) {
> +            qemu_mutex_unlock_iothread();
> +            release_lock = false;
> +        }
> +
>          len -= l;
>          buf += l;
>          addr += l;
> @@ -2744,11 +2768,12 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, false);
>      if (l < 4 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
> @@ -2782,6 +2807,9 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2834,12 +2862,13 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 8;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 8 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
> @@ -2873,6 +2902,9 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2945,12 +2977,13 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 2 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
> @@ -2984,6 +3017,9 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -3036,12 +3072,13 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      hwaddr addr1;
>      MemTxResult r;
>      uint8_t dirty_log_mask;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>          r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
>      } else {
> @@ -3057,6 +3094,9 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>  
> @@ -3077,12 +3117,13 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3115,6 +3156,9 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>  
> @@ -3184,11 +3228,12 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>  
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, true);
>      if (l < 2 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>  
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3221,6 +3266,12 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }

Bad rebase?
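
The release_lock check is duplicated here; presumably only one copy was
intended, matching every other helper touched by this patch:

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }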

Fam

>      rcu_read_unlock();
>  }
>  
> -- 
> 1.8.3.1
> 
> 

Thread overview: 25+ messages
2015-06-24 16:25 [Qemu-devel] [PATCH for-2.4 v2 0/9] KVM: Do I/O outside BQL whenever possible Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 1/9] main-loop: use qemu_mutex_lock_iothread consistently Paolo Bonzini
2015-06-25  3:39   ` Fam Zheng
2015-06-25  8:20     ` Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 2/9] main-loop: introduce qemu_mutex_iothread_locked Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 3/9] memory: Add global-locking property to memory regions Paolo Bonzini
2015-06-25  3:44   ` Fam Zheng
2015-06-25  7:46     ` Paolo Bonzini
2015-06-25 10:59       ` Fam Zheng
2015-06-25 11:10         ` Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 4/9] exec: pull qemu_flush_coalesced_mmio_buffer() into address_space_rw/ld*/st* Paolo Bonzini
2015-06-25  4:59   ` Fam Zheng
2015-06-24 16:25 ` [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL Paolo Bonzini
2015-06-25  5:11   ` Fam Zheng [this message]
2015-06-25  7:47     ` Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 6/9] kvm: First step to push iothread lock out of inner run loop Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 7/9] kvm: Switch to unlocked PIO Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 8/9] acpi: mark PMTIMER as unlocked Paolo Bonzini
2015-06-24 16:25 ` [Qemu-devel] [PATCH 9/9] kvm: Switch to unlocked MMIO Paolo Bonzini
  -- strict thread matches above, loose matches on Subject: below --
2015-07-02  8:20 [Qemu-devel] [PATCH for-2.4 0/9 v3] KVM: Do I/O outside BQL whenever possible Paolo Bonzini
2015-07-02  8:20 ` [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL Paolo Bonzini
2015-06-18 16:47 [Qemu-devel] [PATCH for-2.4 0/9] KVM: Do I/O outside BQL whenever possible Paolo Bonzini
2015-06-18 16:47 ` [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL Paolo Bonzini
2015-06-24 16:56   ` Alex Bennée
2015-06-24 17:21     ` Paolo Bonzini
2015-06-24 18:50       ` Alex Bennée
2015-06-25  8:13         ` Paolo Bonzini
