From: "Alex Bennée" <alex.bennee@linaro.org>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: jan.kiszka@siemens.com, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL
Date: Wed, 24 Jun 2015 17:56:02 +0100
Message-ID: <87zj3pxcyl.fsf@linaro.org>
In-Reply-To: <1434646046-27150-6-git-send-email-pbonzini@redhat.com>
Paolo Bonzini <pbonzini@redhat.com> writes:
> From: Jan Kiszka <jan.kiszka@siemens.com>
>
> The MMIO case is further broken up into two cases: if the caller does
> not hold the BQL on invocation, the unlocked path takes or avoids the
> BQL depending on the locking strategy of the target memory region and
> its coalesced MMIO handling. In this case, the caller should not hold
> _any_ lock (a friendly suggestion which is disregarded by
> virtio-scsi-dataplane).
>
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> exec.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 57 insertions(+), 9 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index 094f87e..78c99f6 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -48,6 +48,7 @@
>  #endif
>  #include "exec/cpu-all.h"
>  #include "qemu/rcu_queue.h"
> +#include "qemu/main-loop.h"
>  #include "exec/cputlb.h"
>  #include "translate-all.h"
>
> @@ -2318,11 +2319,27 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
>      return l;
>  }
>
> -static void prepare_mmio_access(MemoryRegion *mr)
> +static bool prepare_mmio_access(MemoryRegion *mr)
>  {
> +    bool unlocked = !qemu_mutex_iothread_locked();
> +    bool release_lock = false;
> +
> +    if (unlocked && mr->global_locking) {
> +        qemu_mutex_lock_iothread();
> +        unlocked = false;
> +        release_lock = true;
> +    }
>      if (mr->flush_coalesced_mmio) {
> +        if (unlocked) {
> +            qemu_mutex_lock_iothread();
> +        }
>          qemu_flush_coalesced_mmio_buffer();
> +        if (unlocked) {
> +            qemu_mutex_unlock_iothread();
> +        }
>      }
> +
> +    return release_lock;
>  }
This is where I get confused about the advantage of this over plain
same-pid recursive locking. If you use recursive locking you don't need
to add a bunch of state to remind you of when to release the lock. Then
you'd just need:
static void prepare_mmio_access(MemoryRegion *mr)
{
    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
    }
    if (mr->flush_coalesced_mmio) {
        qemu_mutex_lock_iothread();
        qemu_flush_coalesced_mmio_buffer();
        qemu_mutex_unlock_iothread();
    }
}
and a bunch of:

    if (mr->global_locking) {
        qemu_mutex_unlock_iothread();
    }
in the access functions. Although I suspect you could push the
mr->global_locking up to the dispatch functions.
Am I missing something here?
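For concreteness, the sort of thing I'm imagining is a same-pid
recursive wrapper along these lines -- purely a sketch with made-up
names, nothing like this exists in the tree, and it assumes every BQL
user goes through the wrappers:

static __thread unsigned iothread_lock_depth;

static void qemu_mutex_lock_iothread_recursive(void)
{
    /* only the outermost acquisition takes the real mutex */
    if (iothread_lock_depth++ == 0) {
        qemu_mutex_lock_iothread();
    }
}

static void qemu_mutex_unlock_iothread_recursive(void)
{
    /* only the outermost release drops the real mutex */
    if (--iothread_lock_depth == 0) {
        qemu_mutex_unlock_iothread();
    }
}

and pushing the mr->global_locking check up into dispatch would then
look something like this (again hypothetical, dispatch_read_locked is
not a real function):

static MemTxResult dispatch_read_locked(MemoryRegion *mr, hwaddr addr,
                                        uint64_t *pval, unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread_recursive();
    }
    r = memory_region_dispatch_read(mr, addr, pval, size, attrs);
    if (mr->global_locking) {
        qemu_mutex_unlock_iothread_recursive();
    }
    return r;
}

With that, the lock/unlock calls pair up unconditionally and the
release_lock bookkeeping disappears from the access functions.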
>
>  MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
> @@ -2334,6 +2351,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>      hwaddr addr1;
>      MemoryRegion *mr;
>      MemTxResult result = MEMTX_OK;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      while (len > 0) {
> @@ -2342,7 +2360,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>
>          if (is_write) {
>              if (!memory_access_is_direct(mr, is_write)) {
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  /* XXX: could force current_cpu to NULL to avoid
>                     potential bugs */
> @@ -2384,7 +2402,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>          } else {
>              if (!memory_access_is_direct(mr, is_write)) {
>                  /* I/O case */
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  switch (l) {
>                  case 8:
> @@ -2420,6 +2438,12 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>                  memcpy(buf, ptr, l);
>              }
>          }
> +
> +        if (release_lock) {
> +            qemu_mutex_unlock_iothread();
> +            release_lock = false;
> +        }
> +
>          len -= l;
>          buf += l;
>          addr += l;
> @@ -2746,11 +2770,12 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, false);
>      if (l < 4 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
> @@ -2784,6 +2809,9 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2836,12 +2864,13 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 8;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 8 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
> @@ -2875,6 +2904,9 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2947,12 +2979,13 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 2 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
> @@ -2986,6 +3019,9 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -3037,12 +3073,13 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
>      } else {
> @@ -3063,6 +3100,9 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>
> @@ -3083,12 +3123,13 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3121,6 +3162,9 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>
> @@ -3190,11 +3234,12 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, true);
>      if (l < 2 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3227,6 +3272,9 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
--
Alex Bennée