From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Stefan Hajnoczi <stefanha@gmail.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Avi Kivity <avi@redhat.com>,
Anthony Liguori <anthony@codemonkey.ws>,
Jan Kiszka <jan.kiszka@siemens.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [patch v5 7/8] memory: introduce tls context to record nested dma
Date: Mon, 29 Oct 2012 07:48:46 +0800
Message-ID: <1351468127-15025-8-git-send-email-pingfank@linux.vnet.ibm.com>
In-Reply-To: <1351468127-15025-1-git-send-email-pingfank@linux.vnet.ibm.com>
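Introduce a per-thread (TLS) context which records the dispatch type the
thread is currently running (MMIO or IO) and counts the MMIO requests in
flight. When a device's MMIO handler, running outside the big lock, issues
a DMA access that re-enters address_space_rw(), the counter goes above one
and the nested access is detected; for now such nested DMA is reported and
QEMU aborts (the IOMMU case still needs to be filtered out).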
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
cpus.c | 3 ++
exec.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
qemu-thread.h | 8 +++++++
vl.c | 1 +
4 files changed, 70 insertions(+), 0 deletions(-)
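For reference, the vcpu dispatch path (patch 8/8) is expected to bracket
the device callback roughly as in the sketch below; access_device_mmio()
is only a hypothetical stand-in for the real MemoryRegion read/write
callback invocation:

    /* minimal usage sketch, not part of this patch */
    static void mmio_dispatch_example(void)
    {
        qemu_thread_set_dispatch_type(DISPATCH_MMIO);
        /* a DMA access issued from inside the callback re-enters
         * address_space_rw() and pushes mmio_req_pending above 1 */
        access_device_mmio();            /* hypothetical device callback */
        qemu_thread_reset_dispatch_type();
    }

Note that any thread which can enter address_space_rw() must have called
qemu_thread_init_context() first (the main thread in vl.c, the vcpu
threads in cpus.c), since the helpers dereference the TLS pointer
unconditionally.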
diff --git a/cpus.c b/cpus.c
index 191cbf5..e67d80f 100644
--- a/cpus.c
+++ b/cpus.c
@@ -733,6 +733,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
qemu_mutex_lock(&qemu_global_mutex);
qemu_thread_get_self(cpu->thread);
+ qemu_thread_init_context();
env->thread_id = qemu_get_thread_id();
cpu_single_env = env;
@@ -774,6 +775,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
+ qemu_thread_init_context();
env->thread_id = qemu_get_thread_id();
sigemptyset(&waitset);
@@ -813,6 +815,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
+ qemu_thread_init_context();
/* signal CPU creation */
qemu_mutex_lock(&qemu_global_mutex);
diff --git a/exec.c b/exec.c
index 46da08c..ea672c6 100644
--- a/exec.c
+++ b/exec.c
@@ -3449,6 +3449,49 @@ static bool address_space_section_lookup_ref(AddressSpace *as,
return safe_ref;
}
+typedef struct ThreadContext {
+ DispatchType dispatch_type;
+ unsigned int mmio_req_pending;
+} ThreadContext;
+
+static __thread ThreadContext *thread_context;
+
+void qemu_thread_init_context(void)
+{
+ thread_context = g_new(ThreadContext, 1);
+ thread_context->dispatch_type = DISPATCH_INIT;
+ thread_context->mmio_req_pending = 0;
+}
+
+void qemu_thread_set_dispatch_type(DispatchType type)
+{
+ thread_context->dispatch_type = type;
+}
+
+void qemu_thread_reset_dispatch_type(void)
+{
+ thread_context->dispatch_type = DISPATCH_INIT;
+}
+
+static bool address_space_inc_req_pending(void)
+{
+ bool nested = false;
+
+    /* Currently only MMIO is dispatched outside the big lock; track nesting to avoid deadlock */
+ if (thread_context->dispatch_type == DISPATCH_MMIO) {
+        nested = ++thread_context->mmio_req_pending > 1;
+ }
+
+ return nested;
+}
+
+static void address_space_dec_req_pending(void)
+{
+ if (thread_context->dispatch_type == DISPATCH_MMIO) {
+ thread_context->mmio_req_pending--;
+ }
+}
+
void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
int len, bool is_write)
{
@@ -3459,6 +3502,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
target_phys_addr_t page;
bool safe_ref = false;
MemoryRegionSection *section, obj_mrs;
+ bool nested_dma = false;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
@@ -3485,10 +3529,17 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
memory_region_section_lookup_ref(d, page, &obj_mrs);
}
section = &obj_mrs;
+ nested_dma = address_space_inc_req_pending();
if (is_write) {
if (!memory_region_is_ram(section->mr)) {
target_phys_addr_t addr1;
+
+                /* TODO: filter out the IOMMU case */
+ if (nested_dma) {
+                    fprintf(stderr, "cannot support nested DMA\n");
+ abort();
+ }
addr1 = memory_region_section_addr(section, addr);
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
@@ -3522,6 +3573,12 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
if (!(memory_region_is_ram(section->mr) ||
memory_region_is_romd(section->mr))) {
target_phys_addr_t addr1;
+
+                /* TODO: filter out the IOMMU case */
+ if (nested_dma) {
+                    fprintf(stderr, "cannot support nested DMA\n");
+ abort();
+ }
/* I/O case */
addr1 = memory_region_section_addr(section, addr);
if (l >= 4 && ((addr1 & 3) == 0)) {
@@ -3549,6 +3606,7 @@ void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
qemu_put_ram_ptr(ptr);
}
}
+ address_space_dec_req_pending();
memory_region_section_unref(&obj_mrs);
len -= l;
buf += l;
diff --git a/qemu-thread.h b/qemu-thread.h
index 05fdaaf..bb9535e 100644
--- a/qemu-thread.h
+++ b/qemu-thread.h
@@ -7,6 +7,11 @@
typedef struct QemuMutex QemuMutex;
typedef struct QemuCond QemuCond;
typedef struct QemuThread QemuThread;
+typedef enum {
+ DISPATCH_INIT = 0,
+ DISPATCH_MMIO,
+ DISPATCH_IO,
+} DispatchType;
#ifdef _WIN32
#include "qemu-thread-win32.h"
@@ -46,4 +51,7 @@ void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
+void qemu_thread_init_context(void);
+void qemu_thread_set_dispatch_type(DispatchType type);
+void qemu_thread_reset_dispatch_type(void);
#endif
diff --git a/vl.c b/vl.c
index ee3c43a..be8d825 100644
--- a/vl.c
+++ b/vl.c
@@ -3439,6 +3439,7 @@ int main(int argc, char **argv, char **envp)
add_device_config(DEV_VIRTCON, "vc:80Cx24C");
}
+ qemu_thread_init_context();
socket_init();
if (qemu_opts_foreach(qemu_find_opts("chardev"), chardev_init_func, NULL, 1) != 0)
--
1.7.4.4