From: Sasha Levin <levinsasha928@gmail.com>
To: penberg@kernel.org
Cc: kvm@vger.kernel.org, mingo@elte.hu, asias.hejun@gmail.com,
gorcunov@gmail.com, prasadjoshi124@gmail.com,
Sasha Levin <levinsasha928@gmail.com>
Subject: [PATCH] kvm tools: Add MMIO coalescing support
Date: Fri, 3 Jun 2011 22:51:08 +0300 [thread overview]
Message-ID: <1307130668-5652-1-git-send-email-levinsasha928@gmail.com> (raw)
Coalescing MMIO allows us to avoid an exit every time we have an
MMIO write; instead, MMIO writes are coalesced in a ring which
can be flushed once an exit for a different reason is needed.
An MMIO exit is also triggered once the ring is full.
Coalesce all MMIO regions registered in the MMIO mapper.
Add a coalescing handler under kvm_cpu.
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
tools/kvm/hw/vesa.c | 2 +-
tools/kvm/include/kvm/kvm-cpu.h | 2 ++
tools/kvm/include/kvm/kvm.h | 4 ++--
tools/kvm/kvm-cpu.c | 24 ++++++++++++++++++++++++
tools/kvm/mmio.c | 24 ++++++++++++++++++++++--
5 files changed, 51 insertions(+), 5 deletions(-)
diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index b99f2de..a12c601 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -77,7 +77,7 @@ void vesa__init(struct kvm *kvm)
vesa_pci_device.bar[0] = vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
pci__register(&vesa_pci_device, dev);
- kvm__register_mmio(VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
+ kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
pthread_create(&thread, NULL, vesa__dovnc, kvm);
}
diff --git a/tools/kvm/include/kvm/kvm-cpu.h b/tools/kvm/include/kvm/kvm-cpu.h
index 4d99246..1eb4a52 100644
--- a/tools/kvm/include/kvm/kvm-cpu.h
+++ b/tools/kvm/include/kvm/kvm-cpu.h
@@ -24,6 +24,8 @@ struct kvm_cpu {
u8 is_running;
u8 paused;
+
+ struct kvm_coalesced_mmio_ring *ring;
};
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id);
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index d22a849..55551de 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -49,8 +49,8 @@ void kvm__stop_timer(struct kvm *kvm);
void kvm__irq_line(struct kvm *kvm, int irq, int level);
bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
-bool kvm__deregister_mmio(u64 phys_addr);
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
void kvm__pause(void);
void kvm__continue(void);
void kvm__notify_paused(void);
diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
index be0528b..1fb1c74 100644
--- a/tools/kvm/kvm-cpu.c
+++ b/tools/kvm/kvm-cpu.c
@@ -14,6 +14,8 @@
#include <errno.h>
#include <stdio.h>
+#define PAGE_SIZE (sysconf(_SC_PAGE_SIZE))
+
extern __thread struct kvm_cpu *current_kvm_cpu;
static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
@@ -70,6 +72,7 @@ struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
{
struct kvm_cpu *vcpu;
int mmap_size;
+ int coalesced_offset;
vcpu = kvm_cpu__new(kvm);
if (!vcpu)
@@ -89,6 +92,10 @@ struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
if (vcpu->kvm_run == MAP_FAILED)
die("unable to mmap vcpu fd");
+ coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+ if (coalesced_offset)
+ vcpu->ring = (void *)vcpu->kvm_run + (coalesced_offset * PAGE_SIZE);
+
vcpu->is_running = true;
return vcpu;
@@ -395,6 +402,22 @@ static void kvm_cpu_signal_handler(int signum)
}
}
+static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
+{
+ if (cpu->ring) {
+ while (cpu->ring->first != cpu->ring->last) {
+ struct kvm_coalesced_mmio *m;
+ m = &cpu->ring->coalesced_mmio[cpu->ring->first];
+ kvm__emulate_mmio(cpu->kvm,
+ m->phys_addr,
+ m->data,
+ m->len,
+ 1);
+ cpu->ring->first = (cpu->ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+ }
+ }
+}
+
int kvm_cpu__start(struct kvm_cpu *cpu)
{
sigset_t sigset;
@@ -462,6 +485,7 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
default:
goto panic_kvm;
}
+ kvm_cpu__handle_coalesced_mmio(cpu);
}
exit_kvm:
diff --git a/tools/kvm/mmio.c b/tools/kvm/mmio.c
index acd091e..64bef37 100644
--- a/tools/kvm/mmio.c
+++ b/tools/kvm/mmio.c
@@ -5,6 +5,8 @@
#include <stdio.h>
#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
#include <linux/types.h>
#include <linux/rbtree.h>
@@ -53,9 +55,10 @@ static const char *to_direction(u8 is_write)
return "read";
}
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
{
struct mmio_mapping *mmio;
+ struct kvm_coalesced_mmio_zone zone;
int ret;
mmio = malloc(sizeof(*mmio));
@@ -67,6 +70,16 @@ bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callba
.kvm_mmio_callback_fn = kvm_mmio_callback_fn,
};
+ zone = (struct kvm_coalesced_mmio_zone) {
+ .addr = phys_addr,
+ .size = phys_addr_len,
+ };
+ ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
+ if (ret < 0) {
+ free(mmio);
+ return false;
+ }
+
br_write_lock();
ret = mmio_insert(&mmio_tree, mmio);
br_write_unlock();
@@ -74,9 +87,10 @@ bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callba
return ret;
}
-bool kvm__deregister_mmio(u64 phys_addr)
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr)
{
struct mmio_mapping *mmio;
+ struct kvm_coalesced_mmio_zone zone;
br_write_lock();
mmio = mmio_search_single(&mmio_tree, phys_addr);
@@ -85,6 +99,12 @@ bool kvm__deregister_mmio(u64 phys_addr)
return false;
}
+ zone = (struct kvm_coalesced_mmio_zone) {
+ .addr = phys_addr,
+ .size = 1,
+ };
+ ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+
rb_int_erase(&mmio_tree, &mmio->node);
br_write_unlock();
--
1.7.5.3
next reply other threads:[~2011-06-03 19:51 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-03 19:51 Sasha Levin [this message]
2011-06-04 9:38 ` [PATCH] kvm tools: Add MMIO coalescing support Ingo Molnar
2011-06-04 10:14 ` Sasha Levin
2011-06-04 10:17 ` Ingo Molnar
2011-06-04 10:28 ` Sasha Levin
2011-06-04 10:35 ` Ingo Molnar
2011-06-04 10:39 ` Alexander Graf
2011-06-04 10:47 ` Ingo Molnar
2011-06-04 10:54 ` Alexander Graf
2011-06-04 11:27 ` Ingo Molnar
2011-06-04 11:53 ` Alexander Graf
2011-06-04 14:46 ` Ingo Molnar
2011-06-04 15:22 ` Alexander Graf
2011-06-04 16:34 ` Ingo Molnar
2011-06-04 16:50 ` Sasha Levin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1307130668-5652-1-git-send-email-levinsasha928@gmail.com \
--to=levinsasha928@gmail.com \
--cc=asias.hejun@gmail.com \
--cc=gorcunov@gmail.com \
--cc=kvm@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=penberg@kernel.org \
--cc=prasadjoshi124@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox