From: Pekka Enberg <penberg@kernel.org>
To: kvm@vger.kernel.org
Cc: Pekka Enberg <penberg@kernel.org>, Alexander Graf <agraf@suse.de>,
	Cyrill Gorcunov <gorcunov@gmail.com>, Ingo Molnar <mingo@elte.hu>,
	John Floren <john@jfloren.net>,
	Sasha Levin <levinsasha928@gmail.com>
Subject: [PATCH] kvm tools, vesa: Use guest-mapped memory for framebuffer
Date: Mon,  6 Jun 2011 16:51:55 +0300
Message-ID: <1307368315-10809-1-git-send-email-penberg@kernel.org>

This patch converts hw/vesa.c to use guest-mapped memory for the
framebuffer and drops the slow MMIO emulation, which speeds up
framebuffer accesses considerably. Note that this can be optimized
further with the KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander
Graf.
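
For reference, here is a minimal sketch of what a memory slot
registration such as kvm__register_mem_slot() boils down to. The
struct kvm_userspace_memory_region layout and the
KVM_SET_USER_MEMORY_REGION ioctl are the standard KVM userspace API;
the function name and error handling here are illustrative only, not
the tree's actual helper:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Sketch only: register host memory 'userspace_addr' at guest
   * physical address 'guest_phys' in memory slot 'slot' of the VM
   * described by 'vm_fd'.
   */
  static int register_mem_slot(int vm_fd, __u32 slot, __u64 guest_phys,
  			     __u64 size, void *userspace_addr)
  {
  	struct kvm_userspace_memory_region mem;

  	memset(&mem, 0, sizeof(mem));
  	mem.slot		= slot;
  	mem.guest_phys_addr	= guest_phys;
  	mem.memory_size		= size;
  	mem.userspace_addr	= (unsigned long) userspace_addr;

  	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
  }

The dirty-log optimization mentioned above would additionally set
mem.flags = KVM_MEM_LOG_DIRTY_PAGES when registering the slot and then
periodically fetch the per-page dirty bitmap with the
KVM_GET_DIRTY_LOG ioctl, so the display backend only redraws pages the
guest actually touched.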

Cc: Alexander Graf <agraf@suse.de>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: John Floren <john@jfloren.net>
Cc: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
 tools/kvm/hw/vesa.c         |   17 +++++------------
 tools/kvm/include/kvm/kvm.h |    1 +
 tools/kvm/kvm.c             |    8 ++++----
 3 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index 48d31ce..be9c109 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -8,6 +8,7 @@
 #include "kvm/irq.h"
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
+#include <sys/mman.h>
 
 #include <sys/types.h>
 #include <sys/ioctl.h>
@@ -40,14 +41,6 @@ static struct pci_device_header vesa_pci_device = {
 	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
 };
 
-static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
-{
-	if (!is_write)
-		return;
-
-	fb__write(addr, data, len);
-}
-
 static struct framebuffer vesafb;
 
 struct framebuffer *vesa__init(struct kvm *kvm)
@@ -65,12 +58,12 @@ struct framebuffer *vesa__init(struct kvm *kvm)
 	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
 	pci__register(&vesa_pci_device, dev);
 
-	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
-
-	mem = calloc(1, VESA_MEM_SIZE);
-	if (!mem)
+	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+	if (mem == MAP_FAILED)
 		return NULL;
 
+	kvm__register_mem_slot(kvm, 1, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
+
 	vesafb = (struct framebuffer) {
 		.width			= VESA_WIDTH,
 		.height			= VESA_HEIGHT,
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index 55551de..0628402 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -49,6 +49,7 @@ void kvm__stop_timer(struct kvm *kvm);
 void kvm__irq_line(struct kvm *kvm, int irq, int level);
 bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
 bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr);
 bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
 bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
 void kvm__pause(void);
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 54e3203..de642c7 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -162,7 +162,7 @@ static bool kvm__cpu_supports_vm(void)
 	return regs.ecx & (1 << feature);
 }
 
-static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
+void kvm__register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
 {
 	struct kvm_userspace_memory_region mem;
 	int ret;
@@ -200,7 +200,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
 	} else {
 		/* First RAM range from zero to the PCI gap: */
 
@@ -208,7 +208,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = KVM_32BIT_GAP_START;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
 
 		/* Second RAM range from 4GB to the end of RAM: */
 
@@ -216,7 +216,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size - phys_size;
 		host_mem   = kvm->ram_start + phys_start;
 
-		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
+		kvm__register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
 	}
 }
 
-- 
1.7.0.4


