From: Ed Swierk <eswierk@aristanetworks.com>
To: kvm@vger.kernel.org
Cc: Jan Kiszka <jan.kiszka@web.de>, Gerd Hoffmann <kraxel@redhat.com>
Subject: [PATCH] Xen PV-on-HVM guest support (v2)
Date: Wed, 14 Oct 2009 22:41:58 -0700
Message-ID: <1255585318.12773.14.camel@localhost.localdomain>
Support for Xen PV-on-HVM guests can be implemented almost entirely in
userspace, except for handling one annoying MSR that maps a Xen
hypercall blob into guest address space.
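For context, a Xen-aware guest does roughly the following at boot (my
sketch of the usual Xen HVM convention, not code from this patch): it
reads the Xen CPUID leaves to find the hypercall MSR and the number of
blob pages, then writes the MSR once per page, with the blob page index
in the low 12 bits and the destination guest-physical address in the
rest:

	/* Guest-kernel-side sketch using Linux's cpuid()/wrmsrl()
	 * helpers; hypercall_page_gpa is a made-up name for the
	 * guest-physical address of the pages reserved for the blob. */
	static void map_xen_hypercall_pages(u64 hypercall_page_gpa)
	{
		u32 npages, msr, ecx, edx;
		u32 i;

		/* Xen CPUID leaf base+2 (base is usually 0x40000000)
		 * returns the number of hypercall pages in eax and the
		 * MSR index in ebx. */
		cpuid(0x40000002, &npages, &msr, &ecx, &edx);

		for (i = 0; i < npages; i++)
			wrmsrl(msr, hypercall_page_gpa + i * PAGE_SIZE + i);
	}

It's that wrmsr that has to be handled in the kernel, since KVM offers
no way to hand MSR writes to userspace.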
A generic mechanism to delegate MSR writes to userspace seems overkill
and risks encouraging similar MSR abuse in the future. Thus this patch
adds special support for the Xen HVM MSR.
I implemented a new ioctl, KVM_XEN_HVM_CONFIG, that lets userspace tell
KVM which MSR the guest will write to, as well as the starting address
and size of the hypercall blobs (one each for 32-bit and 64-bit) that
userspace has loaded from files. When the guest writes to the MSR, KVM
copies the page of the blob selected by the MSR value from userspace
memory into guest memory.
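For illustration, userspace would use the ioctl roughly like this (a
sketch with made-up names: kvm_fd is the /dev/kvm fd, vm_fd the VM fd,
blob32/blob64 point at the blobs loaded from the files, and the sizes
are in pages; 0x40000000 is just the MSR Xen guests typically use):

	struct kvm_xen_hvm_config cfg = {
		.msr       = 0x40000000,
		.blob_addr = { (__u64)(unsigned long)blob32,   /* 32-bit blob */
			       (__u64)(unsigned long)blob64 }, /* 64-bit blob */
		.blob_size = { blob32_npages, blob64_npages }, /* in pages */
	};

	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM) > 0 &&
	    ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg) < 0)
		perror("KVM_XEN_HVM_CONFIG");

Index 0 holds the 32-bit blob and index 1 the 64-bit one, matching the
EFER.LME check on the kernel side below.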
I've tested this patch with a hacked-up version of Gerd's userspace
code, booting a number of guests (CentOS 5.3 i386 and x86_64, and
FreeBSD 8.0-RC1 amd64) and exercising PV network and block devices.
v2: fix ioctl struct padding; renumber CAP and ioctl constants; check
kvm_write_guest() return value; change printks to KERN_DEBUG (I think
they're worth keeping for debugging userspace)
Signed-off-by: Ed Swierk <eswierk@aristanetworks.com>
---
Index: kvm-kmod/include/asm-x86/kvm.h
===================================================================
--- kvm-kmod.orig/include/asm-x86/kvm.h
+++ kvm-kmod/include/asm-x86/kvm.h
@@ -59,6 +59,7 @@
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
+#define __KVM_HAVE_XEN_HVM
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
Index: kvm-kmod/include/linux/kvm.h
===================================================================
--- kvm-kmod.orig/include/linux/kvm.h
+++ kvm-kmod/include/linux/kvm.h
@@ -476,6 +476,9 @@ struct kvm_ioeventfd {
#endif
#define KVM_CAP_IOEVENTFD 36
#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
+#ifdef __KVM_HAVE_XEN_HVM
+#define KVM_CAP_XEN_HVM 38
+#endif
#ifdef KVM_CAP_IRQ_ROUTING
@@ -528,6 +531,15 @@ struct kvm_x86_mce {
};
#endif
+#ifdef KVM_CAP_XEN_HVM
+struct kvm_xen_hvm_config {
+ __u32 msr;
+ __u8 pad[2];
+ __u8 blob_size[2];
+ __u64 blob_addr[2];
+};
+#endif
+
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
struct kvm_irqfd {
@@ -586,6 +598,7 @@ struct kvm_irqfd {
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
+#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
/*
* ioctls for vcpu fds
Index: kvm-kmod/include/linux/kvm_host.h
===================================================================
--- kvm-kmod.orig/include/linux/kvm_host.h
+++ kvm-kmod/include/linux/kvm_host.h
@@ -236,6 +236,10 @@ struct kvm {
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
#endif
+
+#ifdef KVM_CAP_XEN_HVM
+ struct kvm_xen_hvm_config xen_hvm_config;
+#endif
};
/* The guest did something we don't support. */
Index: kvm-kmod/x86/x86.c
===================================================================
--- kvm-kmod.orig/x86/x86.c
+++ kvm-kmod/x86/x86.c
@@ -875,6 +875,35 @@ static int set_msr_mce(struct kvm_vcpu *
return 0;
}
+#ifdef KVM_CAP_XEN_HVM
+static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+{
+ int blob = !!(vcpu->arch.shadow_efer & EFER_LME);
+ u32 pnum = data & ~PAGE_MASK;
+ u64 paddr = data & PAGE_MASK;
+ u8 *page;
+ int r = 1;
+
+ if (pnum >= vcpu->kvm->xen_hvm_config.blob_size[blob])
+ goto out;
+ page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!page)
+ goto out;
+ if (copy_from_user(page, (u8 *)vcpu->kvm->xen_hvm_config.blob_addr[blob]
+ + pnum * PAGE_SIZE, PAGE_SIZE))
+ goto out_free;
+ if (kvm_write_guest(vcpu->kvm, paddr, page, PAGE_SIZE))
+ goto out_free;
+ printk(KERN_DEBUG "kvm: copied xen hvm blob %d page %d to 0x%llx\n",
+ blob, pnum, paddr);
+ r = 0;
+out_free:
+ kfree(page);
+out:
+ return r;
+}
+#endif
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
@@ -990,6 +1019,10 @@ int kvm_set_msr_common(struct kvm_vcpu *
"0x%x data 0x%llx\n", msr, data);
break;
default:
+#ifdef KVM_CAP_XEN_HVM
+ if (msr && (msr == vcpu->kvm->xen_hvm_config.msr))
+ return xen_hvm_config(vcpu, data);
+#endif
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
@@ -2453,6 +2486,17 @@ long kvm_arch_vm_ioctl(struct file *filp
r = 0;
break;
}
+#ifdef KVM_CAP_XEN_HVM
+ case KVM_XEN_HVM_CONFIG: {
+ r = -EFAULT;
+ if (copy_from_user(&kvm->xen_hvm_config, argp,
+ sizeof(struct kvm_xen_hvm_config)))
+ goto out;
+ printk(KERN_DEBUG "kvm: configured xen hvm\n");
+ r = 0;
+ break;
+ }
+#endif
default:
;
}