From: Glauber Costa <glommer@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [RFC] in-kernel irqchip : split devices
Date: Wed, 14 Oct 2009 11:30:43 -0300 [thread overview]
Message-ID: <20091014143042.GD8092@mothafucka.localdomain> (raw)
[-- Attachment #1: Type: text/plain, Size: 401 bytes --]
Hello people,
As I promised, I am sending a very brief PoC with respect to split devices and the in-kernel irqchip.
In this mail I am including only the ioapic version, for appreciation. I also have the i8259, and the
apic will take me a little bit longer. This is just an attempt to bind the discussion to real
code.
Note that we end up with a very slim representation of the device, and the code is much less
confusing, IMHO.
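
To give an idea of how this plugs into the board code, here is a sketch of what I have in mind
for pc.c (this hunk is NOT part of the attached patch; the helper name and the simplified
fallback wiring are just for illustration):

    /* sketch only -- not part of the attached patch */
    static qemu_irq *pc_ioapic_irqs(void)
    {
        if (kvm_irqchip_in_kernel()) {
            /* all ioapic state lives in the kernel; QEMU keeps only
             * the slim wrapper for reset and save/restore */
            return kvm_ioapic_init();
        }

        /* otherwise keep the existing userspace model (wiring is
         * simplified here; the real pc.c path routes through the
         * ISA irq handler) */
        return qemu_allocate_irqs(ioapic_set_irq, ioapic_init(), 24);
    }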
[-- Attachment #2: ioapic-kvm.patch --]
[-- Type: text/plain, Size: 4365 bytes --]
Index: qemu/Makefile.target
===================================================================
--- qemu.orig/Makefile.target
+++ qemu/Makefile.target
@@ -197,6 +197,8 @@ obj-i386-y += usb-uhci.o vmmouse.o vmpor
obj-i386-y += device-hotplug.o pci-hotplug.o smbios.o wdt_ib700.o
obj-i386-y += ne2000-isa.o
+obj-i386-$(CONFIG_KVM) += ioapic-kvm.o
+
# shared objects
obj-ppc-y = ppc.o ide/core.o ide/qdev.o ide/isa.o ide/pci.o ide/macio.o
obj-ppc-y += vga.o vga-pci.o $(sound-obj-y) dma.o openpic.o
Index: qemu/hw/ioapic-kvm.c
===================================================================
--- /dev/null
+++ qemu/hw/ioapic-kvm.c
@@ -0,0 +1,81 @@
+#include "hw.h"
+#include "pc.h"
+#include "qemu-timer.h"
+#include "host-utils.h"
+#include "kvm.h"
+
+#define IOAPIC_NUM_PINS 0x18
+#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
+
+static void ioapic_reset(void *opaque)
+{
+ struct kvm_ioapic_state *s = opaque;
+ struct kvm_irqchip *chip;
+ int i;
+
+ chip = container_of(s, struct kvm_irqchip, chip.ioapic);
+
+ chip->chip_id = KVM_IRQCHIP_IOAPIC;
+
+ memset(s, 0, sizeof(*s));
+ s->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
+ for(i = 0; i < IOAPIC_NUM_PINS; i++)
+ s->redirtbl[i].bits = 1 << 16; /* mask LVT */
+
+ kvm_set_irqchip(chip);
+}
+
+static void ioapic_pre_save(void *opaque)
+{
+ struct kvm_ioapic_state *s = opaque;
+ struct kvm_irqchip *chip;
+
+ chip = container_of(s, struct kvm_irqchip, chip.ioapic);
+
+ kvm_get_irqchip(chip);
+}
+
+static int ioapic_post_load(void *opaque, int version_id)
+{
+ struct kvm_ioapic_state *s = opaque;
+ struct kvm_irqchip *chip;
+
+ chip = container_of(s, struct kvm_irqchip, chip.ioapic);
+
+ return kvm_set_irqchip(chip);
+}
+
+static const VMStateDescription vmstate_kvm_ioapic = {
+ .name = "ioapic-kvm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = ioapic_post_load,
+ .pre_save = ioapic_pre_save,
+ .fields = (VMStateField []) {
+ VMSTATE_U64(base_address, struct kvm_ioapic_state),
+ VMSTATE_UINT32(id, struct kvm_ioapic_state),
+ VMSTATE_UINT32(ioregsel, struct kvm_ioapic_state),
+ VMSTATE_UINT32(irr, struct kvm_ioapic_state),
+ VMSTATE_ARRAY_UNSAFE(redirtbl, struct kvm_ioapic_state, IOAPIC_NUM_PINS, 0, vmstate_info_u64, __u64),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+
+static void kvm_ioapic_set_irq(void *opaque, int vector, int level)
+{
+}
+
+qemu_irq *kvm_ioapic_init(void)
+{
+ struct kvm_irqchip *s;
+
+ s = qemu_mallocz(sizeof(*s));
+
+ ioapic_reset(&s->chip.ioapic);
+
+ vmstate_register(0, &vmstate_kvm_ioapic, &s->chip.ioapic);
+ qemu_register_reset(ioapic_reset, &s->chip.ioapic);
+
+ return qemu_allocate_irqs(kvm_ioapic_set_irq, &s->chip.ioapic, IOAPIC_NUM_PINS);
+}
Index: qemu/hw/pc.h
===================================================================
--- qemu.orig/hw/pc.h
+++ qemu/hw/pc.h
@@ -48,6 +48,8 @@ void ioapic_set_irq(void *opaque, int ve
void apic_reset_irq_delivered(void);
int apic_get_irq_delivered(void);
+qemu_irq *kvm_ioapic_init(void);
+
/* i8254.c */
#define PIT_FREQ 1193182
Index: qemu/kvm-all.c
===================================================================
--- qemu.orig/kvm-all.c
+++ qemu/kvm-all.c
@@ -411,6 +411,26 @@ int kvm_check_extension(KVMState *s, uns
return ret;
}
+#ifdef KVM_CAP_IRQCHIP
+int kvm_set_irqchip(struct kvm_irqchip *chip)
+{
+ if (!kvm_state->irqchip_in_kernel) {
+ return 0;
+ }
+
+ return kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+}
+
+int kvm_get_irqchip(struct kvm_irqchip *chip)
+{
+ if (!kvm_state->irqchip_in_kernel) {
+ return 0;
+ }
+
+ return kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
+}
+#endif
+
int kvm_init(int smp_cpus)
{
static const char upgrade_note[] =
Index: qemu/kvm.h
===================================================================
--- qemu.orig/kvm.h
+++ qemu/kvm.h
@@ -16,6 +16,7 @@
#include "config.h"
#include "qemu-queue.h"
+#include <linux/kvm.h>
#ifdef CONFIG_KVM
extern int kvm_allowed;
@@ -63,6 +64,9 @@ int kvm_update_guest_debug(CPUState *env
int kvm_pit_in_kernel(void);
int kvm_irqchip_in_kernel(void);
+int kvm_set_irqchip(struct kvm_irqchip *chip);
+int kvm_get_irqchip(struct kvm_irqchip *chip);
+
/* internal API */
struct KVMState;
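
For reference, the container_of() calls above rely on the layout of struct kvm_irqchip from
<linux/kvm.h>, which (from my reading of the current headers; double-check against yours) is
roughly:

    struct kvm_irqchip {
            __u32 chip_id;   /* KVM_IRQCHIP_PIC_MASTER/_SLAVE/_IOAPIC */
            __u32 pad;
            union {
                    char dummy[512];        /* reserved for growth */
                    struct kvm_pic_state pic;
                    struct kvm_ioapic_state ioapic;
            } chip;
    };

So pointing the vmstate/reset opaque at &s->chip.ioapic and recovering the containing chip for
the KVM_GET_IRQCHIP/KVM_SET_IRQCHIP ioctls is essentially all the glue that is needed.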