* [PATCH] Support for in-kernel mmio handlers
@ 2007-04-04 20:42 Gregory Haskins
[not found] ` <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
0 siblings, 1 reply; 17+ messages in thread
From: Gregory Haskins @ 2007-04-04 20:42 UTC (permalink / raw)
To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f
The MMIO registration code has been broken out as a new patch from the in-kernel APIC work with the following changes per Avi's request:
1) Supports dynamic registration
2) Uses gpa_t addresses
3) Explicit per-cpu mappings
In addition, I have added the concept of distinct VCPU and VM level registrations, where VCPU devices will eclipse competing VM registrations (if any). This will be key down the road where LAPICs should use VCPU registration, but IOAPICs should use VM level.
Signed-off-by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
---
drivers/kvm/kvm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++
drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++--------
2 files changed, 94 insertions(+), 9 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index fceeb84..3334730 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -236,6 +236,54 @@ struct kvm_pio_request {
int rep;
};
+struct kvm_io_device {
+ unsigned long (*read)(struct kvm_io_device *this,
+ gpa_t addr,
+ unsigned long length);
+ void (*write)(struct kvm_io_device *this,
+ gpa_t addr,
+ unsigned long length,
+ unsigned long val);
+ int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+
+ void *private;
+ struct list_head link;
+};
+
+/* It would be nice to use something smarter than a linear search, TBD...
+ Thankfully we dont expect many devices to register (famous last words :),
+ so until then it will suffice. At least its abstracted so we can change
+ in one place.
+ */
+struct kvm_io_bus {
+ struct list_head list;
+};
+
+static inline void
+kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+ INIT_LIST_HEAD(&bus->list);
+}
+
+static inline struct kvm_io_device*
+kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+ struct kvm_io_device *pos = NULL;
+
+ list_for_each_entry(pos, &bus->list, link) {
+ if(pos->in_range(pos, addr))
+ return pos;
+ }
+
+ return NULL;
+}
+
+static inline void
+kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+ list_add_tail(&dev->link, &bus->list);
+}
+
struct kvm_vcpu {
struct kvm *kvm;
union {
@@ -294,6 +342,7 @@ struct kvm_vcpu {
gpa_t mmio_phys_addr;
struct kvm_pio_request pio;
void *pio_data;
+ struct kvm_io_bus mmio_bus;
int sigset_active;
sigset_t sigset;
@@ -345,6 +394,7 @@ struct kvm {
unsigned long rmap_overflow;
struct list_head vm_list;
struct file *filp;
+ struct kvm_io_bus mmio_bus;
};
struct kvm_stat {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4473174..da119c0 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void)
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
+ kvm_io_bus_init(&kvm->mmio_bus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu = &kvm->vcpus[i];
@@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void)
vcpu->kvm = kvm;
vcpu->mmu.root_hpa = INVALID_PAGE;
INIT_LIST_HEAD(&vcpu->free_pages);
+ kvm_io_bus_init(&vcpu->mmio_bus);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
@@ -1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr,
return X86EMUL_UNHANDLEABLE;
}
+static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ struct kvm_io_device *mmio_dev;
+
+ /* First check the local CPU addresses */
+ mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr);
+ if(!mmio_dev) {
+ /* Then check the entire VM */
+ mmio_dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+ }
+
+ return mmio_dev;
+}
+
static int emulator_read_emulated(unsigned long addr,
unsigned long *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
+ gpa_t gpa;
+ int i;
+ struct kvm_io_device *mmio_dev;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr,
} else if (emulator_read_std(addr, val, bytes, ctxt)
== X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- else {
- gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
- vcpu->mmio_needed = 1;
- vcpu->mmio_phys_addr = gpa;
- vcpu->mmio_size = bytes;
- vcpu->mmio_is_write = 0;
+ gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ if (gpa == UNMAPPED_GVA)
+ return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT;
- return X86EMUL_UNHANDLEABLE;
+ /* Is thie MMIO handled locally? */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if(mmio_dev) {
+ *val = mmio_dev->read(mmio_dev, gpa, bytes);
+ return X86EMUL_CONTINUE;
}
+
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_phys_addr = gpa;
+ vcpu->mmio_size = bytes;
+ vcpu->mmio_is_write = 0;
+
+ return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr,
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ int i;
+ struct kvm_io_device *mmio_dev;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
@@ -1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr,
if (emulator_write_phys(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
+ /* Is thie MMIO handled locally? */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if(mmio_dev) {
+ mmio_dev->write(mmio_dev, gpa, bytes, val);
+ return X86EMUL_CONTINUE;
+ }
+
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
vcpu->mmio_size = bytes;
-------------------------------------------------------------------------
Take Surveys. Earn Cash. Influence the Future of IT
Join SourceForge.net's Techsay panel and you'll get the chance to share your
opinions on IT & business topics through brief surveys-and earn cash
http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV
^ permalink raw reply related [flat|nested] 17+ messages in thread
[parent not found: <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-04 22:48 ` Chris Wright [not found] ` <20070404224806.GA15078-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org> 2007-04-05 7:07 ` Avi Kivity 2007-04-05 7:46 ` Avi Kivity 2 siblings, 1 reply; 17+ messages in thread From: Chris Wright @ 2007-04-04 22:48 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f * Gregory Haskins (ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org) wrote: > The MMIO registration code has been broken out as a new patch from the in-kernel APIC work with the following changes per Avi's request: > > 1) Supports dynamic registration > 2) Uses gpa_t addresses > 3) Explicit per-cpu mappings > > In addition, I have added the concept of distinct VCPU and VM level registrations (where VCPU devices will eclipse competing VM registrations (if any). This will be key down the road where LAPICs should use VCPU registration, but IOAPICs should use VM level. hmm, i'm surprised it makes a difference. > Signed-off-by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> > > --- > drivers/kvm/kvm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ > drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++-------- > 2 files changed, 94 insertions(+), 9 deletions(-) > > diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h > index fceeb84..3334730 100644 > --- a/drivers/kvm/kvm.h > +++ b/drivers/kvm/kvm.h > @@ -236,6 +236,54 @@ struct kvm_pio_request { > int rep; > }; > > +struct kvm_io_device { > + unsigned long (*read)(struct kvm_io_device *this, > + gpa_t addr, > + unsigned long length); > + void (*write)(struct kvm_io_device *this, > + gpa_t addr, > + unsigned long length, > + unsigned long val); > + int (*in_range)(struct kvm_io_device *this, gpa_t addr); > + > + void *private; This looks unused, what is it meant for? 
> + struct list_head link; > +}; > + > +/* It would be nice to use something smarter than a linear search, TBD... > + Thankfully we dont expect many devices to register (famous last words :), > + so until then it will suffice. At least its abstracted so we can change > + in one place. > + */ > +struct kvm_io_bus { > + struct list_head list; > +}; > + > +static inline void > +kvm_io_bus_init(struct kvm_io_bus *bus) > +{ > + INIT_LIST_HEAD(&bus->list); > +} > + > +static inline struct kvm_io_device* > +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) > +{ > + struct kvm_io_device *pos = NULL; > + > + list_for_each_entry(pos, &bus->list, link) { > + if(pos->in_range(pos, addr)) linux style nit, missing space after if --> if (pos->in_range(pos, addr)) > + return pos; > + } > + > + return NULL; > +} > + > +static inline void > +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) > +{ > + list_add_tail(&dev->link, &bus->list); > +} > + > struct kvm_vcpu { > struct kvm *kvm; > union { > @@ -294,6 +342,7 @@ struct kvm_vcpu { > gpa_t mmio_phys_addr; > struct kvm_pio_request pio; > void *pio_data; > + struct kvm_io_bus mmio_bus; > > int sigset_active; > sigset_t sigset; > @@ -345,6 +394,7 @@ struct kvm { > unsigned long rmap_overflow; > struct list_head vm_list; > struct file *filp; > + struct kvm_io_bus mmio_bus; > }; > > struct kvm_stat { > diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c > index 4473174..da119c0 100644 > --- a/drivers/kvm/kvm_main.c > +++ b/drivers/kvm/kvm_main.c > @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) > > spin_lock_init(&kvm->lock); > INIT_LIST_HEAD(&kvm->active_mmu_pages); > + kvm_io_bus_init(&kvm->mmio_bus); I'd just do INIT_LIST_HEAD, unless you havd bigger plans for this wrapper? 
> for (i = 0; i < KVM_MAX_VCPUS; ++i) { > struct kvm_vcpu *vcpu = &kvm->vcpus[i]; > > @@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) > vcpu->kvm = kvm; > vcpu->mmu.root_hpa = INVALID_PAGE; > INIT_LIST_HEAD(&vcpu->free_pages); > + kvm_io_bus_init(&vcpu->mmio_bus); ditto > spin_lock(&kvm_lock); > list_add(&kvm->vm_list, &vm_list); > spin_unlock(&kvm_lock); > @@ -1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr, > return X86EMUL_UNHANDLEABLE; > } > > +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, > + gpa_t addr) > +{ > + struct kvm_io_device *mmio_dev; > + > + /* First check the local CPU addresses */ > + mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr); > + if(!mmio_dev) { same style nit. and why do you have local vs global check (or dynamic registration for that matter)? > + /* Then check the entire VM */ > + mmio_dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); > + } > + > + return mmio_dev; > +} > + > static int emulator_read_emulated(unsigned long addr, > unsigned long *val, > unsigned int bytes, > struct x86_emulate_ctxt *ctxt) > { > struct kvm_vcpu *vcpu = ctxt->vcpu; > + gpa_t gpa; > + int i; > + struct kvm_io_device *mmio_dev; > > if (vcpu->mmio_read_completed) { > memcpy(val, vcpu->mmio_data, bytes); > @@ -1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr, > } else if (emulator_read_std(addr, val, bytes, ctxt) > == X86EMUL_CONTINUE) > return X86EMUL_CONTINUE; > - else { > - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > > - if (gpa == UNMAPPED_GVA) > - return X86EMUL_PROPAGATE_FAULT; > - vcpu->mmio_needed = 1; > - vcpu->mmio_phys_addr = gpa; > - vcpu->mmio_size = bytes; > - vcpu->mmio_is_write = 0; > + gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > + if (gpa == UNMAPPED_GVA) > + return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; > > - return X86EMUL_UNHANDLEABLE; > + /* Is thie MMIO handled locally? 
*/ s/thie/this/ > + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); > + if(mmio_dev) { style > + *val = mmio_dev->read(mmio_dev, gpa, bytes); > + return X86EMUL_CONTINUE; > } > + > + vcpu->mmio_needed = 1; > + vcpu->mmio_phys_addr = gpa; > + vcpu->mmio_size = bytes; > + vcpu->mmio_is_write = 0; > + > + return X86EMUL_UNHANDLEABLE; > } > > static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, > @@ -1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr, > { > struct kvm_vcpu *vcpu = ctxt->vcpu; > gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > + int i; > + struct kvm_io_device *mmio_dev; > > if (gpa == UNMAPPED_GVA) > return X86EMUL_PROPAGATE_FAULT; > @@ -1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr, > if (emulator_write_phys(vcpu, gpa, val, bytes)) > return X86EMUL_CONTINUE; > > + /* Is thie MMIO handled locally? */ s/thie/this/ > + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); > + if(mmio_dev) { style > + mmio_dev->write(mmio_dev, gpa, bytes, val); > + return X86EMUL_CONTINUE; > + } > + > vcpu->mmio_needed = 1; > vcpu->mmio_phys_addr = gpa; > vcpu->mmio_size = bytes; ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <20070404224806.GA15078-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <20070404224806.GA15078-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org> @ 2007-04-04 23:04 ` Gregory Haskins [not found] ` <20070405001021.GV10574@sequoia.sous-sol.org> [not found] ` <4613E891.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 0 siblings, 2 replies; 17+ messages in thread From: Gregory Haskins @ 2007-04-04 23:04 UTC (permalink / raw) To: Chris Wright; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Hi Chris, Thanks for the feedback. Ive answered inline below. >>> On Wed, Apr 4, 2007 at 6:48 PM, in message <20070404224806.GA15078-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org>, Chris Wright <chrisw-69jw2NvuJkxg9hUCZPvPmw@public.gmane.org> wrote: > * Gregory Haskins (ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org) wrote: >> The MMIO registration code has been broken out as a new patch from the > in- kernel APIC work with the following changes per Avi's request: >> >> 1) Supports dynamic registration >> 2) Uses gpa_t addresses >> 3) Explicit per- cpu mappings >> >> In addition, I have added the concept of distinct VCPU and VM level > registrations (where VCPU devices will eclipse competing VM registrations (if > any). This will be key down the road where LAPICs should use VCPU > registration, but IOAPICs should use VM level. > > hmm, i'm surprised it makes a difference. LAPICs can be remapped on a per-cpu basis via an MSR, whereas something like an IOAPIC is a system-wide resource. 
> >> Signed- off- by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> >> >> --- >> drivers/kvm/kvm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ >> drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++-------- >> 2 files changed, 94 insertions(+), 9 deletions(- ) >> >> diff -- git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h >> index fceeb84..3334730 100644 >> --- a/drivers/kvm/kvm.h >> +++ b/drivers/kvm/kvm.h >> @@ - 236,6 +236,54 @@ struct kvm_pio_request { >> int rep; >> }; >> >> +struct kvm_io_device { >> + unsigned long (*read)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length); >> + void (*write)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length, >> + unsigned long val); >> + int (*in_range)(struct kvm_io_device *this, gpa_t addr); >> + >> + void *private; > > This looks unused, what is it meant for? Its unused in this patch, because the primary consumer is a follow on patch that is not yet released. The original patch had this logic + the logic that used it all together and it was requested to break them apart. > >> + struct list_head link; >> +}; >> + >> +/* It would be nice to use something smarter than a linear search, TBD... >> + Thankfully we dont expect many devices to register (famous last words > :), >> + so until then it will suffice. At least its abstracted so we can change >> + in one place. >> + */ >> +struct kvm_io_bus { >> + struct list_head list; >> +}; >> + >> +static inline void >> +kvm_io_bus_init(struct kvm_io_bus *bus) >> +{ >> + INIT_LIST_HEAD(&bus- >list); >> +} >> + >> +static inline struct kvm_io_device* >> +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) >> +{ >> + struct kvm_io_device *pos = NULL; >> + >> + list_for_each_entry(pos, &bus- >list, link) { >> + if(pos- >in_range(pos, addr)) > > linux style nit, missing space after if -- > if (pos- >in_range(pos, addr)) Yeah, old habits die hard ;) I will fix all of these. 
> >> + return pos; >> + } >> + >> + return NULL; >> +} >> + >> +static inline void >> +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) >> +{ >> + list_add_tail(&dev- >link, &bus- >list); >> +} >> + >> struct kvm_vcpu { >> struct kvm *kvm; >> union { >> @@ - 294,6 +342,7 @@ struct kvm_vcpu { >> gpa_t mmio_phys_addr; >> struct kvm_pio_request pio; >> void *pio_data; >> + struct kvm_io_bus mmio_bus; >> >> int sigset_active; >> sigset_t sigset; >> @@ - 345,6 +394,7 @@ struct kvm { >> unsigned long rmap_overflow; >> struct list_head vm_list; >> struct file *filp; >> + struct kvm_io_bus mmio_bus; >> }; >> >> struct kvm_stat { >> diff -- git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c >> index 4473174..da119c0 100644 >> --- a/drivers/kvm/kvm_main.c >> +++ b/drivers/kvm/kvm_main.c >> @@ - 294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) >> >> spin_lock_init(&kvm- >lock); >> INIT_LIST_HEAD(&kvm- >active_mmu_pages); >> + kvm_io_bus_init(&kvm- >mmio_bus); > > I'd just do INIT_LIST_HEAD, unless you havd bigger plans for this wrapper? The motivation for wrapping the init is because I want to abstract the fact that its a list. This means I can update the mechanism to do something more intelligent with address lookup (e.g. b-tree, etc) without changing code all over the place. Right now there are only two consumers, put I envision there will be some more. 
For instance, I would like to get PIOs using this mechanism at some point (so I can snarf accesses to the 8259s at 0x20/0xa0) > >> for (i = 0; i < KVM_MAX_VCPUS; ++i) { >> struct kvm_vcpu *vcpu = &kvm- >vcpus[i]; >> >> @@ - 302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) >> vcpu- >kvm = kvm; >> vcpu- >mmu.root_hpa = INVALID_PAGE; >> INIT_LIST_HEAD(&vcpu- >free_pages); >> + kvm_io_bus_init(&vcpu- >mmio_bus); > > ditto > >> spin_lock(&kvm_lock); >> list_add(&kvm- >vm_list, &vm_list); >> spin_unlock(&kvm_lock); >> @@ - 1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr, >> return X86EMUL_UNHANDLEABLE; >> } >> >> +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, >> + gpa_t addr) >> +{ >> + struct kvm_io_device *mmio_dev; >> + >> + /* First check the local CPU addresses */ >> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >mmio_bus, addr); >> + if(!mmio_dev) { > > same style nit. ack > and why do you have local vs global check see my first comment: re LAPIC is a relocateble per-cpu resource, IOAPIC is not >(or dynamic registration for that matter)? the xAPIC support will be optional, so the ability to register for MMIO handing has to be dynamic. 
> >> + /* Then check the entire VM */ >> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >kvm- >mmio_bus, addr); >> + } >> + >> + return mmio_dev; >> +} >> + >> static int emulator_read_emulated(unsigned long addr, >> unsigned long *val, >> unsigned int bytes, >> struct x86_emulate_ctxt *ctxt) >> { >> struct kvm_vcpu *vcpu = ctxt- >vcpu; >> + gpa_t gpa; >> + int i; >> + struct kvm_io_device *mmio_dev; >> >> if (vcpu- >mmio_read_completed) { >> memcpy(val, vcpu- >mmio_data, bytes); >> @@ - 1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr, >> } else if (emulator_read_std(addr, val, bytes, ctxt) >> == X86EMUL_CONTINUE) >> return X86EMUL_CONTINUE; >> - else { >> - gpa_t gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> >> - if (gpa == UNMAPPED_GVA) >> - return X86EMUL_PROPAGATE_FAULT; >> - vcpu- >mmio_needed = 1; >> - vcpu- >mmio_phys_addr = gpa; >> - vcpu- >mmio_size = bytes; >> - vcpu- >mmio_is_write = 0; >> + gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> + if (gpa == UNMAPPED_GVA) >> + return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; >> >> - return X86EMUL_UNHANDLEABLE; >> + /* Is thie MMIO handled locally? 
*/ > > s/thie/this/ ack > >> + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); >> + if(mmio_dev) { > > style ack > >> + *val = mmio_dev- >read(mmio_dev, gpa, bytes); >> + return X86EMUL_CONTINUE; >> } >> + >> + vcpu- >mmio_needed = 1; >> + vcpu- >mmio_phys_addr = gpa; >> + vcpu- >mmio_size = bytes; >> + vcpu- >mmio_is_write = 0; >> + >> + return X86EMUL_UNHANDLEABLE; >> } >> >> static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, >> @@ - 1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr, >> { >> struct kvm_vcpu *vcpu = ctxt- >vcpu; >> gpa_t gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> + int i; >> + struct kvm_io_device *mmio_dev; >> >> if (gpa == UNMAPPED_GVA) >> return X86EMUL_PROPAGATE_FAULT; >> @@ - 1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr, >> if (emulator_write_phys(vcpu, gpa, val, bytes)) >> return X86EMUL_CONTINUE; >> >> + /* Is thie MMIO handled locally? */ > > s/thie/this/ ack > >> + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); >> + if(mmio_dev) { > > style ack > >> + mmio_dev- >write(mmio_dev, gpa, bytes, val); >> + return X86EMUL_CONTINUE; >> + } >> + >> vcpu- >mmio_needed = 1; >> vcpu- >mmio_phys_addr = gpa; >> vcpu- >mmio_size = bytes; Regards, -Greg ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <20070405001021.GV10574@sequoia.sous-sol.org>]
[parent not found: <20070405001021.GV10574-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <20070405001021.GV10574-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org> @ 2007-04-05 0:21 ` Gregory Haskins 0 siblings, 0 replies; 17+ messages in thread From: Gregory Haskins @ 2007-04-05 0:21 UTC (permalink / raw) To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Chris Wright [-- Attachment #1: Type: text/plain, Size: 97 bytes --] The attachment contains fixes based on the feedback from Chris. Thanks Chris! Regards, -Greg [-- Attachment #2: mmio.patch --] [-- Type: text/plain, Size: 5143 bytes --] diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index fceeb84..0e6eb04 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -236,6 +236,54 @@ struct kvm_pio_request { int rep; }; +struct kvm_io_device { + unsigned long (*read)(struct kvm_io_device *this, + gpa_t addr, + unsigned long length); + void (*write)(struct kvm_io_device *this, + gpa_t addr, + unsigned long length, + unsigned long val); + int (*in_range)(struct kvm_io_device *this, gpa_t addr); + + void *private; + struct list_head link; +}; + +/* It would be nice to use something smarter than a linear search, TBD... + * Thankfully we dont expect many devices to register (famous last words :), + * so until then it will suffice. At least its abstracted so we can change + * in one place. 
+ */ +struct kvm_io_bus { + struct list_head list; +}; + +static inline void +kvm_io_bus_init(struct kvm_io_bus *bus) +{ + INIT_LIST_HEAD(&bus->list); +} + +static inline struct kvm_io_device* +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) +{ + struct kvm_io_device *pos = NULL; + + list_for_each_entry(pos, &bus->list, link) { + if (pos->in_range(pos, addr)) + return pos; + } + + return NULL; +} + +static inline void +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) +{ + list_add_tail(&dev->link, &bus->list); +} + struct kvm_vcpu { struct kvm *kvm; union { @@ -294,6 +342,7 @@ struct kvm_vcpu { gpa_t mmio_phys_addr; struct kvm_pio_request pio; void *pio_data; + struct kvm_io_bus mmio_bus; int sigset_active; sigset_t sigset; @@ -345,6 +394,7 @@ struct kvm { unsigned long rmap_overflow; struct list_head vm_list; struct file *filp; + struct kvm_io_bus mmio_bus; }; struct kvm_stat { diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 4473174..c8109b7 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) spin_lock_init(&kvm->lock); INIT_LIST_HEAD(&kvm->active_mmu_pages); + kvm_io_bus_init(&kvm->mmio_bus); for (i = 0; i < KVM_MAX_VCPUS; ++i) { struct kvm_vcpu *vcpu = &kvm->vcpus[i]; @@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) vcpu->kvm = kvm; vcpu->mmu.root_hpa = INVALID_PAGE; INIT_LIST_HEAD(&vcpu->free_pages); + kvm_io_bus_init(&vcpu->mmio_bus); spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); spin_unlock(&kvm_lock); @@ -1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr, return X86EMUL_UNHANDLEABLE; } +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, + gpa_t addr) +{ + struct kvm_io_device *mmio_dev; + + /* First check the local CPU addresses */ + mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr); + if (!mmio_dev) { + /* Then check the entire VM */ + mmio_dev = 
kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); + } + + return mmio_dev; +} + static int emulator_read_emulated(unsigned long addr, unsigned long *val, unsigned int bytes, struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = ctxt->vcpu; + gpa_t gpa; + int i; + struct kvm_io_device *mmio_dev; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); @@ -1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr, } else if (emulator_read_std(addr, val, bytes, ctxt) == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - else { - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; - vcpu->mmio_needed = 1; - vcpu->mmio_phys_addr = gpa; - vcpu->mmio_size = bytes; - vcpu->mmio_is_write = 0; + gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + if (gpa == UNMAPPED_GVA) + return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; - return X86EMUL_UNHANDLEABLE; + /* Is this MMIO handled locally? */ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + *val = mmio_dev->read(mmio_dev, gpa, bytes); + return X86EMUL_CONTINUE; } + + vcpu->mmio_needed = 1; + vcpu->mmio_phys_addr = gpa; + vcpu->mmio_size = bytes; + vcpu->mmio_is_write = 0; + + return X86EMUL_UNHANDLEABLE; } static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, @@ -1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr, { struct kvm_vcpu *vcpu = ctxt->vcpu; gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + int i; + struct kvm_io_device *mmio_dev; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr, if (emulator_write_phys(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; + /* Is this MMIO handled locally? 
*/ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + mmio_dev->write(mmio_dev, gpa, bytes, val); + return X86EMUL_CONTINUE; + } + vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = gpa; vcpu->mmio_size = bytes; [-- Attachment #3: Type: text/plain, Size: 345 bytes --] ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV [-- Attachment #4: Type: text/plain, Size: 186 bytes --] _______________________________________________ kvm-devel mailing list kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org https://lists.sourceforge.net/lists/listinfo/kvm-devel ^ permalink raw reply related [flat|nested] 17+ messages in thread
[parent not found: <4613E891.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4613E891.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-05 0:49 ` Chris Wright 0 siblings, 0 replies; 17+ messages in thread From: Chris Wright @ 2007-04-05 0:49 UTC (permalink / raw) To: Gregory Haskins; +Cc: Chris Wright, kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f * Gregory Haskins (ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org) wrote: > LAPICs can be remapped on a per-cpu basis via an MSR, whereas something > like an IOAPIC is a system-wide resource. Yes, I see now, no vcpu in kvm_io_device callbacks' context (admittedly, I'm used to the Xen implementation ;-) > >> +struct kvm_io_device { > >> + unsigned long (*read)(struct kvm_io_device *this, > >> + gpa_t addr, > >> + unsigned long length); > >> + void (*write)(struct kvm_io_device *this, > >> + gpa_t addr, > >> + unsigned long length, > >> + unsigned long val); > >> + int (*in_range)(struct kvm_io_device *this, gpa_t addr); > >> + > >> + void *private; > > > > This looks unused, what is it meant for? > > Its unused in this patch, because the primary consumer is a follow on > patch that is not yet released. The original patch had this logic + the > logic that used it all together and it was requested to break them apart. Makes sense, I'll wait to see a user to understand how it'w used. > >> +++ b/drivers/kvm/kvm_main.c > >> @@ - 294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) > >> > >> spin_lock_init(&kvm- >lock); > >> INIT_LIST_HEAD(&kvm- >active_mmu_pages); > >> + kvm_io_bus_init(&kvm- >mmio_bus); > > > > I'd just do INIT_LIST_HEAD, unless you havd bigger plans for this wrapper? > > The motivation for wrapping the init is because I want to abstract > the fact that its a list. This means I can update the mechanism to > do something more intelligent with address lookup (e.g. b-tree, etc) > without changing code all over the place. Right now there are only > two consumers, put I envision there will be some more. 
For instance, > I would like to get PIOs using this mechanism at some point (so I can > snarf accesses to the 8259s at 0x20/0xa0) Right, you even alluded to that in your comments. I didn't expect a list to really become that long where it needed a more complex data structure. thanks, -chris ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 2007-04-04 22:48 ` Chris Wright @ 2007-04-05 7:07 ` Avi Kivity [not found] ` <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org> 2007-04-05 7:46 ` Avi Kivity 2 siblings, 1 reply; 17+ messages in thread From: Avi Kivity @ 2007-04-05 7:07 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: > The MMIO registration code has been broken out as a new patch from the in-kernel APIC work with the following changes per Avi's request: > > 1) Supports dynamic registration > 2) Uses gpa_t addresses > 3) Explicit per-cpu mappings > > In addition, I have added the concept of distinct VCPU and VM level registrations (where VCPU devices will eclipse competing VM registrations (if any). This will be key down the road where LAPICs should use VCPU registration, but IOAPICs should use VM level. > > Signed-off-by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> > > --- > drivers/kvm/kvm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ > drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++-------- > 2 files changed, 94 insertions(+), 9 deletions(-) > > diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h > index fceeb84..3334730 100644 > --- a/drivers/kvm/kvm.h > +++ b/drivers/kvm/kvm.h > @@ -236,6 +236,54 @@ struct kvm_pio_request { > int rep; > }; > > +struct kvm_io_device { > + unsigned long (*read)(struct kvm_io_device *this, > + gpa_t addr, > + unsigned long length); > + void (*write)(struct kvm_io_device *this, > + gpa_t addr, > + unsigned long length, > + unsigned long val); > length could be just an int. > + int (*in_range)(struct kvm_io_device *this, gpa_t addr); > Do you see any reason to have this as a callback and not a pair of gpas? > + > + void *private; > + struct list_head link; > Having these in an array would be much more efficient. 
A fixed size array of moderate size should suffice. > +}; > + > +/* It would be nice to use something smarter than a linear search, TBD... > + Thankfully we dont expect many devices to register (famous last words :), > + so until then it will suffice. At least its abstracted so we can change > + in one place. > + */ > /* * kernel comments look * like this */ > +struct kvm_io_bus { > + struct list_head list; > +}; > + > +static inline void > +kvm_io_bus_init(struct kvm_io_bus *bus) > function declarations on one line please. > +{ > + INIT_LIST_HEAD(&bus->list); > +} > + > +static inline struct kvm_io_device* > C style pointers: struct blah *somthing(); > +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) > +{ > + struct kvm_io_device *pos = NULL; > + > + list_for_each_entry(pos, &bus->list, link) { > + if(pos->in_range(pos, addr)) > + return pos; > + } > space after if. avoid redundant {}. Have Documentaion/CodingStyle tattooed somewhere easily accessible. > + > + return NULL; > +} > + > +static inline void > +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) > +{ > + list_add_tail(&dev->link, &bus->list); > +} > + > struct kvm_vcpu { > struct kvm *kvm; > union { > @@ -294,6 +342,7 @@ struct kvm_vcpu { > gpa_t mmio_phys_addr; > struct kvm_pio_request pio; > void *pio_data; > + struct kvm_io_bus mmio_bus; > > int sigset_active; > sigset_t sigset; > @@ -345,6 +394,7 @@ struct kvm { > unsigned long rmap_overflow; > struct list_head vm_list; > struct file *filp; > + struct kvm_io_bus mmio_bus; > The per-vcpu I/O bus is special in that it has exactly one component, and one which can change its address. I think we can special case it and just check for apic addresses explicitly when searching the bus. 
> }; > > struct kvm_stat { > diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c > index 4473174..da119c0 100644 > --- a/drivers/kvm/kvm_main.c > +++ b/drivers/kvm/kvm_main.c > @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) > > spin_lock_init(&kvm->lock); > INIT_LIST_HEAD(&kvm->active_mmu_pages); > + kvm_io_bus_init(&kvm->mmio_bus); > for (i = 0; i < KVM_MAX_VCPUS; ++i) { > struct kvm_vcpu *vcpu = &kvm->vcpus[i]; > > @@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) > vcpu->kvm = kvm; > vcpu->mmu.root_hpa = INVALID_PAGE; > INIT_LIST_HEAD(&vcpu->free_pages); > + kvm_io_bus_init(&vcpu->mmio_bus); > spin_lock(&kvm_lock); > list_add(&kvm->vm_list, &vm_list); > spin_unlock(&kvm_lock); > @@ -1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr, > return X86EMUL_UNHANDLEABLE; > } > > +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, > + gpa_t addr) > +{ > + struct kvm_io_device *mmio_dev; > + > + /* First check the local CPU addresses */ > + mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr); > + if(!mmio_dev) { > + /* Then check the entire VM */ > + mmio_dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); > + } > space, comment, braces > + > + return mmio_dev; > +} > + > static int emulator_read_emulated(unsigned long addr, > unsigned long *val, > unsigned int bytes, > struct x86_emulate_ctxt *ctxt) > { > struct kvm_vcpu *vcpu = ctxt->vcpu; > + gpa_t gpa; > + int i; > + struct kvm_io_device *mmio_dev; > > if (vcpu->mmio_read_completed) { > memcpy(val, vcpu->mmio_data, bytes); > @@ -1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr, > } else if (emulator_read_std(addr, val, bytes, ctxt) > == X86EMUL_CONTINUE) > return X86EMUL_CONTINUE; > - else { > - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > > - if (gpa == UNMAPPED_GVA) > - return X86EMUL_PROPAGATE_FAULT; > - vcpu->mmio_needed = 1; > - vcpu->mmio_phys_addr = gpa; > - vcpu->mmio_size = bytes; > - vcpu->mmio_is_write = 0; 
> + gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > + if (gpa == UNMAPPED_GVA) > + return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; > The vcpu_printf() snuck in somehow. > > - return X86EMUL_UNHANDLEABLE; > + /* Is thie MMIO handled locally? */ > + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); > + if(mmio_dev) { > + *val = mmio_dev->read(mmio_dev, gpa, bytes); > + return X86EMUL_CONTINUE; > } > + > + vcpu->mmio_needed = 1; > + vcpu->mmio_phys_addr = gpa; > + vcpu->mmio_size = bytes; > + vcpu->mmio_is_write = 0; > + > + return X86EMUL_UNHANDLEABLE; > } > > static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, > @@ -1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr, > { > struct kvm_vcpu *vcpu = ctxt->vcpu; > gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); > + int i; > + struct kvm_io_device *mmio_dev; > > if (gpa == UNMAPPED_GVA) > return X86EMUL_PROPAGATE_FAULT; > @@ -1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr, > if (emulator_write_phys(vcpu, gpa, val, bytes)) > return X86EMUL_CONTINUE; > > + /* Is thie MMIO handled locally? */ > spelling > + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); > + if(mmio_dev) { > + mmio_dev->write(mmio_dev, gpa, bytes, val); > + return X86EMUL_CONTINUE; > + } > + > vcpu->mmio_needed = 1; > vcpu->mmio_phys_addr = gpa; > vcpu->mmio_size = bytes; > > > Please fix and *test*. Boot at least 32-bit Windows with ACPI HAL and 64-bit Linux, the more the better of course. -- Do not meddle in the internals of kernels, for they are subtle and quick to panic. ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org> @ 2007-04-05 7:29 ` Dor Laor 2007-04-05 14:58 ` Gregory Haskins 1 sibling, 0 replies; 17+ messages in thread From: Dor Laor @ 2007-04-05 7:29 UTC (permalink / raw) To: Avi Kivity, Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f >> Signed-off-by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> >> >> --- >> drivers/kvm/kvm.h | 50 >+++++++++++++++++++++++++++++++++++++++++++++ >> drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++--- >----- >> 2 files changed, 94 insertions(+), 9 deletions(-) >> >> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h >> index fceeb84..3334730 100644 >> --- a/drivers/kvm/kvm.h >> +++ b/drivers/kvm/kvm.h >> @@ -236,6 +236,54 @@ struct kvm_pio_request { >> int rep; >> }; >> >> +struct kvm_io_device { >> + unsigned long (*read)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length); >> + void (*write)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length, >> + unsigned long val); >> > >length could be just an int. > >> + int (*in_range)(struct kvm_io_device *this, gpa_t addr); >> > >Do you see any reason to have this as a callback and not a pair of gpas? This way a device can register for a range that contains holes, this would allow partial implementation in the kernel and part in user space. Might be usefull for some VT optimization support (haven't though of a specific one yet, but there's a good chance to be used.) > >> + >> + void *private; >> + struct list_head link; >> > >Having these in an array would be much more efficient. A fixed size >array of moderate size should suffice. You can take it from my old apic code. > ------------------------------------------------------------------------- Take Surveys. Earn Cash. 
Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org> 2007-04-05 7:29 ` Dor Laor @ 2007-04-05 14:58 ` Gregory Haskins [not found] ` <4614C844.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 1 sibling, 1 reply; 17+ messages in thread From: Gregory Haskins @ 2007-04-05 14:58 UTC (permalink / raw) To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f [-- Attachment #1: Type: text/plain, Size: 9590 bytes --] Hi Avi, I have addressed your comments and re-attached the fixed up patch. Most of the things you suggested I implemented, but a few I didnt so I will comment inline... >>> On Thu, Apr 5, 2007 at 3:07 AM, in message <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org>, Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org> wrote: > Gregory Haskins wrote: >> The MMIO registration code has been broken out as a new patch from the > in- kernel APIC work with the following changes per Avi's request: >> >> 1) Supports dynamic registration >> 2) Uses gpa_t addresses >> 3) Explicit per- cpu mappings >> >> In addition, I have added the concept of distinct VCPU and VM level > registrations (where VCPU devices will eclipse competing VM registrations (if > any). This will be key down the road where LAPICs should use VCPU > registration, but IOAPICs should use VM level. 
>> >> Signed- off- by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> >> >> --- >> drivers/kvm/kvm.h | 50 +++++++++++++++++++++++++++++++++++++++++++++ >> drivers/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++++++++++-------- >> 2 files changed, 94 insertions(+), 9 deletions(- ) >> >> diff -- git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h >> index fceeb84..3334730 100644 >> --- a/drivers/kvm/kvm.h >> +++ b/drivers/kvm/kvm.h >> @@ - 236,6 +236,54 @@ struct kvm_pio_request { >> int rep; >> }; >> >> +struct kvm_io_device { >> + unsigned long (*read)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length); >> + void (*write)(struct kvm_io_device *this, >> + gpa_t addr, >> + unsigned long length, >> + unsigned long val); >> > > length could be just an int. Done > >> + int (*in_range)(struct kvm_io_device *this, gpa_t addr); >> > > Do you see any reason to have this as a callback and not a pair of gpas? I believe Dor replied earlier stating the reason of being able to support holes. Another reason that I can think of that I particularly like about this design (which I am not claiming as my own) is that the device can relocate (e.g. LAPIC base addr) without worrying about reprogramming the bus. > >> + >> + void *private; >> + struct list_head link; >> > > Having these in an array would be much more efficient. A fixed size > array of moderate size should suffice. Done. Maximum # devices is currently 6, because anything beyond that and I think we need to revisit the linear alg ;) > >> +}; >> + >> +/* It would be nice to use something smarter than a linear search, TBD... >> + Thankfully we dont expect many devices to register (famous last words > :), >> + so until then it will suffice. At least its abstracted so we can change >> + in one place. 
>> + */ >> > > /* > * kernel comments look > * like this > */ Done > >> +struct kvm_io_bus { >> + struct list_head list; >> +}; >> + >> +static inline void >> +kvm_io_bus_init(struct kvm_io_bus *bus) >> > > function declarations on one line please. Done (though I hate lines that runneth over 80 ;) > >> +{ >> + INIT_LIST_HEAD(&bus- >list); >> +} >> + >> +static inline struct kvm_io_device* >> > > C style pointers: > struct blah *somthing(); Done. > >> +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) >> +{ >> + struct kvm_io_device *pos = NULL; >> + >> + list_for_each_entry(pos, &bus- >list, link) { >> + if(pos- >in_range(pos, addr)) >> + return pos; >> + } >> > > space after if. avoid redundant {}. Have Documentaion/CodingStyle > tattooed somewhere easily accessible. Done > >> + >> + return NULL; >> +} >> + >> +static inline void >> +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) >> +{ >> + list_add_tail(&dev- >link, &bus- >list); >> +} >> + >> struct kvm_vcpu { >> struct kvm *kvm; >> union { >> @@ - 294,6 +342,7 @@ struct kvm_vcpu { >> gpa_t mmio_phys_addr; >> struct kvm_pio_request pio; >> void *pio_data; >> + struct kvm_io_bus mmio_bus; >> >> int sigset_active; >> sigset_t sigset; >> @@ - 345,6 +394,7 @@ struct kvm { >> unsigned long rmap_overflow; >> struct list_head vm_list; >> struct file *filp; >> + struct kvm_io_bus mmio_bus; >> > > The per- vcpu I/O bus is special in that it has exactly one component, > and one which can change its address. I think we can special case it > and just check for apic addresses explicitly when searching the bus. I am loath to make special cases if they can be avoided. I think performance wise a kvm_io_bus with one device wont be much different than having a special case check against apicbase. And the advantage that this buys us is future platforms (e.g. IA64?) may have more than one per-cpu MMIO address. 
I also realize that future platforms may be divergent from the entire in-kernel code base altogether, but I think the general and flexible way is better if there are no compromising tradeoffs, even if its only for example/reference. In this case I dont think there are any tradeoffs, so I left it. If you insist, I will pull it ;) > >> }; >> >> struct kvm_stat { >> diff -- git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c >> index 4473174..da119c0 100644 >> --- a/drivers/kvm/kvm_main.c >> +++ b/drivers/kvm/kvm_main.c >> @@ - 294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) >> >> spin_lock_init(&kvm- >lock); >> INIT_LIST_HEAD(&kvm- >active_mmu_pages); >> + kvm_io_bus_init(&kvm- >mmio_bus); >> for (i = 0; i < KVM_MAX_VCPUS; ++i) { >> struct kvm_vcpu *vcpu = &kvm- >vcpus[i]; >> >> @@ - 302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) >> vcpu- >kvm = kvm; >> vcpu- >mmu.root_hpa = INVALID_PAGE; >> INIT_LIST_HEAD(&vcpu- >free_pages); >> + kvm_io_bus_init(&vcpu- >mmio_bus); >> spin_lock(&kvm_lock); >> list_add(&kvm- >vm_list, &vm_list); >> spin_unlock(&kvm_lock); >> @@ - 1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr, >> return X86EMUL_UNHANDLEABLE; >> } >> >> +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, >> + gpa_t addr) >> +{ >> + struct kvm_io_device *mmio_dev; >> + >> + /* First check the local CPU addresses */ >> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >mmio_bus, addr); >> + if(!mmio_dev) { >> + /* Then check the entire VM */ >> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >kvm- >mmio_bus, addr); >> + } >> > > space, comment, braces I believe I fixed this, but I am a little confused about what you were pointing out. The space is obvious. I believe you were pointing out that the braces weren't needed because its technically a single-line, and that the comment is fine. If I needed to change the comment too, let me know. 
> >> + >> + return mmio_dev; >> +} >> + >> static int emulator_read_emulated(unsigned long addr, >> unsigned long *val, >> unsigned int bytes, >> struct x86_emulate_ctxt *ctxt) >> { >> struct kvm_vcpu *vcpu = ctxt- >vcpu; >> + gpa_t gpa; >> + int i; >> + struct kvm_io_device *mmio_dev; >> >> if (vcpu- >mmio_read_completed) { >> memcpy(val, vcpu- >mmio_data, bytes); >> @@ - 1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr, >> } else if (emulator_read_std(addr, val, bytes, ctxt) >> == X86EMUL_CONTINUE) >> return X86EMUL_CONTINUE; >> - else { >> - gpa_t gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> >> - if (gpa == UNMAPPED_GVA) >> - return X86EMUL_PROPAGATE_FAULT; >> - vcpu- >mmio_needed = 1; >> - vcpu- >mmio_phys_addr = gpa; >> - vcpu- >mmio_size = bytes; >> - vcpu- >mmio_is_write = 0; >> + gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> + if (gpa == UNMAPPED_GVA) >> + return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT; >> > > > The vcpu_printf() snuck in somehow. Opps. Carry over from the apic branch merge. Fixed. > >> >> - return X86EMUL_UNHANDLEABLE; >> + /* Is thie MMIO handled locally? 
*/ >> + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); >> + if(mmio_dev) { >> + *val = mmio_dev- >read(mmio_dev, gpa, bytes); >> + return X86EMUL_CONTINUE; >> } >> + >> + vcpu- >mmio_needed = 1; >> + vcpu- >mmio_phys_addr = gpa; >> + vcpu- >mmio_size = bytes; >> + vcpu- >mmio_is_write = 0; >> + >> + return X86EMUL_UNHANDLEABLE; >> } >> >> static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, >> @@ - 1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr, >> { >> struct kvm_vcpu *vcpu = ctxt- >vcpu; >> gpa_t gpa = vcpu- >mmu.gva_to_gpa(vcpu, addr); >> + int i; >> + struct kvm_io_device *mmio_dev; >> >> if (gpa == UNMAPPED_GVA) >> return X86EMUL_PROPAGATE_FAULT; >> @@ - 1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr, >> if (emulator_write_phys(vcpu, gpa, val, bytes)) >> return X86EMUL_CONTINUE; >> >> + /* Is thie MMIO handled locally? */ >> > > spelling Done > >> + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); >> + if(mmio_dev) { >> + mmio_dev- >write(mmio_dev, gpa, bytes, val); >> + return X86EMUL_CONTINUE; >> + } >> + >> vcpu- >mmio_needed = 1; >> vcpu- >mmio_phys_addr = gpa; >> vcpu- >mmio_size = bytes; >> >> >> > > Please fix and *test*. Boot at least 32- bit Windows with ACPI HAL and > 64- bit Linux, the more the better of course. I have confirmed that my 64 bit linux guest boots fine. I don't currently have any other guests. 
Careful review of the code leads me to believe this should be an inert change, so I wont go through the effort of finding an XP CD to install unless you insist ;) Regards, -Greg [-- Attachment #2: in-kernel-mmio.patch --] [-- Type: text/plain, Size: 5404 bytes --] diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index fceeb84..c1923df 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -236,6 +236,56 @@ struct kvm_pio_request { int rep; }; +struct kvm_io_device { + unsigned long (*read)(struct kvm_io_device *this, + gpa_t addr, + int length); + void (*write)(struct kvm_io_device *this, + gpa_t addr, + int length, + unsigned long val); + int (*in_range)(struct kvm_io_device *this, gpa_t addr); + + void *private; +}; + +/* It would be nice to use something smarter than a linear search, TBD... + * Thankfully we dont expect many devices to register (famous last words :), + * so until then it will suffice. At least its abstracted so we can change + * in one place. + */ +struct kvm_io_bus { + int dev_count; +#define NR_IOBUS_DEVS 6 + struct kvm_io_device *devs[NR_IOBUS_DEVS]; +}; + +static inline void kvm_io_bus_init(struct kvm_io_bus *bus) +{ + memset(bus, 0, sizeof(*bus)); +} + +static inline struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) +{ + int i; + + for(i=0; i<bus->dev_count; i++) { + struct kvm_io_device *pos = bus->devs[i]; + + if (pos->in_range(pos, addr)) + return pos; + } + + return NULL; +} + +static inline void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) +{ + BUG_ON(bus->dev_count >= (NR_IOBUS_DEVS-1)); + + bus->devs[bus->dev_count++] = dev; +} + struct kvm_vcpu { struct kvm *kvm; union { @@ -294,6 +344,7 @@ struct kvm_vcpu { gpa_t mmio_phys_addr; struct kvm_pio_request pio; void *pio_data; + struct kvm_io_bus mmio_bus; int sigset_active; sigset_t sigset; @@ -345,6 +396,7 @@ struct kvm { unsigned long rmap_overflow; struct list_head vm_list; struct file *filp; + struct kvm_io_bus 
mmio_bus; }; struct kvm_stat { diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 4473174..9ca0ad3 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) spin_lock_init(&kvm->lock); INIT_LIST_HEAD(&kvm->active_mmu_pages); + kvm_io_bus_init(&kvm->mmio_bus); for (i = 0; i < KVM_MAX_VCPUS; ++i) { struct kvm_vcpu *vcpu = &kvm->vcpus[i]; @@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void) vcpu->kvm = kvm; vcpu->mmu.root_hpa = INVALID_PAGE; INIT_LIST_HEAD(&vcpu->free_pages); + kvm_io_bus_init(&vcpu->mmio_bus); spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); spin_unlock(&kvm_lock); @@ -1015,12 +1017,28 @@ static int emulator_write_std(unsigned long addr, return X86EMUL_UNHANDLEABLE; } +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, + gpa_t addr) +{ + struct kvm_io_device *mmio_dev; + + /* First check the local CPU addresses */ + mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr); + if (!mmio_dev) + /* Then check the entire VM */ + mmio_dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); + + return mmio_dev; +} + static int emulator_read_emulated(unsigned long addr, unsigned long *val, unsigned int bytes, struct x86_emulate_ctxt *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_io_device *mmio_dev; + gpa_t gpa; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); @@ -1029,18 +1047,24 @@ static int emulator_read_emulated(unsigned long addr, } else if (emulator_read_std(addr, val, bytes, ctxt) == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - else { - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; - vcpu->mmio_needed = 1; - vcpu->mmio_phys_addr = gpa; - vcpu->mmio_size = bytes; - vcpu->mmio_is_write = 0; + gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + if (gpa == UNMAPPED_GVA) + return X86EMUL_PROPAGATE_FAULT; - return 
X86EMUL_UNHANDLEABLE; + /* Is this MMIO handled locally? */ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + *val = mmio_dev->read(mmio_dev, gpa, bytes); + return X86EMUL_CONTINUE; } + + vcpu->mmio_needed = 1; + vcpu->mmio_phys_addr = gpa; + vcpu->mmio_size = bytes; + vcpu->mmio_is_write = 0; + + return X86EMUL_UNHANDLEABLE; } static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, @@ -1068,8 +1092,9 @@ static int emulator_write_emulated(unsigned long addr, unsigned int bytes, struct x86_emulate_ctxt *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_io_device *mmio_dev; + gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -1077,6 +1102,13 @@ static int emulator_write_emulated(unsigned long addr, if (emulator_write_phys(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; + /* Is this MMIO handled locally? */ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + mmio_dev->write(mmio_dev, gpa, bytes, val); + return X86EMUL_CONTINUE; + } + vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = gpa; vcpu->mmio_size = bytes; [-- Attachment #3: Type: text/plain, Size: 345 bytes --] ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV [-- Attachment #4: Type: text/plain, Size: 186 bytes --] _______________________________________________ kvm-devel mailing list kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org https://lists.sourceforge.net/lists/listinfo/kvm-devel ^ permalink raw reply related [flat|nested] 17+ messages in thread
[parent not found: <4614C844.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614C844.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-08 7:38 ` Avi Kivity 2007-04-08 8:49 ` Avi Kivity 1 sibling, 0 replies; 17+ messages in thread From: Avi Kivity @ 2007-04-08 7:38 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: > >>> + int (*in_range)(struct kvm_io_device *this, gpa_t addr); >>> >>> >> Do you see any reason to have this as a callback and not a pair of gpas? >> > > I believe Dor replied earlier stating the reason of being able to support holes. Another reason that I can think of that I particularly like about this design (which I am not claiming as my own) is that the device can relocate (e.g. LAPIC base addr) without worrying about reprogramming the bus. > > I don't like either reasons much, but okay. We can address any performance concerns later (I doubt we'll see any with current hardware). >>> + >>> + void *private; >>> + struct list_head link; >>> >>> >> Having these in an array would be much more efficient. A fixed size >> array of moderate size should suffice. >> > > Done. Maximum # devices is currently 6, because anything beyond that and I think we need to revisit the linear alg ;) > > You'll be surprised. Processors are so efficient at processing arrays that you'll need a much longer list before a better algorithm starts to gain. Anyway 6 is as good a number as any. >> function declarations on one line please. >> > > Done (though I hate lines that runneth over 80 ;) > > A newline usually answers :) >> The per- vcpu I/O bus is special in that it has exactly one component, >> and one which can change its address. I think we can special case it >> and just check for apic addresses explicitly when searching the bus. >> > > I am loath to make special cases if they can be avoided. 
I think performance wise a kvm_io_bus with one device wont be much different than having a special case check against apicbase. And the advantage that this buys us is future platforms (e.g. IA64?) may have more than one per-cpu MMIO address. I also realize that future platforms may be divergent from the entire in-kernel code base altogether, but I think the general and flexible way is better if there are no compromising tradeoffs, even if its only for example/reference. In this case I dont think there are any tradeoffs, so I left it. If you insist, I will pull it ;) > > I think it unlikely that we'll see another local mmio device, it's so counter to the spirit of mmio (which is global by its nature). >>> >>> +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, >>> + gpa_t addr) >>> +{ >>> + struct kvm_io_device *mmio_dev; >>> + >>> + /* First check the local CPU addresses */ >>> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >mmio_bus, addr); >>> + if(!mmio_dev) { >>> + /* Then check the entire VM */ >>> + mmio_dev = kvm_io_bus_find_dev(&vcpu- >kvm- >mmio_bus, addr); >>> + } >>> >>> >> space, comment, braces >> > > I believe I fixed this, but I am a little confused about what you were pointing out. The space is obvious. I believe you were pointing out that the braces weren't needed because its technically a single-line, and that the comment is fine. If I needed to change the comment too, let me know. > /* * comment */ Do re-read Documentation/CodingStyle. Coding practices die hard, and the kernel is especially sensitive to coding style. There are some instances of nonconforming comments in the updated patches too. >>> >>> >> Please fix and *test*. Boot at least 32- bit Windows with ACPI HAL and >> 64- bit Linux, the more the better of course. >> > > > I have confirmed that my 64 bit linux guest boots fine. I don't currently have any other guests. 
Careful review of the code leads me to believe this should be an inert change, so I wont go through the effort of finding an XP CD to install unless you insist ;) > > Please do test. Even if the changes have no effect, you might expose some latent bug. In any case you'll need Windows to do the apic stuff -- it's much more sensitive to apic problems than Linux. -- Do not meddle in the internals of kernels, for they are subtle and quick to panic. ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614C844.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 2007-04-08 7:38 ` Avi Kivity @ 2007-04-08 8:49 ` Avi Kivity [not found] ` <4618AC94.3040700-atKUWr5tajBWk0Htik3J/w@public.gmane.org> 1 sibling, 1 reply; 17+ messages in thread From: Avi Kivity @ 2007-04-08 8:49 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f This has significant changes, so merits a review. Gregory Haskins wrote: > diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h > index fceeb84..c1923df 100644 > --- a/drivers/kvm/kvm.h > +++ b/drivers/kvm/kvm.h > @@ -236,6 +236,56 @@ struct kvm_pio_request { > int rep; > }; > > +struct kvm_io_device { > + unsigned long (*read)(struct kvm_io_device *this, > + gpa_t addr, > + int length); > + void (*write)(struct kvm_io_device *this, > + gpa_t addr, > + int length, > + unsigned long val); > + int (*in_range)(struct kvm_io_device *this, gpa_t addr); > + > + void *private; > +}; > + > +/* It would be nice to use something smarter than a linear search, TBD... > + * Thankfully we dont expect many devices to register (famous last words :), > + * so until then it will suffice. At least its abstracted so we can change > + * in one place. > + */ > /* * comment */ > +struct kvm_io_bus { > + int dev_count; > +#define NR_IOBUS_DEVS 6 > + struct kvm_io_device *devs[NR_IOBUS_DEVS]; > +}; > + > +static inline void kvm_io_bus_init(struct kvm_io_bus *bus) > +{ > + memset(bus, 0, sizeof(*bus)); > +} > + > +static inline struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) > +{ > + int i; > + > + for(i=0; i<bus->dev_count; i++) { > spaces, after for and around operators > + struct kvm_io_device *pos = bus->devs[i]; > + > + if (pos->in_range(pos, addr)) > + return pos; > + } > + > + return NULL; > +} > this is too long for an inline function. the others can also be made out-of-line. 
> + > +static inline void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) > +{ > + BUG_ON(bus->dev_count >= (NR_IOBUS_DEVS-1)); > + > + bus->devs[bus->dev_count++] = dev; > +} > + > I actually meant an array of objects, not pointers, but in the interest of reducing the amount of churn we'll change it if and when we see a problem there. > > +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, > + gpa_t addr) > "struct kvm_io_device *..." -- Do not meddle in the internals of kernels, for they are subtle and quick to panic. ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <4618AC94.3040700-atKUWr5tajBWk0Htik3J/w@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4618AC94.3040700-atKUWr5tajBWk0Htik3J/w@public.gmane.org> @ 2007-04-09 14:14 ` Gregory Haskins [not found] ` <461A03F3.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 0 siblings, 1 reply; 17+ messages in thread From: Gregory Haskins @ 2007-04-09 14:14 UTC (permalink / raw) To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Hi Avi, I believe I have incorporated all of the changes requested. Please find the result of that patch inline. Note that I finally understand what you were getting at with the array of objects thing. I didn't change it yet for the same reason that you mentioned: reduction of churn. However, now that I understand it, I see why you wanted it. Perhaps I will send a follow-on to this patch that uses your idea. But for now... --- KVM: Add support for in-kernel mmio handlers There is a near-term need for moving some of the emulation from userspace to the kernel (e.g. interrupt handling). This patch adds a construct for registering in-kernel MMIO handlers. The consumers of this interface will appear in a follow-on patch. Signed-off-by: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> --- drivers/kvm/kvm.h | 31 ++++++++++++++++++ drivers/kvm/kvm_main.c | 82 +++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 101 insertions(+), 12 deletions(-) diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index fceeb84..181099f 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -236,6 +236,36 @@ struct kvm_pio_request { int rep; }; +struct kvm_io_device { + unsigned long (*read)(struct kvm_io_device *this, + gpa_t addr, + int length); + void (*write)(struct kvm_io_device *this, + gpa_t addr, + int length, + unsigned long val); + int (*in_range)(struct kvm_io_device *this, gpa_t addr); + + void *private; +}; + +/* + * It would be nice to use something smarter than a linear search, TBD... 
+ * Thankfully we dont expect many devices to register (famous last words :), + * so until then it will suffice. At least its abstracted so we can change + * in one place. + */ +struct kvm_io_bus { + int dev_count; +#define NR_IOBUS_DEVS 6 + struct kvm_io_device *devs[NR_IOBUS_DEVS]; +}; + +void kvm_io_bus_init(struct kvm_io_bus *bus); +struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); +void kvm_io_bus_register_dev(struct kvm_io_bus *bus, + struct kvm_io_device *dev); + struct kvm_vcpu { struct kvm *kvm; union { @@ -345,6 +375,7 @@ struct kvm { unsigned long rmap_overflow; struct list_head vm_list; struct file *filp; + struct kvm_io_bus mmio_bus; }; struct kvm_stat { diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 4473174..c3c0059 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void) spin_lock_init(&kvm->lock); INIT_LIST_HEAD(&kvm->active_mmu_pages); + kvm_io_bus_init(&kvm->mmio_bus); for (i = 0; i < KVM_MAX_VCPUS; ++i) { struct kvm_vcpu *vcpu = &kvm->vcpus[i]; @@ -1015,12 +1016,25 @@ static int emulator_write_std(unsigned long addr, return X86EMUL_UNHANDLEABLE; } +static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, + gpa_t addr) +{ + /* + * Note that its important to have this wrapper function because + * in the very near future we will be checking for MMIOs against + * the LAPIC as well as the general MMIO bus + */ + return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); +} + static int emulator_read_emulated(unsigned long addr, unsigned long *val, unsigned int bytes, struct x86_emulate_ctxt *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_io_device *mmio_dev; + gpa_t gpa; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); @@ -1029,18 +1043,26 @@ static int emulator_read_emulated(unsigned long addr, } else if (emulator_read_std(addr, val, bytes, ctxt) == 
X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - else { - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; - vcpu->mmio_needed = 1; - vcpu->mmio_phys_addr = gpa; - vcpu->mmio_size = bytes; - vcpu->mmio_is_write = 0; + gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + if (gpa == UNMAPPED_GVA) + return X86EMUL_PROPAGATE_FAULT; - return X86EMUL_UNHANDLEABLE; + /* + * Is this MMIO handled locally? + */ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + *val = mmio_dev->read(mmio_dev, gpa, bytes); + return X86EMUL_CONTINUE; } + + vcpu->mmio_needed = 1; + vcpu->mmio_phys_addr = gpa; + vcpu->mmio_size = bytes; + vcpu->mmio_is_write = 0; + + return X86EMUL_UNHANDLEABLE; } static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, @@ -1068,8 +1090,9 @@ static int emulator_write_emulated(unsigned long addr, unsigned int bytes, struct x86_emulate_ctxt *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); + struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_io_device *mmio_dev; + gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -1077,6 +1100,15 @@ static int emulator_write_emulated(unsigned long addr, if (emulator_write_phys(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; + /* + * Is this MMIO handled locally? 
+ */ + mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); + if (mmio_dev) { + mmio_dev->write(mmio_dev, gpa, bytes, val); + return X86EMUL_CONTINUE; + } + vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = gpa; vcpu->mmio_size = bytes; @@ -2911,6 +2943,32 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, return NOTIFY_OK; } +void kvm_io_bus_init(struct kvm_io_bus *bus) +{ + memset(bus, 0, sizeof(*bus)); +} + +struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) +{ + int i; + + for (i = 0; i < bus->dev_count; i++) { + struct kvm_io_device *pos = bus->devs[i]; + + if (pos->in_range(pos, addr)) + return pos; + } + + return NULL; +} + +void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev) +{ + BUG_ON(bus->dev_count >= (NR_IOBUS_DEVS-1)); + + bus->devs[bus->dev_count++] = dev; +} + static struct notifier_block kvm_cpu_notifier = { .notifier_call = kvm_cpu_hotplug, .priority = 20, /* must be > scheduler priority */ ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply related [flat|nested] 17+ messages in thread
[parent not found: <461A03F3.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <461A03F3.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-10 7:56 ` Avi Kivity [not found] ` <461B4319.80608-atKUWr5tajBWk0Htik3J/w@public.gmane.org> 0 siblings, 1 reply; 17+ messages in thread From: Avi Kivity @ 2007-04-10 7:56 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: > Hi Avi, > > I believe I have incorporated all of the changes requested. Please find the result of that patch inline. > > Note that I finally understand what you were getting at with the array of objects thing. I didn't change it yet for the same reason that you mentioned: reduction of churn. However, now that I understand it, I see why you wanted it. Perhaps I will send a follow-on to this patch that uses your idea. But for now... > I will try to be more explicit in the future. > --- > > KVM: Add support for in-kernel mmio handlers > > There is a near-term need for moving some of the emulation from userspace to > the kernel (e.g. interrupt handling). This patch adds a construct for > registering in-kernel MMIO handlers. The consumers of this interface will > appear in a follow-on patch. > Given that the first consumer is the local apic, it makes sense to add the vcpu-local bus first, no? -- Do not meddle in the internals of kernels, for they are subtle and quick to panic. ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <461B4319.80608-atKUWr5tajBWk0Htik3J/w@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <461B4319.80608-atKUWr5tajBWk0Htik3J/w@public.gmane.org> @ 2007-04-10 11:49 ` Gregory Haskins [not found] ` <461B4176.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 0 siblings, 1 reply; 17+ messages in thread From: Gregory Haskins @ 2007-04-10 11:49 UTC (permalink / raw) To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f >>> On Tue, Apr 10, 2007 at 3:56 AM, in message <461B4319.80608-atKUWr5tajBWk0Htik3J/w@public.gmane.org>, Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org> wrote: > Gregory Haskins wrote: >> >> Note that I finally understand what you were getting at with the array of > objects thing. I didn't change it yet for the same reason that you > mentioned: reduction of churn. However, now that I understand it, I see why > you wanted it. Perhaps I will send a follow- on to this patch that uses your > idea. But for now... >> > > I will try to be more explicit in the future. > Nah, I think you explained it fine, I was just being too obtuse ;) When you first brought it up, I figured there was some secret deficiency in the linked-list implementation compared to arrays that I wasn't aware of and didn't question it. I see now that what you were getting at is that an array of objects with the range criteria in-line can be made to fit in a handful of cache-lines. Having an indirection to find the kvm_io_device* and another to find the in_range() function do not have this benefit (which both the list and the pointer-array as I have implemented suffer from this). >> --- >> >> KVM: Add support for in- kernel mmio handlers >> >> There is a near- term need for moving some of the emulation from userspace to >> the kernel (e.g. interrupt handling). This patch adds a construct for >> registering in- kernel MMIO handlers. The consumers of this interface will >> appear in a follow- on patch. 
>> > > > Given that the first consumer is the local apic, it makes sense to add > the vcpu- local bus first, no? I'm confused. I thought you didn't like the vpcu-local bus? I pulled it based on your feedback :) Please advise. Regards, -Greg ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <461B4176.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <461B4176.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-10 12:02 ` Avi Kivity 0 siblings, 0 replies; 17+ messages in thread From: Avi Kivity @ 2007-04-10 12:02 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: >> Given that the first consumer is the local apic, it makes sense to add >> the vcpu- local bus first, no? >> > > I'm confused. I thought you didn't like the vpcu-local bus? I pulled it based on your feedback :) Please advise. > > Sorry, my mistake. -- error compiling committee.c: too many arguments to function ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 2007-04-04 22:48 ` Chris Wright 2007-04-05 7:07 ` Avi Kivity @ 2007-04-05 7:46 ` Avi Kivity [not found] ` <4614A973.6020102-atKUWr5tajBWk0Htik3J/w@public.gmane.org> 2 siblings, 1 reply; 17+ messages in thread From: Avi Kivity @ 2007-04-05 7:46 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: > The MMIO registration code has been broken out as a new patch from the in-kernel APIC work with the following changes per Avi's request: > > 1) Supports dynamic registration > 2) Uses gpa_t addresses > 3) Explicit per-cpu mappings > > In addition, I have added the concept of distinct VCPU and VM level registrations (where VCPU devices will eclipse competing VM registrations (if any). This will be key down the road where LAPICs should use VCPU registration, but IOAPICs should use VM level. > > @@ -345,6 +394,7 @@ struct kvm { > unsigned long rmap_overflow; > struct list_head vm_list; > struct file *filp; > + struct kvm_io_bus mmio_bus; > }; > Additionally: The pit/pic are pio devices, not mmio, so they need their own bus. -- Do not meddle in the internals of kernels, for they are subtle and quick to panic. ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <4614A973.6020102-atKUWr5tajBWk0Htik3J/w@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614A973.6020102-atKUWr5tajBWk0Htik3J/w@public.gmane.org> @ 2007-04-05 13:23 ` Gregory Haskins [not found] ` <4614B1FE.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> 0 siblings, 1 reply; 17+ messages in thread From: Gregory Haskins @ 2007-04-05 13:23 UTC (permalink / raw) To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f >>> On Thu, Apr 5, 2007 at 3:46 AM, in message <4614A973.6020102-atKUWr5tajBWk0Htik3J/w@public.gmane.org>, Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org> wrote: > > The pit/pic are pio devices, not mmio, so they need their own bus. Good morning! Yeah, I knew that but I haven't coded any support for anything but mmio yet. I explicitly chose the "kvm_io_XX" name instead of kvm_mmio_XXX in hopes that I can use the same structure for a pio_bus object in addition to the existing mmio_bus objects. Since I haven't really spent much time thinking about the pio side yet, im not sure if it will translate or require its own. Thanks for your feedback on the patch. I will make the changes you requested and re-submit (note quite a few of them have been fixed in the follow up email I sent after talking with Chris Wright). Regards, -Greg ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
[parent not found: <4614B1FE.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>]
* Re: [PATCH] Support for in-kernel mmio handlers [not found] ` <4614B1FE.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org> @ 2007-04-05 13:45 ` Avi Kivity 0 siblings, 0 replies; 17+ messages in thread From: Avi Kivity @ 2007-04-05 13:45 UTC (permalink / raw) To: Gregory Haskins; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f Gregory Haskins wrote: >> The pit/pic are pio devices, not mmio, so they need their own bus. >> > > Yeah, I knew that but I haven't coded any support for anything but mmio yet. I explicitly chose the "kvm_io_XX" name instead of kvm_mmio_XXX in hopes that I can use the same structure for a pio_bus object in addition to the existing mmio_bus objects. Since I haven't really spent much time thinking about the pio side yet, im not sure if it will translate or require its own. > It should work. A minor nit is that gpa_t is a bit wide for pio, but that does no real harm. -- error compiling committee.c: too many arguments to function ------------------------------------------------------------------------- Take Surveys. Earn Cash. Influence the Future of IT Join SourceForge.net's Techsay panel and you'll get the chance to share your opinions on IT & business topics through brief surveys-and earn cash http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV ^ permalink raw reply [flat|nested] 17+ messages in thread
end of thread, other threads:[~2007-04-10 12:02 UTC | newest]
Thread overview: 17+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-04-04 20:42 [PATCH] Support for in-kernel mmio handlers Gregory Haskins
[not found] ` <4613C73F.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-04 22:48 ` Chris Wright
[not found] ` <20070404224806.GA15078-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org>
2007-04-04 23:04 ` Gregory Haskins
[not found] ` <20070405001021.GV10574@sequoia.sous-sol.org>
[not found] ` <20070405001021.GV10574-JyIX8gxvWYPr2PDY2+4mTGD2FQJk+8+b@public.gmane.org>
2007-04-05 0:21 ` Gregory Haskins
[not found] ` <4613E891.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-05 0:49 ` Chris Wright
2007-04-05 7:07 ` Avi Kivity
[not found] ` <4614A03C.2050707-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-04-05 7:29 ` Dor Laor
2007-04-05 14:58 ` Gregory Haskins
[not found] ` <4614C844.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-08 7:38 ` Avi Kivity
2007-04-08 8:49 ` Avi Kivity
[not found] ` <4618AC94.3040700-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-04-09 14:14 ` Gregory Haskins
[not found] ` <461A03F3.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-10 7:56 ` Avi Kivity
[not found] ` <461B4319.80608-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-04-10 11:49 ` Gregory Haskins
[not found] ` <461B4176.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-10 12:02 ` Avi Kivity
2007-04-05 7:46 ` Avi Kivity
[not found] ` <4614A973.6020102-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-04-05 13:23 ` Gregory Haskins
[not found] ` <4614B1FE.BA47.005A.0-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
2007-04-05 13:45 ` Avi Kivity
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox