From mboxrd@z Thu Jan 1 00:00:00 1970 From: Gleb Natapov Subject: Re: [PATCH 1/4] Move irq routing data structure to rcu locking Date: Mon, 13 Jul 2009 16:15:34 +0300 Message-ID: <20090713131534.GJ28046@redhat.com> References: <1247400233-24243-1-git-send-email-gleb@redhat.com> <1247400233-24243-2-git-send-email-gleb@redhat.com> <4A5B302D.2040802@novell.com> Mime-Version: 1.0 Content-Type: text/plain; charset=us-ascii Cc: avi@redhat.com, kvm@vger.kernel.org To: Gregory Haskins Return-path: Received: from mx2.redhat.com ([66.187.237.31]:43583 "EHLO mx2.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1755773AbZGMNPj (ORCPT ); Mon, 13 Jul 2009 09:15:39 -0400 Content-Disposition: inline In-Reply-To: <4A5B302D.2040802@novell.com> Sender: kvm-owner@vger.kernel.org List-ID: On Mon, Jul 13, 2009 at 09:01:33AM -0400, Gregory Haskins wrote: > Gleb Natapov wrote: > > Signed-off-by: Gleb Natapov > > --- > > include/linux/kvm_host.h | 2 +- > > virt/kvm/irq_comm.c | 55 +++++++++++++++++++++------------------------- > > virt/kvm/kvm_main.c | 1 - > > 3 files changed, 26 insertions(+), 32 deletions(-) > > > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h > > index f54a0d3..6756b3e 100644 > > --- a/include/linux/kvm_host.h > > +++ b/include/linux/kvm_host.h > > @@ -161,7 +161,7 @@ struct kvm { > > > > struct mutex irq_lock; > > #ifdef CONFIG_HAVE_KVM_IRQCHIP > > - struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */ > > + struct kvm_kernel_irq_routing_entry *irq_routing; > > struct hlist_head mask_notifier_list; > > #endif > > > > diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c > > index 7af18b8..b2fa3f6 100644 > > --- a/virt/kvm/irq_comm.c > > +++ b/virt/kvm/irq_comm.c > > @@ -148,7 +148,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level) > > * IOAPIC. So set the bit in both. The guest will ignore > > * writes to the unused one. 
> > */ > > - list_for_each_entry(e, &kvm->irq_routing, link) > > + rcu_read_lock(); > > + for (e = rcu_dereference(kvm->irq_routing); e && e->set; e++) { > > > > Hi Gleb, > I haven't had a chance to fully digest and review these patches, but > one thing I did notice is that you seem to be converting from a list to > an open-coded structure. I am just curious why you made this design > decision instead of using the RCU variant of list? > It is not a scary "open-coded structure"; it's just an array :) As I responded to Michael, the idea is to move MSIs out of irq_routing, make the array much smaller, and either use the gsi as an index into the array or use a hash table instead of looping over all entries. For now I can justify the array as a more cache-friendly data structure, since we scan it linearly. > Regards, > -Greg > > > if (e->gsi == irq) { > > int r = e->set(e, kvm, sig_level); > > if (r < 0) > > @@ -156,6 +157,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level) > > > > ret = r + ((ret < 0) ? 
0 : ret); > > } > > + } > > + rcu_read_unlock(); > > return ret; > > } > > > > @@ -168,12 +171,15 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) > > > > trace_kvm_ack_irq(irqchip, pin); > > > > - list_for_each_entry(e, &kvm->irq_routing, link) > > + rcu_read_lock(); > > + for (e = rcu_dereference(kvm->irq_routing); e && e->set; e++) { > > if (e->irqchip.irqchip == irqchip && > > e->irqchip.pin == pin) { > > gsi = e->gsi; > > break; > > } > > + } > > + rcu_read_unlock(); > > > > hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link) > > if (kian->gsi == gsi) > > @@ -264,19 +270,11 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) > > kimn->func(kimn, mask); > > } > > > > -static void __kvm_free_irq_routing(struct list_head *irq_routing) > > -{ > > - struct kvm_kernel_irq_routing_entry *e, *n; > > - > > - list_for_each_entry_safe(e, n, irq_routing, link) > > - kfree(e); > > -} > > - > > void kvm_free_irq_routing(struct kvm *kvm) > > { > > - mutex_lock(&kvm->irq_lock); > > - __kvm_free_irq_routing(&kvm->irq_routing); > > - mutex_unlock(&kvm->irq_lock); > > + /* Called only during vm destruction. 
Nobody can use the pointer > > + at this stage */ > > + kfree(kvm->irq_routing); > > } > > > > static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, > > @@ -326,43 +324,40 @@ int kvm_set_irq_routing(struct kvm *kvm, > > unsigned nr, > > unsigned flags) > > { > > - struct list_head irq_list = LIST_HEAD_INIT(irq_list); > > - struct list_head tmp = LIST_HEAD_INIT(tmp); > > - struct kvm_kernel_irq_routing_entry *e = NULL; > > + struct kvm_kernel_irq_routing_entry *new, *old; > > unsigned i; > > int r; > > > > + /* last element is left zeroed and indicates the end of the array */ > > + new = kzalloc(sizeof(*new) * (nr + 1), GFP_KERNEL); > > + > > + if (!new) > > + return -ENOMEM; > > + > > for (i = 0; i < nr; ++i) { > > r = -EINVAL; > > if (ue->gsi >= KVM_MAX_IRQ_ROUTES) > > goto out; > > if (ue->flags) > > goto out; > > - r = -ENOMEM; > > - e = kzalloc(sizeof(*e), GFP_KERNEL); > > - if (!e) > > - goto out; > > - r = setup_routing_entry(e, ue); > > + r = setup_routing_entry(new + i, ue); > > if (r) > > goto out; > > ++ue; > > - list_add(&e->link, &irq_list); > > - e = NULL; > > } > > > > mutex_lock(&kvm->irq_lock); > > - list_splice(&kvm->irq_routing, &tmp); > > - INIT_LIST_HEAD(&kvm->irq_routing); > > - list_splice(&irq_list, &kvm->irq_routing); > > - INIT_LIST_HEAD(&irq_list); > > - list_splice(&tmp, &irq_list); > > + old = kvm->irq_routing; > > + rcu_assign_pointer(kvm->irq_routing, new); > > mutex_unlock(&kvm->irq_lock); > > > > + synchronize_rcu(); > > + > > r = 0; > > + new = old; > > > > out: > > - kfree(e); > > - __kvm_free_irq_routing(&irq_list); > > + kfree(new); > > return r; > > } > > > > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > > index cf20dc1..24013b4 100644 > > --- a/virt/kvm/kvm_main.c > > +++ b/virt/kvm/kvm_main.c > > @@ -945,7 +945,6 @@ static struct kvm *kvm_create_vm(void) > > if (IS_ERR(kvm)) > > goto out; > > #ifdef CONFIG_HAVE_KVM_IRQCHIP > > - INIT_LIST_HEAD(&kvm->irq_routing); > > 
INIT_HLIST_HEAD(&kvm->mask_notifier_list); > > #endif > > > > > > -- Gleb.