From: "Jan Beulich"
Subject: [PATCH 6/6] passthrough: use domain pirq as index of struct hvm_irq_dpci's hvm_timer array
Date: Tue, 05 Apr 2011 09:24:26 +0100
To: "xen-devel@lists.xensource.com"

Since d->nr_pirqs is guaranteed not to be larger than nr_irqs, indexing
arrays by the former ought to be preferred. In the case at hand, the
indices used so far had to be computed specially in a number of places,
whereas the indices used now are all readily available.

This opens up the possibility of folding the ->mirq[] and ->hvm_timer[]
members of struct hvm_irq_dpci into a single array, possibly with some
members overlaid in a union to reduce size (see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02006.html).
Such a space saving wouldn't, however, suffice to generally get the
respective allocation sizes below PAGE_SIZE, not even when converting
the array of structures into an array of pointers to structures.
Whether a multi-level lookup mechanism would make sense here is
questionable, as for domains other than Dom0 (which isn't HVM, and
hence shouldn't use these data structures at all - see
http://lists.xensource.com/archives/html/xen-devel/2011-03/msg02004.html)
only very few entries can be expected to be in use. An obvious
alternative would be to use rb or radix trees (both currently only
used in tmem).
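For illustration only, a folded per-pirq entry along the lines suggested
above might look like the sketch below. This is a hypothetical layout,
not an actual Xen definition: the name hvm_pirq_dpci and the exact member
subset are assumptions. It relies on the observation that
pt_irq_need_timer() is false for MSI-bound pirqs, so the gmsi member and
the timer are never needed at the same time and could share storage:

    /* Hypothetical sketch - not the current struct hvm_irq_dpci layout. */
    struct hvm_pirq_dpci {
        uint32_t flags;
        int pending;
        struct list_head digl_list;      /* guest IRQs this pirq maps to */
        struct domain *dom;
        union {
            /* pt_irq_need_timer(flags) is false for MSI pirqs, so these
             * two members could overlap without loss. */
            struct hvm_gmsi_info gmsi;   /* MSI-bound pirqs */
            struct timer timer;          /* legacy GSIs with EOI timeout */
        } u;
    };

    /* A single d->nr_pirqs-sized allocation would then replace the
     * separate ->mirq[] and ->hvm_timer[] arrays:
     *     dpci->pirq = xmalloc_array(struct hvm_pirq_dpci, d->nr_pirqs);
     */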
Signed-off-by: Jan Beulich

--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -135,7 +135,7 @@ int pt_irq_create_bind_vtd(
                                             BITS_TO_LONGS(d->nr_pirqs));
         hvm_irq_dpci->mapping = xmalloc_array(unsigned long,
                                               BITS_TO_LONGS(d->nr_pirqs));
-        hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, nr_irqs);
+        hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, d->nr_pirqs);
         if ( !hvm_irq_dpci->mirq ||
              !hvm_irq_dpci->dirq_mask ||
              !hvm_irq_dpci->mapping ||
@@ -150,7 +150,7 @@ int pt_irq_create_bind_vtd(
         bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
         bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
         memset(hvm_irq_dpci->hvm_timer, 0,
-               nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
+               d->nr_pirqs * sizeof(*hvm_irq_dpci->hvm_timer));
         for ( int i = 0; i < d->nr_pirqs; i++ ) {
             INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
             hvm_irq_dpci->mirq[i].gmsi.dest_vcpu_id = -1;
@@ -258,7 +258,6 @@ int pt_irq_create_bind_vtd(
         /* Bind the same mirq once in the same domain */
         if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping))
         {
-            unsigned int irq = domain_pirq_to_irq(d, machine_gsi);
             unsigned int share;
 
             hvm_irq_dpci->mirq[machine_gsi].dom = d;
@@ -278,14 +277,14 @@ int pt_irq_create_bind_vtd(
 
             /* Init timer before binding */
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-                init_timer(&hvm_irq_dpci->hvm_timer[irq],
+                init_timer(&hvm_irq_dpci->hvm_timer[machine_gsi],
                            pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
             /* Deal with gsi for legacy devices */
             rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
             if ( unlikely(rc) )
             {
                 if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-                    kill_timer(&hvm_irq_dpci->hvm_timer[irq]);
+                    kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
                 hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
                 clear_bit(machine_gsi, hvm_irq_dpci->mapping);
                 list_del(&girq->list);
@@ -374,7 +373,7 @@ int pt_irq_destroy_bind_vtd(
         pirq_guest_unbind(d, machine_gsi);
         msixtbl_pt_unregister(d, machine_gsi);
         if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
-            kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
+            kill_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
         hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
         hvm_irq_dpci->mirq[machine_gsi].flags = 0;
         clear_bit(machine_gsi, hvm_irq_dpci->mapping);
@@ -516,7 +515,7 @@ static void hvm_dirq_assist(unsigned lon
              * will never be deasserted.
              */
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[pirq].flags) )
-                set_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, pirq)],
+                set_timer(&hvm_irq_dpci->hvm_timer[pirq],
                           NOW() + PT_IRQ_TIME_OUT);
             spin_unlock(&d->event_lock);
         }
@@ -544,7 +543,7 @@ static void __hvm_dpci_eoi(struct domain
          !pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
         return;
 
-    stop_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
+    stop_timer(&hvm_irq_dpci->hvm_timer[machine_gsi]);
     pirq_guest_eoi(d, machine_gsi);
 }
 
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -262,7 +262,7 @@ static void pci_clean_dpci_irqs(struct d
             pirq_guest_unbind(d, i);
 
             if ( pt_irq_need_timer(hvm_irq_dpci->mirq[i].flags) )
-                kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
+                kill_timer(&hvm_irq_dpci->hvm_timer[i]);
 
             list_for_each_safe ( digl_list, tmp,
                                  &hvm_irq_dpci->mirq[i].digl_list )
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -101,7 +101,7 @@ void hvm_dpci_isairq_eoi(struct domain *
                     hvm_pci_intx_deassert(d, digl->device, digl->intx);
                     if ( --dpci->mirq[i].pending == 0 )
                     {
-                        stop_timer(&dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
+                        stop_timer(&dpci->hvm_timer[i]);
                         pirq_guest_eoi(d, i);
                     }
                 }