* xen: __hvm_pci_intx_assert should check for gsis remapped onto pirqs
2011-08-31 10:24 xen: fix hvm_domain_use_pirq's behavior stefano.stabellini
@ 2011-08-31 10:24 ` stefano.stabellini
2011-08-31 10:27 ` Stefano Stabellini
2011-08-31 10:24 ` xen: get_free_pirq: make sure that the returned pirq is allocated stefano.stabellini
2011-08-31 10:26 ` xen: fix hvm_domain_use_pirq's behavior Stefano Stabellini
2 siblings, 1 reply; 5+ messages in thread
From: stefano.stabellini @ 2011-08-31 10:24 UTC (permalink / raw)
To: xen-devel; +Cc: Stefano Stabellini
If the isa irq corresponding to a particular gsi is disabled while the
gsi is enabled, __hvm_pci_intx_assert will always inject the gsi through
the vioapic, even if the gsi has been remapped onto a pirq.
This patch makes sure that even in this case we inject the notification
appropriately.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
diff -r 5814da0753df xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c Tue Aug 30 11:56:22 2011 +0000
+++ b/xen/arch/x86/hvm/irq.c Tue Aug 30 11:56:37 2011 +0000
@@ -29,7 +29,7 @@
#include <asm/msi.h>
/* Must be called with hvm_domain->irq_lock hold */
-static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
{
struct pirq *pirq =
pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi));
@@ -40,6 +40,11 @@ static void assert_irq(struct domain *d,
return;
}
vioapic_irq_positive_edge(d, ioapic_gsi);
+}
+
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+ assert_gsi(d, ioapic_gsi);
vpic_irq_positive_edge(d, pic_irq);
}
@@ -66,7 +71,7 @@ static void __hvm_pci_intx_assert(
gsi = hvm_pci_intx_gsi(device, intx);
if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
- vioapic_irq_positive_edge(d, gsi);
+ assert_gsi(d, gsi);
link = hvm_pci_intx_link(device, intx);
isa_irq = hvm_irq->pci_link.route[link];
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: xen: fix hvm_domain_use_pirq's behavior
2011-08-31 10:24 xen: fix hvm_domain_use_pirq's behavior stefano.stabellini
2011-08-31 10:24 ` xen: __hvm_pci_intx_assert should check for gsis remapped onto pirqs stefano.stabellini
2011-08-31 10:24 ` xen: get_free_pirq: make sure that the returned pirq is allocated stefano.stabellini
@ 2011-08-31 10:26 ` Stefano Stabellini
2 siblings, 0 replies; 5+ messages in thread
From: Stefano Stabellini @ 2011-08-31 10:26 UTC (permalink / raw)
To: stefano.stabellini@eu.citrix.com; +Cc: xen-devel@lists.xensource.com
[-- Attachment #1: Type: text/plain, Size: 3244 bytes --]
On Wed, 31 Aug 2011, stefano.stabellini@eu.citrix.com wrote:
> hvm_domain_use_pirq should return true when the guest is using a certain
> pirq, no matter if the corresponding event channel is currently enabled
> or disabled. As an additional complication, qemu is going to request
> pirqs for passthrough devices even for Xen unaware HVM guests, so we
> need to wait for an event channel to be connected before considering the
> pirq of a passthrough device as "in use".
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
>
this patch should also be backported to 4.1; I am attaching the
backport to this email.
> diff -r 1515138fbd7b xen/arch/x86/irq.c
> --- a/xen/arch/x86/irq.c Wed Aug 24 09:33:10 2011 +0100
> +++ b/xen/arch/x86/irq.c Tue Aug 30 11:56:21 2011 +0000
> @@ -2024,6 +2024,5 @@ int unmap_domain_pirq_emuirq(struct doma
> bool_t hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
> {
> return is_hvm_domain(d) && pirq &&
> - pirq->arch.hvm.emuirq != IRQ_UNBOUND &&
> - pirq->evtchn != 0;
> + pirq->arch.hvm.emuirq != IRQ_UNBOUND;
> }
> diff -r 1515138fbd7b xen/arch/x86/physdev.c
> --- a/xen/arch/x86/physdev.c Wed Aug 24 09:33:10 2011 +0100
> +++ b/xen/arch/x86/physdev.c Tue Aug 30 11:56:21 2011 +0000
> @@ -196,9 +196,6 @@ int physdev_map_pirq(domid_t domid, int
> if ( ret == 0 )
> *pirq_p = pirq;
>
> - if ( !ret && is_hvm_domain(d) )
> - map_domain_emuirq_pirq(d, pirq, IRQ_PT);
> -
> done:
> spin_unlock(&d->event_lock);
> spin_unlock(&pcidevs_lock);
> @@ -271,7 +268,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> v->domain->arch.pv_domain.pirq_eoi_map )
> evtchn_unmask(pirq->evtchn);
> if ( !is_hvm_domain(v->domain) ||
> - pirq->arch.hvm.emuirq == IRQ_PT )
> + domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
> pirq_guest_eoi(pirq);
> spin_unlock(&v->domain->event_lock);
> ret = 0;
> @@ -331,7 +328,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> break;
> irq_status_query.flags = 0;
> if ( is_hvm_domain(v->domain) &&
> - domain_pirq_to_emuirq(v->domain, irq) != IRQ_PT )
> + domain_pirq_to_irq(v->domain, irq) <= 0 )
> {
> ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
> break;
> diff -r 1515138fbd7b xen/common/event_channel.c
> --- a/xen/common/event_channel.c Wed Aug 24 09:33:10 2011 +0100
> +++ b/xen/common/event_channel.c Tue Aug 30 11:56:21 2011 +0000
> @@ -366,6 +366,9 @@ static long evtchn_bind_pirq(evtchn_bind
>
> bind->port = port;
>
> + if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
> + map_domain_emuirq_pirq(d, pirq, IRQ_PT);
> +
> out:
> spin_unlock(&d->event_lock);
>
> @@ -419,6 +422,8 @@ static long __evtchn_close(struct domain
> pirq->evtchn = 0;
> pirq_cleanup_check(pirq, d1);
> unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
> + if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
> + unmap_domain_pirq_emuirq(d1, pirq->pirq);
> break;
> }
>
>
[-- Attachment #2: Type: text/plain, Size: 2587 bytes --]
diff -r 007a40379000 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Thu Aug 25 15:36:14 2011 +0100
+++ b/xen/arch/x86/irq.c Tue Aug 30 13:59:32 2011 +0000
@@ -1876,7 +1876,7 @@ int hvm_domain_use_pirq(struct domain *d
return 0;
emuirq = domain_pirq_to_emuirq(d, pirq);
- if ( emuirq != IRQ_UNBOUND && d->pirq_to_evtchn[pirq] != 0 )
+ if ( emuirq != IRQ_UNBOUND )
return 1;
else
return 0;
diff -r 007a40379000 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Thu Aug 25 15:36:14 2011 +0100
+++ b/xen/arch/x86/physdev.c Tue Aug 30 13:59:32 2011 +0000
@@ -202,9 +202,6 @@ static int physdev_map_pirq(struct physd
if ( ret == 0 )
map->pirq = pirq;
- if ( !ret && is_hvm_domain(d) )
- map_domain_emuirq_pirq(d, pirq, IRQ_PT);
-
done:
spin_unlock(&d->event_lock);
spin_unlock(&pcidevs_lock);
@@ -267,7 +264,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( v->domain->arch.pirq_eoi_map )
evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
if ( !is_hvm_domain(v->domain) ||
- domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
+ domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
ret = pirq_guest_eoi(v->domain, eoi.irq);
else
ret = 0;
@@ -326,7 +323,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
irq_status_query.flags = 0;
if ( is_hvm_domain(v->domain) &&
- domain_pirq_to_emuirq(v->domain, irq) != IRQ_PT )
+ domain_pirq_to_irq(v->domain, irq) <= 0 )
{
ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
diff -r 007a40379000 xen/common/event_channel.c
--- a/xen/common/event_channel.c Thu Aug 25 15:36:14 2011 +0100
+++ b/xen/common/event_channel.c Tue Aug 30 13:59:32 2011 +0000
@@ -361,6 +361,9 @@ static long evtchn_bind_pirq(evtchn_bind
bind->port = port;
+ if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
+ map_domain_emuirq_pirq(d, pirq, IRQ_PT);
+
out:
spin_unlock(&d->event_lock);
@@ -409,6 +412,8 @@ static long __evtchn_close(struct domain
pirq_guest_unbind(d1, chn1->u.pirq.irq);
d1->pirq_to_evtchn[chn1->u.pirq.irq] = 0;
unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
+ if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, chn1->u.pirq.irq) > 0 )
+ unmap_domain_pirq_emuirq(d1, chn1->u.pirq.irq);
break;
case ECS_VIRQ:
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply [flat|nested] 5+ messages in thread