From: David Vrabel <david.vrabel@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: David Vrabel <david.vrabel@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Ian Campbell <ian.campbell@citrix.com>
Subject: [PATCHv1 1/2] passthrough: use per-interrupt lock when injecting an interrupt
Date: Fri, 23 Oct 2015 12:05:21 +0100
Message-ID: <1445598322-22154-2-git-send-email-david.vrabel@citrix.com>
In-Reply-To: <1445598322-22154-1-git-send-email-david.vrabel@citrix.com>
The use of the per-domain event_lock in hvm_dirq_assist() does not scale
with many VCPUs or interrupts.
Add a per-interrupt lock to reduce contention. When an interrupt for a
passthrough device is being set up or torn down, we must take both the
event_lock and this new lock.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
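(Note, below the fold so it stays out of the commit message: a minimal
sketch of the lock nesting this patch introduces, condensed from the
hunks below.  The fields and helpers are the ones the patch touches;
the fragments are illustrative, not standalone runnable code.)

    /* Slow path (pt_irq_create_bind/pt_irq_destroy_bind): take the
     * per-domain event_lock first, then nest the new per-interrupt
     * lock inside it, so the two paths order their locks consistently. */
    spin_lock(&d->event_lock);
    spin_lock(&pirq_dpci->lock);
    while ( pt_pirq_softirq_active(pirq_dpci) )
    {
        /* Drop only the inner lock to let a scheduled softirq drain. */
        spin_unlock(&pirq_dpci->lock);
        cpu_relax();
        spin_lock(&pirq_dpci->lock);
    }
    /* ... update the binding ... */
    spin_unlock(&pirq_dpci->lock);
    spin_unlock(&d->event_lock);

    /* Hot path (hvm_dirq_assist): only the per-interrupt lock is
     * needed, so injecting one interrupt no longer serialises all
     * VCPUs and interrupts of the domain on event_lock. */
    spin_lock(&pirq_dpci->lock);
    /* ... deliver the pending interrupt to the guest ... */
    spin_unlock(&pirq_dpci->lock);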
xen/drivers/passthrough/io.c | 34 +++++++++++++++++++++++-----------
xen/include/xen/hvm/irq.h | 1 +
2 files changed, 24 insertions(+), 11 deletions(-)
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index bda9374..7c86c20 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -106,7 +106,7 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
{
struct domain *d = pirq_dpci->dom;
- ASSERT(spin_is_locked(&d->event_lock));
+ ASSERT(spin_is_locked(&pirq_dpci->lock));
switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) )
{
@@ -209,7 +209,6 @@ int pt_irq_create_bind(
if ( pirq < 0 || pirq >= d->nr_pirqs )
return -EINVAL;
- restart:
spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
@@ -237,6 +236,8 @@ int pt_irq_create_bind(
}
pirq_dpci = pirq_dpci(info);
+ spin_lock(&pirq_dpci->lock);
+
/*
* A crude 'while' loop with us dropping the spinlock and giving
* the softirq_dpci a chance to run.
@@ -245,11 +246,11 @@ int pt_irq_create_bind(
* would have spun forever and would do the same thing (wait to flush out
* outstanding hvm_dirq_assist calls).
*/
- if ( pt_pirq_softirq_active(pirq_dpci) )
+ while ( pt_pirq_softirq_active(pirq_dpci) )
{
- spin_unlock(&d->event_lock);
+ spin_unlock(&pirq_dpci->lock);
cpu_relax();
- goto restart;
+ spin_lock(&pirq_dpci->lock);
}
switch ( pt_irq_bind->irq_type )
@@ -301,6 +302,7 @@ int pt_irq_create_bind(
pirq_dpci->dom = NULL;
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
return rc;
}
@@ -311,6 +313,7 @@ int pt_irq_create_bind(
if ( (pirq_dpci->flags & mask) != mask )
{
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
return -EBUSY;
}
@@ -331,6 +334,7 @@ int pt_irq_create_bind(
dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
if ( dest_vcpu_id >= 0 )
hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
@@ -351,6 +355,7 @@ int pt_irq_create_bind(
if ( !digl || !girq )
{
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
xfree(girq);
xfree(digl);
@@ -412,6 +417,7 @@ int pt_irq_create_bind(
hvm_irq_dpci->link_cnt[link]--;
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
xfree(girq);
xfree(digl);
@@ -419,6 +425,7 @@ int pt_irq_create_bind(
}
}
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
if ( iommu_verbose )
@@ -430,6 +437,7 @@ int pt_irq_create_bind(
}
default:
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
return -EOPNOTSUPP;
}
@@ -481,6 +489,8 @@ int pt_irq_destroy_bind(
pirq = pirq_info(d, machine_gsi);
pirq_dpci = pirq_dpci(pirq);
+ spin_lock(&pirq_dpci->lock);
+
if ( pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
{
unsigned int bus = pt_irq_bind->u.pci.bus;
@@ -549,6 +559,7 @@ int pt_irq_destroy_bind(
pirq_cleanup_check(pirq, d);
}
+ spin_unlock(&pirq_dpci->lock);
spin_unlock(&d->event_lock);
if ( what && iommu_verbose )
@@ -566,6 +577,7 @@ int pt_irq_destroy_bind(
void pt_pirq_init(struct domain *d, struct hvm_pirq_dpci *dpci)
{
+ spin_lock_init(&dpci->lock);
INIT_LIST_HEAD(&dpci->digl_list);
dpci->gmsi.dest_vcpu_id = -1;
}
@@ -621,7 +633,7 @@ int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq)
return 1;
}
-/* called with d->event_lock held */
+/* called with pirq_dpci->lock held */
static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
{
irq_desc_t *desc;
@@ -675,7 +687,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
ASSERT(d->arch.hvm_domain.irq.dpci);
- spin_lock(&d->event_lock);
+ spin_lock(&pirq_dpci->lock);
if ( test_and_clear_bool(pirq_dpci->masked) )
{
struct pirq *pirq = dpci_pirq(pirq_dpci);
@@ -687,7 +699,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
{
- spin_unlock(&d->event_lock);
+ spin_unlock(&pirq_dpci->lock);
return;
}
}
@@ -695,7 +707,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
{
vmsi_deliver_pirq(d, pirq_dpci);
- spin_unlock(&d->event_lock);
+ spin_unlock(&pirq_dpci->lock);
return;
}
@@ -709,7 +721,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
__msi_pirq_eoi(pirq_dpci);
- spin_unlock(&d->event_lock);
+ spin_unlock(&pirq_dpci->lock);
return;
}
@@ -723,7 +735,7 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
ASSERT(pt_irq_need_timer(pirq_dpci->flags));
set_timer(&pirq_dpci->timer, NOW() + PT_IRQ_TIME_OUT);
}
- spin_unlock(&d->event_lock);
+ spin_unlock(&pirq_dpci->lock);
}
static void __hvm_dpci_eoi(struct domain *d,
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 4c9cb20..8b8e461 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -91,6 +91,7 @@ struct hvm_irq_dpci {
/* Machine IRQ to guest device/intx mapping. */
struct hvm_pirq_dpci {
+ spinlock_t lock;
uint32_t flags;
unsigned int state;
bool_t masked;
--
2.1.4