From: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
To: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, x86@kernel.org,
	yrl.pp-manager.tt@hitachi.com,
	Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>,
	Avi Kivity <avi@redhat.com>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>
Subject: [RFC v2 PATCH 13/21] x86/apic: IRQ vector remapping for slave CPUs
Date: Thu, 06 Sep 2012 20:28:27 +0900
Message-ID: <20120906112827.13320.30551.stgit@kvmdev>
In-Reply-To: <20120906112718.13320.8231.stgit@kvmdev>

Add a facility for slave CPUs to use an IRQ vector different from the
one used by online CPUs.

When an alternative vector for an IRQ is registered via
remap_slave_vector_irq() and the IRQ's affinity is set to slave CPUs
only, the device is configured to use the alternative vector.

The current patch supports only MSI on Intel CPUs with the IOMMU's
IRQ remapper.

This is intended to be used for routing interrupts directly to a KVM
guest running on slave CPUs, which do not cause VM exits on external
interrupts.
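
For illustration, a minimal, hypothetical sketch of how a caller (such
as the direct interrupt delivery patches later in this series) might
use the new interface. route_irq_to_slave_guest, guest_vector and
slave_mask are made-up names, and this assumes the earlier patches of
the series allow an IRQ affinity mask that contains slave CPUs only:

  #include <linux/interrupt.h>
  #include <linux/cpumask.h>
  #include <asm/irq.h>

  static int route_irq_to_slave_guest(int irq, u8 guest_vector,
                                      const struct cpumask *slave_mask)
  {
          int ret;

          /* Register the guest's vector for this IRQ on each slave CPU */
          remap_slave_vector_irq(irq, guest_vector, slave_mask);

          /*
           * Once the affinity mask no longer intersects cpu_online_mask,
           * msi_set_affinity() programs the remapped vector into the
           * device instead of cfg->vector.
           */
          ret = irq_set_affinity(irq, slave_mask);
          if (ret)
                  revert_slave_vector_irq(irq, slave_mask);
          return ret;
  }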

Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---

 arch/x86/include/asm/irq.h          |   15 ++++++++
 arch/x86/kernel/apic/io_apic.c      |   68 ++++++++++++++++++++++++++++++++++-
 drivers/iommu/intel_irq_remapping.c |    2 +
 3 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb..84756f7 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,19 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_SLAVE_CPU
+extern void remap_slave_vector_irq(int irq, int vector,
+				   const struct cpumask *mask);
+extern void revert_slave_vector_irq(int irq, const struct cpumask *mask);
+extern u8 get_remapped_slave_vector(u8 vector, unsigned int irq,
+				    const struct cpumask *mask);
+#else
+static inline u8 get_remapped_slave_vector(u8 vector, unsigned int irq,
+					   const struct cpumask *mask)
+{
+	return vector;
+}
+#endif
+
+
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 0cd2682..167b001 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1266,6 +1266,69 @@ void __setup_vector_irq(int cpu)
 	raw_spin_unlock(&vector_lock);
 }
 
+#ifdef CONFIG_SLAVE_CPU
+
+/* vector remap table for slave CPUs, indexed by IRQ */
+static DEFINE_PER_CPU(u8[NR_IRQS], slave_vector_remap_tbl) = {
+	[0 ... NR_IRQS - 1] = 0,
+};
+
+void remap_slave_vector_irq(int irq, int vector, const struct cpumask *mask)
+{
+	int cpu;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	for_each_cpu(cpu, mask) {
+		BUG_ON(!cpu_slave(cpu));
+		per_cpu(slave_vector_remap_tbl, cpu)[irq] = vector;
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+EXPORT_SYMBOL_GPL(remap_slave_vector_irq);
+
+void revert_slave_vector_irq(int irq, const struct cpumask *mask)
+{
+	int cpu;
+	u8 vector;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	for_each_cpu(cpu, mask) {
+		BUG_ON(!cpu_slave(cpu));
+		vector = per_cpu(slave_vector_remap_tbl, cpu)[irq];
+		if (vector) {
+			per_cpu(vector_irq, cpu)[vector] = -1;
+			per_cpu(slave_vector_remap_tbl, cpu)[irq] = 0;
+		}
+	}
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+EXPORT_SYMBOL_GPL(revert_slave_vector_irq);
+
+/* If all target CPUs are slave CPUs, return the remapped vector */
+u8 get_remapped_slave_vector(u8 vector, unsigned int irq,
+			     const struct cpumask *mask)
+{
+	u8 slave_vector;
+
+	if (vector < FIRST_EXTERNAL_VECTOR ||
+	    cpumask_intersects(mask, cpu_online_mask))
+		return vector;
+
+	slave_vector = per_cpu(slave_vector_remap_tbl,
+			       cpumask_first(mask))[irq];
+	if (slave_vector >= FIRST_EXTERNAL_VECTOR)
+		vector = slave_vector;
+
+	pr_info("slave vector remap: irq: %d => vector: %d\n", irq, vector);
+
+	return vector;
+}
+
+#endif
+
 static struct irq_chip ioapic_chip;
 
 #ifdef CONFIG_X86_32
@@ -3133,6 +3196,7 @@ static int
 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 {
 	struct irq_cfg *cfg = data->chip_data;
+	int vector = cfg->vector;
 	struct msi_msg msg;
 	unsigned int dest;
 
@@ -3141,8 +3205,10 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 
 	__get_cached_msi_msg(data->msi_desc, &msg);
 
+	vector = get_remapped_slave_vector(vector, data->irq, mask);
+
 	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(cfg->vector);
+	msg.data |= MSI_DATA_VECTOR(vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index df38334..471d23f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -970,7 +970,7 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 		goto error;
 	}
 
-	irte.vector = cfg->vector;
+	irte.vector = get_remapped_slave_vector(cfg->vector, irq, mask);
 	irte.dest_id = IRTE_DEST(dest);
 
 	/*


