From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: M A Young <m.a.young@durham.ac.uk>
Cc: xen-devel@lists.xensource.com
Subject: Re: xen 4 only seeing one keyboard and mouse
Date: Wed, 8 Sep 2010 11:44:22 -0400
Message-ID: <20100908154422.GA4569@dumpdata.com>
In-Reply-To: <alpine.LFD.2.00.1009031946190.3512@vega4.dur.ac.uk>
On Fri, Sep 03, 2010 at 07:50:06PM +0100, M A Young wrote:
> On Tue, 31 Aug 2010, Konrad Rzeszutek Wilk wrote:
>
> >If you could, can you instrument it to print the cfg->domain before 'vector_allocation_domain'
> >is called, and also instrument assign_irq_vector similarly to what you did with Xen?
> >
> >And also instrument the 'dest' value. Basically the idea is to see what the
> >per_cpu(vector) gets set to during bootup for legacy IRQs, similarly to what you did
> >with Xen.
>
> The kernel code I was working with (2.6.32) doesn't have the
> vector_allocation_domain section. I am attaching the debugging
> output I did get and the patch I used.
OK, so based on the output, the IO APIC pins for your two IRQs should have the destination set
to 1.
.. snip..
setup_IO_APIC_irq: cfg->domain=-1
setup_IO_APIC_irq: dest=1
IOAPIC[0]: Set routing entry (2-1 -> 0x31 -> IRQ 1 Mode:0 Active:0)
setup_IO_APIC_irq: cfg->domain=-1
setup_IO_APIC_irq: dest=1
IOAPIC[0]: Set routing entry (2-12 -> 0x3c -> IRQ 12 Mode:0 Active:0)
BUT when the IO APIC routing table is printed:
01 003 0 0 0 0 0 1 1 31
0c 003 0 0 0 0 0 1 1 3C
They are set to dest = 3!?
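For reference, here is roughly how I read those two print_IO_APIC lines. This is just a
user-space sketch, and I'm assuming the stock 2.6.32 column order
(NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect), so double-check it against your tree:

#include <stdio.h>

int main(void)
{
	/* Values copied from the "01 003 0 0 0 0 0 1 1 31" line above.
	 * Assumed column order (stock 2.6.32 print_IO_APIC):
	 *   NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect */
	unsigned int pin = 0x01, dest = 0x003, dest_mode = 1, vector = 0x31;
	int cpu;

	printf("pin 0x%02x -> vector 0x%02x, %s dest 0x%03x\n",
	       pin, vector, dest_mode ? "logical" : "physical", dest);

	/* In flat logical mode 'dest' is a bitmask of logical APIC IDs,
	 * so 0x001 means CPU0 only while 0x003 means CPU0 and CPU1. */
	if (dest_mode)
		for (cpu = 0; cpu < 8; cpu++)
			if (dest & (1u << cpu))
				printf("  targets CPU%d\n", cpu);
	return 0;
}

If that reading is right, the '1' vs '3' difference is simply CPU0-only vs CPU0+CPU1
in the logical destination bitmask.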
Somehow the IO APIC is being programmed without going through setup_IO_APIC_irq
and its friends. Also, per_cpu(vector_irq, 1)[0x31] is set to 1.
So, for the second problem, I think __setup_vector_irq is the one
that sets the vectors on the second CPU to correspond to the right IRQs.
But I am not sure how the IOAPIC pins for all IRQs below 16 get set to '3'.
Something happens between the initial IO_APIC IRQ initialization and the time
the table is printed that sets the destination to a new value.
I've piggybacked on your debug patch and added some extra instrumentation to see whether
__setup_vector_irq is responsible for setting the new per_cpu values. Those printk's _might_
not work, as all of that runs on a secondary CPU that is still being initialized..?
For the IO APIC programming I added a printk/dump_stack in ioapic_write_entry
to see who sets those pins on the IOAPIC to 3, and when.
Here is the patch:
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ec4e874..37482fe 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -69,7 +69,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/pci.h>
-
+#include <linux/kernel.h>
#define __apicdebuginit(type) static type __init
#define for_each_irq_pin(entry, head) \
for (entry = head; entry; entry = entry->next)
@@ -486,6 +486,11 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
+ if (pin == 1 || pin == 0xc) {
+ printk(KERN_INFO "Reprogramming PIN%d, dest=%d\n", pin, e.dest);
+ if (e.dest > 1)
+ dump_stack();
+ }
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, e);
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -1198,6 +1203,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
if (old_vector) {
cpumask_and(tmp_mask, mask, cpu_online_mask);
cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+ printk(KERN_INFO "old_vector: %d mask: %x\n", old_vector, tmp_mask->bits[0]);
if (!cpumask_empty(tmp_mask)) {
free_cpumask_var(tmp_mask);
return 0;
@@ -1214,6 +1220,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
vector = current_vector;
offset = current_offset;
+ printk(KERN_INFO "vector: %d, mask: %x, cpu: %d per_cpu:%x\n",
+ vector, tmp_mask->bits[0], cpu, per_cpu(vector_irq, cpu)[vector]);
next:
vector += 8;
if (vector >= first_system_vector) {
@@ -1237,8 +1245,11 @@ next:
cfg->move_in_progress = 1;
cpumask_copy(cfg->old_domain, cfg->domain);
}
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
per_cpu(vector_irq, new_cpu)[vector] = irq;
+ printk(KERN_WARNING "__assign_irq_vector: setting vector_irq[%d]=%d for cpu=%d\n",
+ vector, irq, new_cpu);
+ }
cfg->vector = vector;
cpumask_copy(cfg->domain, tmp_mask);
err = 0;
@@ -1304,6 +1315,8 @@ void __setup_vector_irq(int cpu)
if (!cpumask_test_cpu(cpu, cfg->domain))
continue;
vector = cfg->vector;
+ printk(KERN_INFO "%s: vector: %d on CPU %d set to IRQ: %d\n",
+ __FUNCTION__, vector, cpu, irq);
per_cpu(vector_irq, cpu)[vector] = irq;
}
/* Mark the free vectors */
@@ -1313,8 +1326,11 @@ void __setup_vector_irq(int cpu)
continue;
cfg = irq_cfg(irq);
- if (!cpumask_test_cpu(cpu, cfg->domain))
+ if (!cpumask_test_cpu(cpu, cfg->domain)) {
+ printk(KERN_INFO "%s: vector %d on CPU %d reset b/c not in affinity mask (%d)\n",
+ __FUNCTION__, vector, cpu, cfg->domain->bits[0]);
per_cpu(vector_irq, cpu)[vector] = -1;
+ }
}
}
@@ -1452,7 +1468,20 @@ int setup_ioapic_entry(int apic_id, int irq,
entry->mask = 1;
return 0;
}
-
+static void dump_vectors(const char *prefix) {
+ int cpu;
+ int vector;
+
+ for (vector = 0x30; vector < 0x3f; vector++) {
+ for_each_online_cpu(cpu) {
+ if (per_cpu(vector_irq, cpu)[vector] != -1)
+ printk(KERN_INFO "%s [vec:%d,cpu:%d] = irq:%d\n",
+ prefix,
+ vector, cpu,
+ per_cpu(vector_irq, cpu)[vector]);
+ }
+ }
+}
static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
int trigger, int polarity)
{
@@ -1465,10 +1494,15 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
cfg = desc->chip_data;
+ printk(KERN_WARNING "setup_IO_APIC_irq: cfg->domain=%d (vector: %d)\n", cfg->domain->bits[0], cfg->vector);
+
+ dump_vectors("PRE");
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
+ dump_vectors("PAST");
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ printk(KERN_WARNING "setup_IO_APIC_irq: dest=%d\n", dest);
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "