kvm.vger.kernel.org archive mirror
* [PATCH] x86/irq: introduce repair_irq try to repair CPU vector
@ 2025-07-23  1:50 Hogan Wang
  2025-07-23 15:08 ` Thomas Gleixner
  2025-07-23 16:33 ` kernel test robot
  0 siblings, 2 replies; 3+ messages in thread
From: Hogan Wang @ 2025-07-23  1:50 UTC (permalink / raw)
  To: x86, dave.hansen, kvm, alex.williamson
  Cc: weidong.huang, yechuan, hogan.wang, wangxinxin.wang, jianjay.zhou,
	wangjie88

When the VM irqbalance service adjusts interrupt affinity
frequently, the guest kernel repeatedly masks MSI-X interrupts.
While the guest kernel is masking an MSI-X interrupt, the VM
exits to the hypervisor.

The QEMU emulator implements the switching between
kvm_interrupt and qemu_interrupt to achieve the MSI-X PBA
capability.

When the QEMU emulator calls the vfio_msi_set_vector_signal()
interface to switch between the kvm_interrupt and qemu_interrupt
eventfds, it frees and re-requests the IRQ, which correspondingly
clears and re-initializes the CPU vector.
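
(Roughly, the sequence inside vfio_msi_set_vector_signal() that this
refers to; paraphrased from drivers/vfio/pci/vfio_pci_intrs.c, not the
exact code:)

        if (ctx->trigger) {
                free_irq(irq, ctx->trigger);    /* tears the CPU vector down */
                eventfd_ctx_put(ctx->trigger);
                ctx->trigger = NULL;
        }
        /* ... switch to the new trigger eventfd ... */
        ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
                                                /* sets the CPU vector up again */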

While the CPU vector is being initialized, an interrupt that is
still pending in the APIC may be delivered to the kernel, in
which case the __common_interrupt() function is called to handle
it.

Since call_irq_handler() assigns vector_irq[vector] to
VECTOR_UNUSED without lock protection, that assignment can run
concurrently with the initialization of the CPU vector, leading
to vector_irq[vector] being mistakenly set to VECTOR_UNUSED.
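
(For reference, a condensed sketch of the pre-fix call_irq_handler()
path in arch/x86/kernel/irq.c that this refers to, simplified from the
code quoted later in this thread; the unlocked write is the key point:)

        desc = __this_cpu_read(vector_irq[vector]);
        if (!IS_ERR_OR_NULL(desc)) {
                handle_irq(desc, regs);
        } else if (desc != VECTOR_UNUSED) {
                /* VECTOR_SHUTDOWN/VECTOR_RETRIGGERED: the slot is cleared
                 * without vector_lock held, racing with a concurrent
                 * x86_vector_activate() on another CPU */
                __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }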

This ultimately prevents VFIO passthrough device interrupts
from being delivered, causing packet loss on network devices or
I/O hangs on disk devices.

This patch detects and repairs vector_irq[vector] after the
interrupt initialization is completed, ensuring that
vector_irq[vector] can be corrected if it is mistakenly set
to VECTOR_UNUSED.

Signed-off-by: Hogan Wang <hogan.wang@huawei.com>
---
 arch/x86/kernel/apic/vector.c     | 18 ++++++++++++++++++
 drivers/vfio/pci/vfio_pci_intrs.c |  2 ++
 include/linux/interrupt.h         |  3 +++
 include/linux/irqdomain.h         |  3 +++
 kernel/irq/irqdomain.c            | 27 +++++++++++++++++++++++++++
 kernel/irq/manage.c               | 18 ++++++++++++++++++
 6 files changed, 71 insertions(+)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 93069b13d3af..20164a9ce63b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -396,6 +396,23 @@ static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
+static void x86_vector_repair(struct irq_domain *dom, struct irq_data *irqd)
+{
+	struct apic_chip_data *apicd = apic_chip_data(irqd);
+	struct irq_desc *desc = irq_data_to_desc(irqd);
+	unsigned int vector = apicd->vector;
+	unsigned int cpu = apicd->cpu;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	if (per_cpu(vector_irq, cpu)[vector] != desc) {
+		per_cpu(vector_irq, cpu)[vector] = desc;
+		pr_warn("irq %u: repair vector %u.%u\n",
+			irqd->irq, cpu, vector);
+	}
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
 static int activate_reserved(struct irq_data *irqd)
 {
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
@@ -703,6 +720,7 @@ static const struct irq_domain_ops x86_vector_domain_ops = {
 	.free		= x86_vector_free_irqs,
 	.activate	= x86_vector_activate,
 	.deactivate	= x86_vector_deactivate,
+	.repair		= x86_vector_repair,
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
 	.debug_show	= x86_vector_debug_show,
 #endif
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 565966351dfa..6ea34a52878c 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -517,6 +517,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
 	}
 	ctx->trigger = trigger;
 
+	repair_irq(irq);
+
 	return 0;
 
 out_put_eventfd_ctx:
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 51b6484c0493..c5f6172ae1cd 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -203,6 +203,9 @@ extern void free_percpu_irq(unsigned int, void __percpu *);
 extern const void *free_nmi(unsigned int irq, void *dev_id);
 extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
 
+extern void repair_irq(unsigned int irq);
+
+
 struct device;
 
 extern int __must_check
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 7387d183029b..10538a13addc 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -69,6 +69,7 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
  * @translate:	Given @fwspec, decode the hardware irq number (@out_hwirq) and
  *		linux irq type value (@out_type). This is a generalised @xlate
  *		(over struct irq_fwspec) and is preferred if provided.
+ * @repair: repair one interrupt (@irqd).
  * @debug_show:	For domains to show specific data for an interrupt in debugfs.
  *
  * Functions below are provided by the driver and called whenever a new mapping
@@ -96,6 +97,7 @@ struct irq_domain_ops {
 	void	(*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
 	int	(*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
 			     unsigned long *out_hwirq, unsigned int *out_type);
+	void (*repair)(struct irq_domain *d, struct irq_data *irqd);
 #endif
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
 	void	(*debug_show)(struct seq_file *m, struct irq_domain *d,
@@ -563,6 +565,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned in
 void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
 int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
 void irq_domain_deactivate_irq(struct irq_data *irq_data);
+void irq_domain_repair_irq(struct irq_data *irq_data);
 
 /**
  * irq_domain_alloc_irqs - Allocate IRQs from domain
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index c8b6de09047b..d9c2aaa6247d 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1921,6 +1921,18 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_repair_irq(struct irq_data *irq_data)
+{
+	if (irq_data && irq_data->domain) {
+		struct irq_domain *domain = irq_data->domain;
+
+		if (domain->ops->repair)
+			domain->ops->repair(domain, irq_data);
+		if (irq_data->parent_data)
+			__irq_domain_repair_irq(irq_data->parent_data);
+	}
+}
+
 static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
 	if (irq_data && irq_data->domain) {
@@ -1989,6 +2001,21 @@ void irq_domain_deactivate_irq(struct irq_data *irq_data)
 	}
 }
 
+/**
+ * irq_domain_repair_irq - Call domain_ops->repair recursively to
+ *			       repair interrupt
+ * @irq_data: outermost irq_data associated with interrupt
+ *
+ * It calls domain_ops->repair to program interrupt controllers to repair
+ * interrupt delivery.
+ */
+void irq_domain_repair_irq(struct irq_data *irq_data)
+{
+	if (irqd_is_activated(irq_data))
+		__irq_domain_repair_irq(irq_data);
+}
+
+
 static void irq_domain_check_hierarchy(struct irq_domain *domain)
 {
 	/* Hierarchy irq_domains must implement callback alloc() */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..f2e6bed02f98 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1418,6 +1418,24 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 	return 0;
 }
 
+
+void repair_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	mutex_lock(&desc->request_mutex);
+	chip_bus_lock(desc);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	irq_domain_repair_irq(irq_desc_get_irq_data(desc));
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(desc);
+	mutex_unlock(&desc->request_mutex);
+}
+EXPORT_SYMBOL(repair_irq);
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
-- 
2.45.1



* Re: [PATCH] x86/irq: introduce repair_irq try to repair CPU vector
  2025-07-23  1:50 [PATCH] x86/irq: introduce repair_irq try to repair CPU vector Hogan Wang
@ 2025-07-23 15:08 ` Thomas Gleixner
  2025-07-23 16:33 ` kernel test robot
  1 sibling, 0 replies; 3+ messages in thread
From: Thomas Gleixner @ 2025-07-23 15:08 UTC (permalink / raw)
  To: Hogan Wang, x86, dave.hansen, kvm, alex.williamson
  Cc: weidong.huang, yechuan, hogan.wang, wangxinxin.wang, jianjay.zhou,
	wangjie88, Marc Zyngier

On Wed, Jul 23 2025 at 09:50, Hogan Wang wrote:

I have no idea what that subject line means.

> When the VM irqbalance service adjusts interrupt affinity
> frequently, the VM repeatedly masks MSI-x interrupts.

What has this to do with frequently? The point is that the interrupt is
masked at the PCI level for changing the affinity, which causes a VMEXIT
and activates the VFIO horrorshow via QEMU.

> During the guest kernel masking MSI-x interrupts, VM exits
> to the Hypervisor.
>
> The Qemu emulator implements the switching between
> kvm_interrupt and qemu_interrupt to achieve MSI-x PBA
> capability.

What's achieved here?

> When the Qemu emulator calls the vfio_msi_set_vector_signal
> interface to switch the kvm_interrupt and qemu_interrupt
> eventfd, it releases and requests IRQs, and correspondingly
> clears and initializes the CPU Vector.
>
> When initializing the CPU Vector, if an unhandled interrupt
> in the APIC is delivered to the kernel, the __common_interrupt
> function is called to handle the interrupt.

I really don't know what that means. Documentation clearly asks you to
provide a proper description of multi-CPU race conditions.

https://www.kernel.org/doc/html/latest/process/maintainer-tip.html#patch-submission-notes

I've reverse engineered this word salad and I have to tell you that this
is not a completely VFIO specific problem. VFIO just makes it more
likely to trigger and adds some VFIO specific twist on top.

> Since the call_irq_handler function assigns vector_irq[vector]

Please use proper function annotation, i.e.:

  Since call_irq_handler() assigns...

> to VECTOR_UNUSED without lock protection, the assignment of
> vector_irq[vector] and the initialization of the CPU Vector
> are concurrent, leading to vector_irq[vector] being mistakenly
> set to VECTOR_UNUSED.

It's not mistakenly. It's the obvious consequence.

As you pointed out correctly there is no lock protection, so why not
fix that in the first place?

> This ultimately results in the inability of VFIO passthrough
> device interrupts to be delivered, causing packet loss in
> network devices or IO hangs in disk devices.
>
> This patch detects and repairs vector_irq[vector] after the

# git grep 'This patch' Documentation/process/

> interrupt initialization is completed, ensuring that
> vector_irq[vector] can be corrected if it is mistakenly set
> to VECTOR_UNUSED.

That's a patently bad idea and does not even work under all
circumstances. See below.

> +static void x86_vector_repair(struct irq_domain *dom, struct irq_data *irqd)
> +{
> +	struct apic_chip_data *apicd = apic_chip_data(irqd);
> +	struct irq_desc *desc = irq_data_to_desc(irqd);
> +	unsigned int vector = apicd->vector;
> +	unsigned int cpu = apicd->cpu;
> +	unsigned long flags;
> +
> +	raw_spin_lock_irqsave(&vector_lock, flags);
> +	if (per_cpu(vector_irq, cpu)[vector] != desc) {
> +		per_cpu(vector_irq, cpu)[vector] = desc;
> +		pr_warn("irq %u: repair vector %u.%u\n",
> +			irqd->irq, cpu, vector);
> +	}
> +	raw_spin_unlock_irqrestore(&vector_lock, flags);
> +}

> --- a/drivers/vfio/pci/vfio_pci_intrs.c
> +++ b/drivers/vfio/pci/vfio_pci_intrs.c
> @@ -517,6 +517,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
>  	}
>  	ctx->trigger = trigger;
>  
> +	repair_irq(irq);
> +

How is that supposed to cure the problem completely?

Let me reverse engineer the actual problem you are trying to solve from
the breadcrumbs you provided:

	CPU0
	vmenter(vCPU0)
   	....
         msi_set_affinity()
           mask(MSI-X)
             vmexit()
        ...

        free_irq()
1         mask();        

2         __synchronize_irq()

          msi_domain_deactivate()
3           write_msg(0);
          x86_vector_deactivate()
4           per_cpu(vector_irq, cpu)[vector] = VECTOR_SHUTDOWN;
       
        request_irq()
          x86_vector_activate()
5           per_cpu(vector_irq, cpu)[vector] = desc;
          msi_domain_activate()
6           write_msg(msg);
7         unmask();

After #1 the device _cannot_ raise the original vector anymore.

After #7 the device _can_ raise an interrupt on the new vector/target
         CPU pair, which might be the same as the previous one.

So the only case where this causes the problem you describe is when

  A) the device raises the old vector _before_ #1. If it raises the
     interrupt _after_ #1, the device is broken.

  B) the old target CPU is delayed handling the interrupt (interrupts
     disabled, NMI, ....)

  C) As a consequence #2 - __synchronize_irq() - cannot observe the
     interrupt handler being executed on the target CPU and x86 has no
     way to query the APIC IRR of a remote CPU to detect the delayed
     case.

  D) x86_vector_deactivate() sets vector_irq to VECTOR_SHUTDOWN

  E) the old target CPU handles the interrupt and observes
     VECTOR_SHUTDOWN and is delayed again

  F) request_irq() gets the same vector/target CPU combo back and writes
     the descriptor into vector_irq

  G) the old target CPU writes VECTOR_UNUSED

In a proper side by side flow the broken case looks like this:

	CPU0				CPU1
	vmenter(vCPU0)
   	....
         msi_set_affinity()
           mask(MSI-X)
             vmexit()
        ...                             interrupt is raised in APIC
                                        but not handled
        free_irq()
          mask();        

          __synchronize_irq()

          msi_domain_deactivate()
            write_msg(0);
          x86_vector_deactivate()
            per_cpu(vector_irq, cpu)[vector] = VECTOR_SHUTDOWN;
       
        request_irq()                   interrupt is handled and
                                        observes VECTOR_SHUTDOWN
          x86_vector_activate()
            per_cpu(vector_irq, cpu)[vector] = desc;

                                        writes VECTOR_UNUSED

          msi_domain_activate()
            write_msg(msg);
          unmask();

That's the kind of analysis which needs to be provided and is required
to understand the root cause. And if you look carefully at that
analysis, then this is even a problem for regular host-side device
drivers:

	CPU0				CPU1
                                        interrupt is raised in APIC
                                        but not handled
        disable_irq_in_device();
        free_irq()
          mask();

          __synchronize_irq()

          msi_domain_deactivate()
            write_msg(0);
          x86_vector_deactivate()
            per_cpu(vector_irq, cpu)[vector] = VECTOR_SHUTDOWN;
       
        request_irq()                   interrupt is handled and
                                        observes VECTOR_SHUTDOWN
          x86_vector_activate()
            per_cpu(vector_irq, cpu)[vector] = desc;

                                        writes VECTOR_UNUSED

          msi_domain_activate()
            write_msg(msg);
          unmask();
        enable_irq_in_device();

See?

Now what to do about that?

Definitely not by hacking some ill-defined repair function into the
code, which neither is guaranteed to work nor fixes the general problem.

Worse, it exposes to drivers a functionality which should not be there
in the first place, and they then go and invoke it randomly and for the
very wrong reasons.

As you described correctly, there is a lack of locking in the x86
interrupt entry code. That's the obvious thing to fix. See uncompiled
and untested patch below.

That solves the general overwrite problem _and_ does not rely on an
interrupt sent by the device right afterwards.

But it does not and _cannot_ solve the other VFIO specific problem,
which comes with free_irq()/request_irq() on an active device:

	CPU0				CPU1
	vmenter(vCPU0)
   	....
         msi_set_affinity()
           mask(MSI-X)
             vmexit()
#1      ...                             interrupt is raised in APIC
                                        but not handled
        free_irq()
          mask();        

          __synchronize_irq()

          msi_domain_deactivate()
            write_msg(0);
          x86_vector_deactivate()
            per_cpu(vector_irq, cpu)[vector] = VECTOR_SHUTDOWN;
       
#2                                      interrupt is handled and
                                        observes VECTOR_SHUTDOWN
                                        writes VECTOR_UNUSED
       request_irq()
         x86_vector_activate()
            per_cpu(vector_irq, cpu)[vector] = desc;

          msi_domain_activate()
            write_msg(msg);
          unmask();

#2 discards the interrupt as spurious _after_ shutdown and acknowledges
the APIC with EOI. That means the interrupt is lost.
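
(That is visible in the dispatch logic of the patch above: when no valid
descriptor is found for the vector, the interrupt is merely acknowledged
and thereby dropped:)

        /* DEFINE_IDTENTRY_IRQ(common_interrupt), as in the patch above */
        if (unlikely(!call_irq_handler(vector, regs)))
                apic_eoi();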

So when the device logic is:

        raise_interrupt()
          if (!wait_for_driver_ack)
             wait_for_driver_ack = true;
             send_msi_message();

then the device waits forever or in the best case until timeout / driver
interaction that something handles the interrupt and reads the device
status register, which clears 'wait_for_driver_ack'.

That's not a theoretical case, that's what real world hardware devices
implement. If it does not apply to your device, that does not mean that
the problem does not exist.
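
(To make that concrete, a purely illustrative device-side model in
pseudo-C; struct dev_state and send_msi_message() are hypothetical and
do not refer to any specific device:)

        struct dev_state {
                bool wait_for_driver_ack;
        };

        /* One MSI per unacknowledged event; further events are coalesced */
        static void device_raise_event(struct dev_state *d)
        {
                if (!d->wait_for_driver_ack) {
                        d->wait_for_driver_ack = true;  /* set before sending */
                        send_msi_message(d);            /* this one MSI may be lost */
                }
                /*
                 * Only a driver read of the status register clears
                 * wait_for_driver_ack.  If the single MSI above is
                 * discarded as spurious, nothing ever reads the register
                 * and the device appears hung.
                 */
        }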

For a regular device driver this is a non-problem once the locking fix
is applied. But for VFIO this _is_ an unfixable problem and the magic
repair hack can't fix it either.

I've pointed out a gazillion times before that freeing an interrupt
without quiescing the device interrupt first is a patently bad idea.

Unless VFIO/QEMU has some secret magic to handle that particular case,
the brute force workaround for this is to unconditionally inject the
interrupt in QEMU after returning from the VFIO syscall. Maybe that's
the case today already, but I can't be bothered to stare at that
code. That's an exercise left for the virt folks. If that exists, then
this wants to be explained in the change log for completeness' sake and
ideally in a comment in that VFIO function.

As the fix here is x86 specific, I looked at other architectures as
well. AFAICT on a quick skim, it seems (no guarantee though) none of
them is affected by the generic issue, but _all_ of them are affected by
the VFIO specific one.

Thanks,

        tglx

---
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -256,26 +256,46 @@ static __always_inline void handle_irq(s
 		__handle_irq(desc, regs);
 }
 
-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
+static struct irq_desc *reevaluate_vector(int vector)
 {
-	struct irq_desc *desc;
-	int ret = 0;
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+
+	if (!IS_ERR_OR_NULL(desc))
+		return desc;
+
+	if (desc == VECTOR_UNUSED) {
+		pr_emerg_ratelimited("%d.%u No irq handler for vector\n",
+				     smp_processor_id(), vector);
+	} else {
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+	}
+	return NULL;
+}
+
+static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
+{
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
 
-	desc = __this_cpu_read(vector_irq[vector]);
 	if (likely(!IS_ERR_OR_NULL(desc))) {
 		handle_irq(desc, regs);
-	} else {
-		ret = -EINVAL;
-		if (desc == VECTOR_UNUSED) {
-			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
-					     __func__, smp_processor_id(),
-					     vector);
-		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
-		}
+		return true;
 	}
 
-	return ret;
+	/*
+	 * Reevaluate with vector_lock held.
+	 *
+	 * FIXME: Add a big fat comment explaining the problem
+	 */
+	lock_vector_lock();
+	desc = reevaluate_vector(vector);
+	unlock_vector_lock();
+
+	if (!desc)
+		return false;
+
+	/* Using @desc is safe here as it is RCU protected */
+	handle_irq(desc, regs);
+	return true;
 }
 
 /*
@@ -289,7 +309,7 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
 	/* entry code tells RCU that we're not quiescent.  Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	if (unlikely(call_irq_handler(vector, regs)))
+	if (unlikely(!call_irq_handler(vector, regs)))
 		apic_eoi();
 
 	set_irq_regs(old_regs);


* Re: [PATCH] x86/irq: introduce repair_irq try to repair CPU vector
  2025-07-23  1:50 [PATCH] x86/irq: introduce repair_irq try to repair CPU vector Hogan Wang
  2025-07-23 15:08 ` Thomas Gleixner
@ 2025-07-23 16:33 ` kernel test robot
  1 sibling, 0 replies; 3+ messages in thread
From: kernel test robot @ 2025-07-23 16:33 UTC (permalink / raw)
  To: Hogan Wang, x86, dave.hansen, kvm, alex.williamson
  Cc: oe-kbuild-all, weidong.huang, yechuan, hogan.wang,
	wangxinxin.wang, jianjay.zhou, wangjie88

Hi Hogan,

kernel test robot noticed the following build errors:

[auto build test ERROR on tip/irq/core]
[also build test ERROR on tip/master tip/x86/core awilliam-vfio/next linus/master v6.16-rc7 next-20250723]
[cannot apply to tip/auto-latest awilliam-vfio/for-linus]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Hogan-Wang/x86-irq-introduce-repair_irq-try-to-repair-CPU-vector/20250723-095327
base:   tip/irq/core
patch link:    https://lore.kernel.org/r/20250723015045.1701-1-hogan.wang%40huawei.com
patch subject: [PATCH] x86/irq: introduce repair_irq try to repair CPU vector
config: openrisc-allnoconfig (https://download.01.org/0day-ci/archive/20250724/202507240030.11iG6frT-lkp@intel.com/config)
compiler: or1k-linux-gcc (GCC) 15.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250724/202507240030.11iG6frT-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507240030.11iG6frT-lkp@intel.com/

All errors (new ones prefixed by >>):

   kernel/irq/manage.c: In function 'repair_irq':
>> kernel/irq/manage.c:1431:9: error: implicit declaration of function 'irq_domain_repair_irq'; did you mean 'irq_domain_free_irqs'? [-Wimplicit-function-declaration]
    1431 |         irq_domain_repair_irq(irq_desc_get_irq_data(desc));
         |         ^~~~~~~~~~~~~~~~~~~~~
         |         irq_domain_free_irqs


vim +1431 kernel/irq/manage.c

  1420	
  1421	
  1422	void repair_irq(unsigned int irq)
  1423	{
  1424		struct irq_desc *desc = irq_to_desc(irq);
  1425		unsigned long flags;
  1426	
  1427		mutex_lock(&desc->request_mutex);
  1428		chip_bus_lock(desc);
  1429		raw_spin_lock_irqsave(&desc->lock, flags);
  1430	
> 1431		irq_domain_repair_irq(irq_desc_get_irq_data(desc));
  1432	
  1433		raw_spin_unlock_irqrestore(&desc->lock, flags);
  1434		chip_bus_sync_unlock(desc);
  1435		mutex_unlock(&desc->request_mutex);
  1436	}
  1437	EXPORT_SYMBOL(repair_irq);
  1438	
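
(An aside on this failure, my reading rather than the robot's: the
irq_domain_repair_irq() prototype added to include/linux/irqdomain.h
sits in the CONFIG_IRQ_DOMAIN_HIERARCHY section, so a config without
that option, such as this openrisc allnoconfig, never sees a
declaration. A minimal sketch of a stub that would avoid the build
error, assuming that placement:)

        #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
        void irq_domain_repair_irq(struct irq_data *irq_data);
        #else
        static inline void irq_domain_repair_irq(struct irq_data *irq_data) { }
        #endif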

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
