* [PATCH v6 1/5] arm/irq: Keep track of irq affinities
2026-03-12 9:39 [PATCH v6 0/5] Implement CPU hotplug on Arm Mykyta Poturai
@ 2026-03-12 9:39 ` Mykyta Poturai
2026-03-18 8:05 ` Bertrand Marquis
2026-03-19 1:34 ` Volodymyr Babchuk
2026-03-12 9:39 ` [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations Mykyta Poturai
` (3 subsequent siblings)
4 siblings, 2 replies; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-12 9:39 UTC (permalink / raw)
To: xen-devel@lists.xenproject.org
Cc: Mykyta Poturai, Stefano Stabellini, Julien Grall,
Bertrand Marquis, Michal Orzel, Volodymyr Babchuk
Currently on Arm the desc->affinity mask of an irq is never updated,
which makes it hard to know the actual affinity of an interrupt.
Fix this by updating the field in irq_set_affinity.
Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* add missing locking around irq_set_affinity calls
v4->v5:
* add locking
v3->v4:
* patch introduced
---
xen/arch/arm/gic-vgic.c | 2 ++
xen/arch/arm/irq.c | 9 +++++++--
xen/arch/arm/vgic.c | 14 ++++++++++++--
xen/arch/arm/vgic/vgic-mmio-v2.c | 11 +++++------
xen/arch/arm/vgic/vgic.c | 15 ++++++++-------
5 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
index ea48c5375a..5253caf002 100644
--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -232,7 +232,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
{
struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(v_target->processor));
+ spin_unlock(&p->desc->lock);
clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
}
}
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 73e58a5108..7204bc2b68 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -216,10 +216,15 @@ static inline struct domain *irq_get_domain(struct irq_desc *desc)
return irq_get_guest_info(desc)->d;
}
+/* Must be called with desc->lock held */
void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
- if ( desc != NULL )
- desc->handler->set_affinity(desc, mask);
+ if ( desc == NULL )
+ return;
+
+ ASSERT(spin_is_locked(&desc->lock));
+ cpumask_copy(desc->affinity, mask);
+ desc->handler->set_affinity(desc, mask);
}
int request_irq(unsigned int irq, unsigned int irqflags,
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 6647071ad4..c59f6873db 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -445,7 +445,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
if ( list_empty(&p->inflight) )
{
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(new->processor));
+ spin_unlock(&p->desc->lock);
spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
return true;
}
@@ -453,7 +455,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
if ( !list_empty(&p->lr_queue) )
{
vgic_remove_irq_from_queues(old, p);
+ spin_lock(&p->desc->lock);
irq_set_affinity(p->desc, cpumask_of(new->processor));
+ spin_unlock(&p->desc->lock);
spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
vgic_inject_irq(new->domain, new, irq, true);
return true;
@@ -473,6 +477,7 @@ void arch_move_irqs(struct vcpu *v)
struct domain *d = v->domain;
struct pending_irq *p;
struct vcpu *v_target;
+ unsigned long flags;
int i;
/*
@@ -494,7 +499,13 @@ void arch_move_irqs(struct vcpu *v)
p = irq_to_pending(v_target, virq);
if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+ {
+ if ( !p->desc )
+ continue;
+ spin_lock_irqsave(&p->desc->lock, flags);
irq_set_affinity(p->desc, cpu_mask);
+ spin_unlock_irqrestore(&p->desc->lock, flags);
+ }
}
}
@@ -574,8 +585,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, unsigned int n)
spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
if ( p->desc != NULL )
{
- irq_set_affinity(p->desc, cpumask_of(v_target->processor));
spin_lock_irqsave(&p->desc->lock, flags);
+ irq_set_affinity(p->desc, cpumask_of(v_target->processor));
/*
* The irq cannot be a PPI, we only support delivery of SPIs
* to guests.
@@ -944,4 +955,3 @@ void vgic_check_inflight_irqs_pending(struct vcpu *v, unsigned int rank, uint32_
* indent-tabs-mode: nil
* End:
*/
-
diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
index b7c2d7ce99..fc04741ca1 100644
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -159,24 +159,23 @@ static void vgic_mmio_write_target(struct vcpu *vcpu,
for ( i = 0; i < len; i++ )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);
+ struct irq_desc *desc = irq_to_desc(irq->hwintid);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
irq->targets = (val >> (i * 8)) & cpu_mask;
if ( irq->targets )
{
irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
if ( irq->hw )
- {
- struct irq_desc *desc = irq_to_desc(irq->hwintid);
-
irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
- }
}
else
irq->target_vcpu = NULL;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
vgic_put_irq(vcpu->domain, irq);
}
}
diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
index b2c0e1873a..81ba4099ef 100644
--- a/xen/arch/arm/vgic/vgic.c
+++ b/xen/arch/arm/vgic/vgic.c
@@ -812,21 +812,22 @@ void arch_move_irqs(struct vcpu *v)
{
struct vgic_irq *irq = vgic_get_irq(d, NULL, i + VGIC_NR_PRIVATE_IRQS);
unsigned long flags;
+ irq_desc_t *desc;
if ( !irq )
continue;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ desc = irq_to_desc(irq->hwintid);
- /* Only hardware mapped vIRQs that are targeting this vCPU. */
- if ( irq->hw && irq->target_vcpu == v)
- {
- irq_desc_t *desc = irq_to_desc(irq->hwintid);
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
+ /* Only hardware mapped vIRQs that are targeting this vCPU. */
+ if ( irq->hw && irq->target_vcpu == v )
irq_set_affinity(desc, cpumask_of(v->processor));
- }
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
vgic_put_irq(d, irq);
}
}
--
2.51.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH v6 1/5] arm/irq: Keep track of irq affinities
2026-03-12 9:39 ` [PATCH v6 1/5] arm/irq: Keep track of irq affinities Mykyta Poturai
@ 2026-03-18 8:05 ` Bertrand Marquis
2026-03-19 1:34 ` Volodymyr Babchuk
1 sibling, 0 replies; 15+ messages in thread
From: Bertrand Marquis @ 2026-03-18 8:05 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Stefano Stabellini, Julien Grall,
Michal Orzel, Volodymyr Babchuk
Hi Mykyta,
> On 12 Mar 2026, at 10:39, Mykyta Poturai <Mykyta_Poturai@epam.com> wrote:
>
> Currently on Arm the desc->affinity mask of an irq is never updated,
> which makes it hard to know the actual affinity of an interrupt.
>
> Fix this by updating the field in irq_set_affinity.
The commit message here should also explain what has changed regarding
locks and irq affinity and justify why code is modified in other places (vgic.c,
vgic-mmio) to follow the new locking requirements for it.
>
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
>
With that fixed:
Reviewed-by: Bertrand Marquis <bertrand.marquis@arm.com>
Cheers
Bertrand
> ---
> v5->v6:
> * add missing locking around irq_set_affinity calls
>
> v4->v5:
> * add locking
>
> v3->v4:
> * patch introduced
> ---
> xen/arch/arm/gic-vgic.c | 2 ++
> xen/arch/arm/irq.c | 9 +++++++--
> xen/arch/arm/vgic.c | 14 ++++++++++++--
> xen/arch/arm/vgic/vgic-mmio-v2.c | 11 +++++------
> xen/arch/arm/vgic/vgic.c | 15 ++++++++-------
> 5 files changed, 34 insertions(+), 17 deletions(-)
>
> diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
> index ea48c5375a..5253caf002 100644
> --- a/xen/arch/arm/gic-vgic.c
> +++ b/xen/arch/arm/gic-vgic.c
> @@ -232,7 +232,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
> if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
> {
> struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> + spin_unlock(&p->desc->lock);
> clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
> }
> }
> diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
> index 73e58a5108..7204bc2b68 100644
> --- a/xen/arch/arm/irq.c
> +++ b/xen/arch/arm/irq.c
> @@ -216,10 +216,15 @@ static inline struct domain *irq_get_domain(struct irq_desc *desc)
> return irq_get_guest_info(desc)->d;
> }
>
> +/* Must be called with desc->lock held */
> void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
> {
> - if ( desc != NULL )
> - desc->handler->set_affinity(desc, mask);
> + if ( desc == NULL )
> + return;
> +
> + ASSERT(spin_is_locked(&desc->lock));
> + cpumask_copy(desc->affinity, mask);
> + desc->handler->set_affinity(desc, mask);
> }
>
> int request_irq(unsigned int irq, unsigned int irqflags,
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 6647071ad4..c59f6873db 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -445,7 +445,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
>
> if ( list_empty(&p->inflight) )
> {
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(new->processor));
> + spin_unlock(&p->desc->lock);
> spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
> return true;
> }
> @@ -453,7 +455,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
> if ( !list_empty(&p->lr_queue) )
> {
> vgic_remove_irq_from_queues(old, p);
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(new->processor));
> + spin_unlock(&p->desc->lock);
> spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
> vgic_inject_irq(new->domain, new, irq, true);
> return true;
> @@ -473,6 +477,7 @@ void arch_move_irqs(struct vcpu *v)
> struct domain *d = v->domain;
> struct pending_irq *p;
> struct vcpu *v_target;
> + unsigned long flags;
> int i;
>
> /*
> @@ -494,7 +499,13 @@ void arch_move_irqs(struct vcpu *v)
> p = irq_to_pending(v_target, virq);
>
> if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
> + {
> + if ( !p->desc )
> + continue;
> + spin_lock_irqsave(&p->desc->lock, flags);
> irq_set_affinity(p->desc, cpu_mask);
> + spin_unlock_irqrestore(&p->desc->lock, flags);
> + }
> }
> }
>
> @@ -574,8 +585,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, unsigned int n)
> spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
> if ( p->desc != NULL )
> {
> - irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> spin_lock_irqsave(&p->desc->lock, flags);
> + irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> /*
> * The irq cannot be a PPI, we only support delivery of SPIs
> * to guests.
> @@ -944,4 +955,3 @@ void vgic_check_inflight_irqs_pending(struct vcpu *v, unsigned int rank, uint32_
> * indent-tabs-mode: nil
> * End:
> */
> -
> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
> index b7c2d7ce99..fc04741ca1 100644
> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c
> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
> @@ -159,24 +159,23 @@ static void vgic_mmio_write_target(struct vcpu *vcpu,
> for ( i = 0; i < len; i++ )
> {
> struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);
> + struct irq_desc *desc = irq_to_desc(irq->hwintid);
>
> - spin_lock_irqsave(&irq->irq_lock, flags);
> + spin_lock_irqsave(&desc->lock, flags);
> + spin_lock(&irq->irq_lock);
>
> irq->targets = (val >> (i * 8)) & cpu_mask;
> if ( irq->targets )
> {
> irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
> if ( irq->hw )
> - {
> - struct irq_desc *desc = irq_to_desc(irq->hwintid);
> -
> irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
> - }
> }
> else
> irq->target_vcpu = NULL;
>
> - spin_unlock_irqrestore(&irq->irq_lock, flags);
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&desc->lock, flags);
> vgic_put_irq(vcpu->domain, irq);
> }
> }
> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
> index b2c0e1873a..81ba4099ef 100644
> --- a/xen/arch/arm/vgic/vgic.c
> +++ b/xen/arch/arm/vgic/vgic.c
> @@ -812,21 +812,22 @@ void arch_move_irqs(struct vcpu *v)
> {
> struct vgic_irq *irq = vgic_get_irq(d, NULL, i + VGIC_NR_PRIVATE_IRQS);
> unsigned long flags;
> + irq_desc_t *desc;
>
> if ( !irq )
> continue;
>
> - spin_lock_irqsave(&irq->irq_lock, flags);
> + desc = irq_to_desc(irq->hwintid);
>
> - /* Only hardware mapped vIRQs that are targeting this vCPU. */
> - if ( irq->hw && irq->target_vcpu == v)
> - {
> - irq_desc_t *desc = irq_to_desc(irq->hwintid);
> + spin_lock_irqsave(&desc->lock, flags);
> + spin_lock(&irq->irq_lock);
>
> + /* Only hardware mapped vIRQs that are targeting this vCPU. */
> + if ( irq->hw && irq->target_vcpu == v )
> irq_set_affinity(desc, cpumask_of(v->processor));
> - }
>
> - spin_unlock_irqrestore(&irq->irq_lock, flags);
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&desc->lock, flags);
> vgic_put_irq(d, irq);
> }
> }
> --
> 2.51.2
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH v6 1/5] arm/irq: Keep track of irq affinities
2026-03-12 9:39 ` [PATCH v6 1/5] arm/irq: Keep track of irq affinities Mykyta Poturai
2026-03-18 8:05 ` Bertrand Marquis
@ 2026-03-19 1:34 ` Volodymyr Babchuk
2026-03-19 1:39 ` Volodymyr Babchuk
1 sibling, 1 reply; 15+ messages in thread
From: Volodymyr Babchuk @ 2026-03-19 1:34 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Stefano Stabellini, Julien Grall,
Bertrand Marquis, Michal Orzel
Hi Mykyta,
Mykyta Poturai <Mykyta_Poturai@epam.com> writes:
> Currently on Arm the desc->affinity mask of an irq is never updated,
> which makes it hard to know the actual affinity of an interrupt.
With this change you'll track affinity of hardware interrupts, but pure
virtual interrupts still will not be tracked. Is it intended behaviour?
I believe it should be mentioned in the commit message.
>
> Fix this by updating the field in irq_set_affinity.
>
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
>
> ---
> v5->v6:
> * add missing locking around irq_set_affinity calls
>
> v4->v5:
> * add locking
>
> v3->v4:
> * patch introduced
> ---
> xen/arch/arm/gic-vgic.c | 2 ++
> xen/arch/arm/irq.c | 9 +++++++--
> xen/arch/arm/vgic.c | 14 ++++++++++++--
> xen/arch/arm/vgic/vgic-mmio-v2.c | 11 +++++------
> xen/arch/arm/vgic/vgic.c | 15 ++++++++-------
> 5 files changed, 34 insertions(+), 17 deletions(-)
>
> diff --git a/xen/arch/arm/gic-vgic.c b/xen/arch/arm/gic-vgic.c
> index ea48c5375a..5253caf002 100644
> --- a/xen/arch/arm/gic-vgic.c
> +++ b/xen/arch/arm/gic-vgic.c
> @@ -232,7 +232,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
> if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
> {
> struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> + spin_unlock(&p->desc->lock);
> clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
> }
> }
> diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
> index 73e58a5108..7204bc2b68 100644
> --- a/xen/arch/arm/irq.c
> +++ b/xen/arch/arm/irq.c
> @@ -216,10 +216,15 @@ static inline struct domain *irq_get_domain(struct irq_desc *desc)
> return irq_get_guest_info(desc)->d;
> }
>
> +/* Must be called with desc->lock held */
> void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
> {
> - if ( desc != NULL )
> - desc->handler->set_affinity(desc, mask);
> + if ( desc == NULL )
> + return;
> +
> + ASSERT(spin_is_locked(&desc->lock));
> + cpumask_copy(desc->affinity, mask);
> + desc->handler->set_affinity(desc, mask);
> }
>
> int request_irq(unsigned int irq, unsigned int irqflags,
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 6647071ad4..c59f6873db 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -445,7 +445,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
>
> if ( list_empty(&p->inflight) )
> {
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(new->processor));
> + spin_unlock(&p->desc->lock);
> spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
> return true;
> }
> @@ -453,7 +455,9 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
> if ( !list_empty(&p->lr_queue) )
> {
> vgic_remove_irq_from_queues(old, p);
> + spin_lock(&p->desc->lock);
> irq_set_affinity(p->desc, cpumask_of(new->processor));
> + spin_unlock(&p->desc->lock);
> spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
> vgic_inject_irq(new->domain, new, irq, true);
> return true;
> @@ -473,6 +477,7 @@ void arch_move_irqs(struct vcpu *v)
> struct domain *d = v->domain;
> struct pending_irq *p;
> struct vcpu *v_target;
> + unsigned long flags;
> int i;
>
> /*
> @@ -494,7 +499,13 @@ void arch_move_irqs(struct vcpu *v)
> p = irq_to_pending(v_target, virq);
>
> if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
> + {
> + if ( !p->desc )
> + continue;
> + spin_lock_irqsave(&p->desc->lock, flags);
> irq_set_affinity(p->desc, cpu_mask);
> + spin_unlock_irqrestore(&p->desc->lock, flags);
> + }
> }
> }
>
> @@ -574,8 +585,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, unsigned int n)
> spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
> if ( p->desc != NULL )
> {
> - irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> spin_lock_irqsave(&p->desc->lock, flags);
> + irq_set_affinity(p->desc, cpumask_of(v_target->processor));
> /*
> * The irq cannot be a PPI, we only support delivery of SPIs
> * to guests.
> @@ -944,4 +955,3 @@ void vgic_check_inflight_irqs_pending(struct vcpu *v, unsigned int rank, uint32_
> * indent-tabs-mode: nil
> * End:
> */
> -
> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c
> index b7c2d7ce99..fc04741ca1 100644
> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c
> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
> @@ -159,24 +159,23 @@ static void vgic_mmio_write_target(struct vcpu *vcpu,
> for ( i = 0; i < len; i++ )
> {
> struct vgic_irq *irq = vgic_get_irq(vcpu->domain, NULL, intid + i);
> + struct irq_desc *desc = irq_to_desc(irq->hwintid);
>
> - spin_lock_irqsave(&irq->irq_lock, flags);
> + spin_lock_irqsave(&desc->lock, flags);
> + spin_lock(&irq->irq_lock);
>
> irq->targets = (val >> (i * 8)) & cpu_mask;
> if ( irq->targets )
> {
> irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];
> if ( irq->hw )
> - {
> - struct irq_desc *desc = irq_to_desc(irq->hwintid);
> -
> irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
> - }
> }
> else
> irq->target_vcpu = NULL;
>
> - spin_unlock_irqrestore(&irq->irq_lock, flags);
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&desc->lock, flags);
> vgic_put_irq(vcpu->domain, irq);
> }
> }
> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c
> index b2c0e1873a..81ba4099ef 100644
> --- a/xen/arch/arm/vgic/vgic.c
> +++ b/xen/arch/arm/vgic/vgic.c
> @@ -812,21 +812,22 @@ void arch_move_irqs(struct vcpu *v)
> {
> struct vgic_irq *irq = vgic_get_irq(d, NULL, i + VGIC_NR_PRIVATE_IRQS);
> unsigned long flags;
> + irq_desc_t *desc;
>
> if ( !irq )
> continue;
>
> - spin_lock_irqsave(&irq->irq_lock, flags);
> + desc = irq_to_desc(irq->hwintid);
>
> - /* Only hardware mapped vIRQs that are targeting this vCPU. */
> - if ( irq->hw && irq->target_vcpu == v)
> - {
> - irq_desc_t *desc = irq_to_desc(irq->hwintid);
> + spin_lock_irqsave(&desc->lock, flags);
> + spin_lock(&irq->irq_lock);
>
> + /* Only hardware mapped vIRQs that are targeting this vCPU. */
> + if ( irq->hw && irq->target_vcpu == v )
> irq_set_affinity(desc, cpumask_of(v->processor));
> - }
>
> - spin_unlock_irqrestore(&irq->irq_lock, flags);
> + spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&desc->lock, flags);
> vgic_put_irq(d, irq);
> }
> }
--
WBR, Volodymyr
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH v6 1/5] arm/irq: Keep track of irq affinities
2026-03-19 1:34 ` Volodymyr Babchuk
@ 2026-03-19 1:39 ` Volodymyr Babchuk
0 siblings, 0 replies; 15+ messages in thread
From: Volodymyr Babchuk @ 2026-03-19 1:39 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Stefano Stabellini, Julien Grall,
Bertrand Marquis, Michal Orzel
Volodymyr Babchuk <volodymyr_babchuk@epam.com> writes:
> Hi Mykyta,
>
> Mykyta Poturai <Mykyta_Poturai@epam.com> writes:
>
>> Currently on Arm the desc->affinity mask of an irq is never updated,
>> which makes it hard to know the actual affinity of an interrupt.
>
> With this change you'll track affinity of hardware interrupts, but pure
> virtual interrupts still will not be tracked. Is it intended behaviour?
> I believe it should be mentioned in the commit message.
Well, that was blunder :( please disregard this.
--
WBR, Volodymyr
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations
2026-03-12 9:39 [PATCH v6 0/5] Implement CPU hotplug on Arm Mykyta Poturai
2026-03-12 9:39 ` [PATCH v6 1/5] arm/irq: Keep track of irq affinities Mykyta Poturai
@ 2026-03-12 9:39 ` Mykyta Poturai
2026-03-18 8:12 ` Bertrand Marquis
2026-03-12 9:39 ` [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops Mykyta Poturai
` (2 subsequent siblings)
4 siblings, 1 reply; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-12 9:39 UTC (permalink / raw)
To: xen-devel@lists.xenproject.org
Cc: Mykyta Poturai, Stefano Stabellini, Julien Grall,
Bertrand Marquis, Michal Orzel, Volodymyr Babchuk
Move IRQs from dying CPU to the online ones when a CPU is getting
offlined. When onlining, rebalance all IRQs in a round-robin fashion.
Guest-bound IRQs are already handled by scheduler in the process of
moving vCPUs to active pCPUs, so we only need to handle IRQs used by Xen
itself.
Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* don't do any balancing on boot
* only do balancing when cpu hotplug is enabled
v4->v5:
* handle CPU onlining as well
* more comments
* fix crash when ESPI is disabled
* don't assume CPU 0 is a boot CPU
* use insigned int for irq number
* remove assumption that all irqs a bound to CPU 0 by default from the
commit message
v3->v4:
* patch introduced
---
xen/arch/arm/include/asm/irq.h | 4 +++
xen/arch/arm/irq.c | 60 ++++++++++++++++++++++++++++++++++
xen/arch/arm/smpboot.c | 8 +++++
3 files changed, 72 insertions(+)
diff --git a/xen/arch/arm/include/asm/irq.h b/xen/arch/arm/include/asm/irq.h
index 09788dbfeb..a3897ec62d 100644
--- a/xen/arch/arm/include/asm/irq.h
+++ b/xen/arch/arm/include/asm/irq.h
@@ -126,6 +126,10 @@ bool irq_type_set_by_domain(const struct domain *d);
void irq_end_none(struct irq_desc *irq);
#define irq_end_none irq_end_none
+#ifdef CONFIG_CPU_HOTPLUG
+void rebalance_irqs(unsigned int from, bool up);
+#endif
+
#endif /* _ASM_HW_IRQ_H */
/*
* Local variables:
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 7204bc2b68..d428d3118b 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -158,6 +158,60 @@ static int init_local_irq_data(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_CPU_HOTPLUG
+static int cpu_next;
+
+static void balance_irq(int irq, unsigned int from, bool up)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ ASSERT(!cpumask_empty(&cpu_online_map));
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if ( likely(!desc->action) )
+ goto out;
+
+ if ( likely(test_bit(_IRQ_GUEST, &desc->status) ||
+ test_bit(_IRQ_MOVE_PENDING, &desc->status)) )
+ goto out;
+
+ /*
+ * Setting affinity to a mask of multiple CPUs causes the GIC drivers to
+ * select one CPU from that mask. If the dying CPU was included in the IRQ's
+ * affinity mask, we cannot determine exactly which CPU the interrupt is
+ * currently routed to, as GIC drivers lack a concrete get_affinity API. So
+ * to be safe we must reroute it to a new, definitely online, CPU. In the
+ * case of CPU going down, we move only the interrupt that could reside on
+ * it. Otherwise, we rearrange all interrupts in a round-robin fashion.
+ */
+ if ( !up && !cpumask_test_cpu(from, desc->affinity) )
+ goto out;
+
+ cpu_next = cpumask_cycle(cpu_next, &cpu_online_map);
+ irq_set_affinity(desc, cpumask_of(cpu_next));
+
+out:
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void rebalance_irqs(unsigned int from, bool up)
+{
+ int irq;
+
+ if ( cpumask_empty(&cpu_online_map) )
+ return;
+
+ for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
+ balance_irq(irq, from, up);
+
+#ifdef CONFIG_GICV3_ESPI
+ for ( irq = ESPI_BASE_INTID; irq < ESPI_MAX_INTID; irq++ )
+ balance_irq(irq, from, up);
+#endif
+}
+#endif /* CONFIG_CPU_HOTPLUG */
+
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
@@ -172,6 +226,12 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
printk(XENLOG_ERR "Unable to allocate local IRQ for CPU%u\n",
cpu);
break;
+ case CPU_ONLINE:
+#ifdef CONFIG_CPU_HOTPLUG
+ if ( system_state >= SYS_STATE_active )
+ rebalance_irqs(cpu, true);
+#endif
+ break;
}
return notifier_from_errno(rc);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index 7f3cfa812e..f17e88e678 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -425,6 +425,14 @@ void __cpu_disable(void)
smp_mb();
+ /*
+ * Now that the interrupts are cleared and the CPU marked as offline,
+ * move interrupts out of it
+ */
+#ifdef CONFIG_CPU_HOTPLUG
+ rebalance_irqs(cpu, false);
+#endif
+
/* Return to caller; eventually the IPI mechanism will unwind and the
* scheduler will drop to the idle loop, which will call stop_cpu(). */
}
--
2.51.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations
2026-03-12 9:39 ` [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations Mykyta Poturai
@ 2026-03-18 8:12 ` Bertrand Marquis
0 siblings, 0 replies; 15+ messages in thread
From: Bertrand Marquis @ 2026-03-18 8:12 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Stefano Stabellini, Julien Grall,
Michal Orzel, Volodymyr Babchuk
Hi Mykyta,
> On 12 Mar 2026, at 10:39, Mykyta Poturai <Mykyta_Poturai@epam.com> wrote:
>
> Move IRQs from dying CPU to the online ones when a CPU is getting
> offlined. When onlining, rebalance all IRQs in a round-robin fashion.
> Guest-bound IRQs are already handled by scheduler in the process of
> moving vCPUs to active pCPUs, so we only need to handle IRQs used by Xen
> itself.
>
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
> ---
> v5->v6:
> * don't do any balancing on boot
> * only do balancing when cpu hotplug is enabled
>
> v4->v5:
> * handle CPU onlining as well
> * more comments
> * fix crash when ESPI is disabled
> * don't assume CPU 0 is a boot CPU
> * use insigned int for irq number
> * remove assumption that all irqs a bound to CPU 0 by default from the
> commit message
>
> v3->v4:
> * patch introduced
> ---
> xen/arch/arm/include/asm/irq.h | 4 +++
> xen/arch/arm/irq.c | 60 ++++++++++++++++++++++++++++++++++
> xen/arch/arm/smpboot.c | 8 +++++
> 3 files changed, 72 insertions(+)
>
> diff --git a/xen/arch/arm/include/asm/irq.h b/xen/arch/arm/include/asm/irq.h
> index 09788dbfeb..a3897ec62d 100644
> --- a/xen/arch/arm/include/asm/irq.h
> +++ b/xen/arch/arm/include/asm/irq.h
> @@ -126,6 +126,10 @@ bool irq_type_set_by_domain(const struct domain *d);
> void irq_end_none(struct irq_desc *irq);
> #define irq_end_none irq_end_none
>
> +#ifdef CONFIG_CPU_HOTPLUG
> +void rebalance_irqs(unsigned int from, bool up);
> +#endif
Could you make here something like:
#else
static inline void rebalance_irqs(unsigned int from, bool up) {}
#endif
so that ...
> +
> #endif /* _ASM_HW_IRQ_H */
> /*
> * Local variables:
> diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
> index 7204bc2b68..d428d3118b 100644
> --- a/xen/arch/arm/irq.c
> +++ b/xen/arch/arm/irq.c
> @@ -158,6 +158,60 @@ static int init_local_irq_data(unsigned int cpu)
> return 0;
> }
>
> +#ifdef CONFIG_CPU_HOTPLUG
> +static int cpu_next;
> +
> +static void balance_irq(int irq, unsigned int from, bool up)
> +{
> + struct irq_desc *desc = irq_to_desc(irq);
> + unsigned long flags;
> +
> + ASSERT(!cpumask_empty(&cpu_online_map));
> +
> + spin_lock_irqsave(&desc->lock, flags);
> + if ( likely(!desc->action) )
> + goto out;
> +
> + if ( likely(test_bit(_IRQ_GUEST, &desc->status) ||
> + test_bit(_IRQ_MOVE_PENDING, &desc->status)) )
> + goto out;
> +
> + /*
> + * Setting affinity to a mask of multiple CPUs causes the GIC drivers to
> + * select one CPU from that mask. If the dying CPU was included in the IRQ's
> + * affinity mask, we cannot determine exactly which CPU the interrupt is
> + * currently routed to, as GIC drivers lack a concrete get_affinity API. So
> + * to be safe we must reroute it to a new, definitely online, CPU. In the
> + * case of CPU going down, we move only the interrupt that could reside on
> + * it. Otherwise, we rearrange all interrupts in a round-robin fashion.
> + */
> + if ( !up && !cpumask_test_cpu(from, desc->affinity) )
> + goto out;
> +
> + cpu_next = cpumask_cycle(cpu_next, &cpu_online_map);
> + irq_set_affinity(desc, cpumask_of(cpu_next));
> +
> +out:
> + spin_unlock_irqrestore(&desc->lock, flags);
> +}
> +
> +void rebalance_irqs(unsigned int from, bool up)
> +{
> + int irq;
> +
> + if ( cpumask_empty(&cpu_online_map) )
> + return;
> +
> + for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
> + balance_irq(irq, from, up);
> +
> +#ifdef CONFIG_GICV3_ESPI
> + for ( irq = ESPI_BASE_INTID; irq < ESPI_MAX_INTID; irq++ )
> + balance_irq(irq, from, up);
> +#endif
> +}
> +#endif /* CONFIG_CPU_HOTPLUG */
> +
> static int cpu_callback(struct notifier_block *nfb, unsigned long action,
> void *hcpu)
> {
> @@ -172,6 +226,12 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
> printk(XENLOG_ERR "Unable to allocate local IRQ for CPU%u\n",
> cpu);
> break;
> + case CPU_ONLINE:
> +#ifdef CONFIG_CPU_HOTPLUG
> + if ( system_state >= SYS_STATE_active )
> + rebalance_irqs(cpu, true);
> +#endif
> + break;
This ifdef could be switched to if IS_ENABLED
> }
>
> return notifier_from_errno(rc);
> diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> index 7f3cfa812e..f17e88e678 100644
> --- a/xen/arch/arm/smpboot.c
> +++ b/xen/arch/arm/smpboot.c
> @@ -425,6 +425,14 @@ void __cpu_disable(void)
>
> smp_mb();
>
> + /*
> + * Now that the interrupts are cleared and the CPU marked as offline,
> + * move interrupts out of it
> + */
> +#ifdef CONFIG_CPU_HOTPLUG
> + rebalance_irqs(cpu, false);
> +#endif
and this one too.
Doing it without the static inline will end up in an error and I think it is clearer
to have IS_ENABLED here so that it is clear from the code that nothing is done
if the config is not enabled.
But happy to remove the IS_ENABLED part if other think differently.
Cheers
Bertrand
> +
> /* Return to caller; eventually the IPI mechanism will unwind and the
> * scheduler will drop to the idle loop, which will call stop_cpu(). */
> }
> --
> 2.51.2
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops
2026-03-12 9:39 [PATCH v6 0/5] Implement CPU hotplug on Arm Mykyta Poturai
2026-03-12 9:39 ` [PATCH v6 1/5] arm/irq: Keep track of irq affinities Mykyta Poturai
2026-03-12 9:39 ` [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations Mykyta Poturai
@ 2026-03-12 9:39 ` Mykyta Poturai
2026-03-18 8:23 ` Bertrand Marquis
2026-03-23 11:09 ` Jan Beulich
2026-03-12 9:39 ` [PATCH v6 4/5] tools: Allow building xen-hptool without CONFIG_MIGRATE Mykyta Poturai
2026-03-12 9:39 ` [PATCH v6 5/5] docs: Document CPU hotplug Mykyta Poturai
4 siblings, 2 replies; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-12 9:39 UTC (permalink / raw)
To: xen-devel@lists.xenproject.org
Cc: Mykyta Poturai, Stefano Stabellini, Julien Grall,
Bertrand Marquis, Michal Orzel, Volodymyr Babchuk, Andrew Cooper,
Anthony PERARD, Jan Beulich, Roger Pau Monné,
Timothy Pearson, Alistair Francis, Connor Davis, Oleksii Kurochko,
Daniel P. Smith
Move XEN_SYSCTL_CPU_HOTPLUG_{ONLINE,OFFLINE} handlers to common code to
allow for enabling/disabling CPU cores in runtime on Arm64.
SMT-disable enforcement check is moved into a separate
architecture-specific function.
For now these operations only support Arm64. For proper Arm32 support,
there needs to be a mechanism to free per-cpu page tables, allocated in
init_domheap_mappings. Also, hotplug is not supported if ITS, FFA, or
TEE is enabled, as they use non-static IRQ actions.
Create a Kconfig option CPU_HOTPLUG that reflects these
constraints. On X86 the option is enabled unconditionally.
As cpu hotplug now has its own config option, switch flask to allow
XEN_SYSCTL_cpu_hotplug depending on CONFIG_CPU_HOTPLUG, so it can work
not only on x86.
Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* fix style issues
* rename arch_smt_cpu_disable -> arch_cpu_can_stay_online and invert the
logic
* use IS_ENABLED instead of ifdef
* remove explicit list of arch-specific SYSCTL_CPU_HOTPLUG_* options
from the common handler
* fix flask issue
v4->v5:
* move handling to common code
* rename config to CPU_HOTPLUG
* merge with "smp: Move cpu_up/down helpers to common code"
v3->v4:
* don't reimplement cpu_up/down helpers
* add Kconfig option
* fixup formatting
v2->v3:
* no changes
v1->v2:
* remove SMT ops
* remove cpu == 0 checks
* add XSM hooks
* only implement for 64bit Arm
---
xen/arch/arm/smp.c | 9 ++++++
xen/arch/ppc/stubs.c | 4 +++
xen/arch/riscv/stubs.c | 5 ++++
xen/arch/x86/include/asm/smp.h | 3 --
xen/arch/x86/platform_hypercall.c | 12 ++++++++
xen/arch/x86/smp.c | 33 ++--------------------
xen/arch/x86/sysctl.c | 21 ++++++++------
xen/common/Kconfig | 6 ++++
xen/common/smp.c | 35 +++++++++++++++++++++++
xen/common/sysctl.c | 46 +++++++++++++++++++++++++++++++
xen/include/xen/smp.h | 4 +++
xen/xsm/flask/hooks.c | 2 +-
12 files changed, 137 insertions(+), 43 deletions(-)
diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
index b372472188..984f863a9a 100644
--- a/xen/arch/arm/smp.c
+++ b/xen/arch/arm/smp.c
@@ -44,6 +44,15 @@ void smp_send_call_function_mask(const cpumask_t *mask)
}
}
+/*
+ * We currently don't support SMT on ARM so we don't need any special logic for
+ * CPU disabling
+ */
+bool arch_cpu_can_stay_online(unsigned int cpu)
+{
+ return true;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/ppc/stubs.c b/xen/arch/ppc/stubs.c
index a333f06119..8f280ba080 100644
--- a/xen/arch/ppc/stubs.c
+++ b/xen/arch/ppc/stubs.c
@@ -101,6 +101,10 @@ void smp_send_call_function_mask(const cpumask_t *mask)
BUG_ON("unimplemented");
}
+bool arch_cpu_can_stay_online(unsigned int cpu)
+{
+ BUG_ON("unimplemented");
+}
/* irq.c */
void irq_ack_none(struct irq_desc *desc)
diff --git a/xen/arch/riscv/stubs.c b/xen/arch/riscv/stubs.c
index daadff0138..7c3cda7bc5 100644
--- a/xen/arch/riscv/stubs.c
+++ b/xen/arch/riscv/stubs.c
@@ -70,6 +70,11 @@ void smp_send_call_function_mask(const cpumask_t *mask)
BUG_ON("unimplemented");
}
+bool arch_cpu_can_stay_online(unsigned int cpu)
+{
+ BUG_ON("unimplemented");
+}
+
/* irq.c */
void irq_ack_none(struct irq_desc *desc)
diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h
index 3f16e62696..cb3e0fed19 100644
--- a/xen/arch/x86/include/asm/smp.h
+++ b/xen/arch/x86/include/asm/smp.h
@@ -50,9 +50,6 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
void __stop_this_cpu(void);
-long cf_check cpu_up_helper(void *data);
-long cf_check cpu_down_helper(void *data);
-
long cf_check core_parking_helper(void *data);
bool core_parking_remove(unsigned int cpu);
uint32_t get_cur_idle_nums(void);
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index cd4f0ae5e5..e745151790 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -735,6 +735,12 @@ ret_t do_platform_op(
{
int cpu = op->u.cpu_ol.cpuid;
+ if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
ret = xsm_resource_plug_core(XSM_HOOK);
if ( ret )
break;
@@ -761,6 +767,12 @@ ret_t do_platform_op(
{
int cpu = op->u.cpu_ol.cpuid;
+ if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
ret = xsm_resource_unplug_core(XSM_HOOK);
if ( ret )
break;
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 7936294f5f..b781e933f2 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -418,35 +418,8 @@ void cf_check call_function_interrupt(void)
smp_call_function_interrupt();
}
-long cf_check cpu_up_helper(void *data)
+bool arch_cpu_can_stay_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)data;
- int ret = cpu_up(cpu);
-
- /* Have one more go on EBUSY. */
- if ( ret == -EBUSY )
- ret = cpu_up(cpu);
-
- if ( !ret && !opt_smt &&
- cpu_data[cpu].compute_unit_id == INVALID_CUID &&
- cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
- {
- ret = cpu_down_helper(data);
- if ( ret )
- printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
- else
- ret = -EPERM;
- }
-
- return ret;
-}
-
-long cf_check cpu_down_helper(void *data)
-{
- int cpu = (unsigned long)data;
- int ret = cpu_down(cpu);
- /* Have one more go on EBUSY. */
- if ( ret == -EBUSY )
- ret = cpu_down(cpu);
- return ret;
+ return opt_smt || cpu_data[cpu].compute_unit_id != INVALID_CUID ||
+ cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) <= 1;
}
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 1b04947516..35239b73c1 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -49,6 +49,7 @@ static void cf_check l3_cache_get(void *arg)
static long cf_check smt_up_down_helper(void *data)
{
+ #ifdef CONFIG_CPU_HOTPLUG
bool up = (bool)data;
unsigned int cpu, sibling_mask = boot_cpu_data.x86_num_siblings - 1;
int ret = 0;
@@ -89,6 +90,8 @@ static long cf_check smt_up_down_helper(void *data)
up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
return ret;
+ #endif /* CONFIG_CPU_HOTPLUG */
+ return 0;
}
void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
@@ -115,24 +118,24 @@ long arch_do_sysctl(
case XEN_SYSCTL_cpu_hotplug:
{
- unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
unsigned int op = sysctl->u.cpu_hotplug.op;
bool plug;
long (*fn)(void *data);
void *hcpu;
- switch ( op )
+ if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
{
- case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
- plug = true;
- fn = cpu_up_helper;
- hcpu = _p(cpu);
+ ret = -EOPNOTSUPP;
break;
+ }
+ switch ( op )
+ {
+ case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
- plug = false;
- fn = cpu_down_helper;
- hcpu = _p(cpu);
+ /* Handled by common code */
+ ASSERT_UNREACHABLE();
+ ret = -EOPNOTSUPP;
break;
case XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE:
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index d7e79e752a..bb73990355 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -637,6 +637,12 @@ config SYSTEM_SUSPEND
If unsure, say N.
+config CPU_HOTPLUG
+ bool "Enable CPU hotplug"
+ depends on (X86 || ARM_64) && !FFA && !TEE && !HAS_ITS
+ default y
+
+
menu "Supported hypercall interfaces"
visible if EXPERT
diff --git a/xen/common/smp.c b/xen/common/smp.c
index a011f541f1..e2bf82856e 100644
--- a/xen/common/smp.c
+++ b/xen/common/smp.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <xen/cpu.h>
#include <asm/hardirq.h>
#include <asm/processor.h>
#include <xen/spinlock.h>
@@ -104,6 +105,40 @@ void smp_call_function_interrupt(void)
irq_exit();
}
+#ifdef CONFIG_CPU_HOTPLUG
+long cf_check cpu_up_helper(void *data)
+{
+ unsigned int cpu = (unsigned long)data;
+ int ret = cpu_up(cpu);
+
+ /* Have one more go on EBUSY. */
+ if ( ret == -EBUSY )
+ ret = cpu_up(cpu);
+
+ if ( !ret && !arch_cpu_can_stay_online(cpu) )
+ {
+ ret = cpu_down_helper(data);
+ if ( ret )
+ printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
+ else
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+long cf_check cpu_down_helper(void *data)
+{
+ unsigned int cpu = (unsigned long)data;
+ int ret = cpu_down(cpu);
+
+ /* Have one more go on EBUSY. */
+ if ( ret == -EBUSY )
+ ret = cpu_down(cpu);
+ return ret;
+}
+#endif /* CONFIG_CPU_HOTPLUG */
+
/*
* Local variables:
* mode: C
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 5207664252..51a3dd699a 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -483,6 +483,52 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
copyback = 1;
break;
+ case XEN_SYSCTL_cpu_hotplug:
+ {
+ unsigned int cpu = op->u.cpu_hotplug.cpu;
+ unsigned int hp_op = op->u.cpu_hotplug.op;
+ bool plug;
+ long (*fn)(void *data);
+ void *hcpu;
+
+ ret = -EOPNOTSUPP;
+ if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
+ break;
+
+ switch ( hp_op )
+ {
+ case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
+ plug = true;
+ fn = cpu_up_helper;
+ hcpu = _p(cpu);
+ break;
+
+ case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
+ plug = false;
+ fn = cpu_down_helper;
+ hcpu = _p(cpu);
+ break;
+
+ default:
+ fn = NULL;
+ break;
+ }
+
+ if ( fn )
+ {
+ ret = plug ? xsm_resource_plug_core(XSM_HOOK)
+ : xsm_resource_unplug_core(XSM_HOOK);
+
+ if ( !ret )
+ ret = continue_hypercall_on_cpu(0, fn, hcpu);
+
+ break;
+ }
+
+ /* Use the arch handler for cases not handled here */
+ fallthrough;
+ }
+
default:
ret = arch_do_sysctl(op, u_sysctl);
copyback = 0;
diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h
index 2ca9ff1bfc..04530738c9 100644
--- a/xen/include/xen/smp.h
+++ b/xen/include/xen/smp.h
@@ -76,4 +76,8 @@ extern void *stack_base[NR_CPUS];
void initialize_cpu_data(unsigned int cpu);
int setup_cpu_root_pgt(unsigned int cpu);
+bool arch_cpu_can_stay_online(unsigned int cpu);
+long cf_check cpu_up_helper(void *data);
+long cf_check cpu_down_helper(void *data);
+
#endif /* __XEN_SMP_H__ */
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index b250b27065..36d357cae8 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -835,7 +835,7 @@ static int cf_check flask_sysctl(int cmd)
case XEN_SYSCTL_getdomaininfolist:
case XEN_SYSCTL_page_offline_op:
case XEN_SYSCTL_scheduler_op:
-#ifdef CONFIG_X86
+#ifdef CONFIG_CPU_HOTPLUG
case XEN_SYSCTL_cpu_hotplug:
#endif
return 0;
--
2.51.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops
2026-03-12 9:39 ` [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops Mykyta Poturai
@ 2026-03-18 8:23 ` Bertrand Marquis
2026-03-23 11:09 ` Jan Beulich
1 sibling, 0 replies; 15+ messages in thread
From: Bertrand Marquis @ 2026-03-18 8:23 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Stefano Stabellini, Julien Grall,
Michal Orzel, Volodymyr Babchuk, Andrew Cooper, Anthony PERARD,
Jan Beulich, Roger Pau Monné, Timothy Pearson,
Alistair Francis, Connor Davis, Oleksii Kurochko, Daniel P. Smith
Hi Mykyta,
> On 12 Mar 2026, at 10:39, Mykyta Poturai <Mykyta_Poturai@epam.com> wrote:
>
> Move XEN_SYSCTL_CPU_HOTPLUG_{ONLINE,OFFLINE} handlers to common code to
> allow for enabling/disabling CPU cores in runtime on Arm64.
>
> SMT-disable enforcement check is moved into a separate
> architecture-specific function.
>
> For now this operations only support Arm64. For proper Arm32 support,
> there needs to be a mechanism to free per-cpu page tables, allocated in
> init_domheap_mappings. Also, hotplug is not supported if ITS, FFA, or
> TEE is enabled, as they use non-static IRQ actions.
>
> Create a Kconfig option CPU_HOTPLUG that reflects this
> constraints. On X86 the option is enabled unconditionally.
>
> As cpu hotplug now has its own config option, switch flask to allow
> XEN_SYSCTL_cpu_hotplug depending on CONFIG_CPU_HOTPLUG, so it can work
> not only on x86.
>
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
>
> ---
>
> v5->v6:
> * fix style issues
> * rename arch_smt_cpu_disable -> arch_cpu_can_stay_online and invert the
> logic
> * use IS_ENABLED istead of ifdef
> * remove explicit list af arch-specific SYSCTL_CPU_HOTPLUG_* options
> from the common handler
> * fix flask issue
>
> v4->v5:
> * move handling to common code
> * rename config to CPU_HOTPUG
> * merge with "smp: Move cpu_up/down helpers to common code"
>
> v3->v4:
> * don't reimplement cpu_up/down helpers
> * add Kconfig option
> * fixup formatting
>
> v2->v3:
> * no changes
>
> v1->v2:
> * remove SMT ops
> * remove cpu == 0 checks
> * add XSM hooks
> * only implement for 64bit Arm
> ---
> xen/arch/arm/smp.c | 9 ++++++
> xen/arch/ppc/stubs.c | 4 +++
> xen/arch/riscv/stubs.c | 5 ++++
> xen/arch/x86/include/asm/smp.h | 3 --
> xen/arch/x86/platform_hypercall.c | 12 ++++++++
> xen/arch/x86/smp.c | 33 ++--------------------
> xen/arch/x86/sysctl.c | 21 ++++++++------
> xen/common/Kconfig | 6 ++++
> xen/common/smp.c | 35 +++++++++++++++++++++++
> xen/common/sysctl.c | 46 +++++++++++++++++++++++++++++++
> xen/include/xen/smp.h | 4 +++
> xen/xsm/flask/hooks.c | 2 +-
> 12 files changed, 137 insertions(+), 43 deletions(-)
>
> diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
> index b372472188..984f863a9a 100644
> --- a/xen/arch/arm/smp.c
> +++ b/xen/arch/arm/smp.c
> @@ -44,6 +44,15 @@ void smp_send_call_function_mask(const cpumask_t *mask)
> }
> }
>
> +/*
> + * We currently don't support SMT on ARM so we don't need any special logic for
> + * CPU disabling
> + */
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> +{
> + return true;
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/arch/ppc/stubs.c b/xen/arch/ppc/stubs.c
> index a333f06119..8f280ba080 100644
> --- a/xen/arch/ppc/stubs.c
> +++ b/xen/arch/ppc/stubs.c
> @@ -101,6 +101,10 @@ void smp_send_call_function_mask(const cpumask_t *mask)
> BUG_ON("unimplemented");
> }
>
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> +{
> + BUG_ON("unimplemented");
> +}
> /* irq.c */
>
> void irq_ack_none(struct irq_desc *desc)
> diff --git a/xen/arch/riscv/stubs.c b/xen/arch/riscv/stubs.c
> index daadff0138..7c3cda7bc5 100644
> --- a/xen/arch/riscv/stubs.c
> +++ b/xen/arch/riscv/stubs.c
> @@ -70,6 +70,11 @@ void smp_send_call_function_mask(const cpumask_t *mask)
> BUG_ON("unimplemented");
> }
>
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> +{
> + BUG_ON("unimplemented");
> +}
> +
> /* irq.c */
>
> void irq_ack_none(struct irq_desc *desc)
> diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h
> index 3f16e62696..cb3e0fed19 100644
> --- a/xen/arch/x86/include/asm/smp.h
> +++ b/xen/arch/x86/include/asm/smp.h
> @@ -50,9 +50,6 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
>
> void __stop_this_cpu(void);
>
> -long cf_check cpu_up_helper(void *data);
> -long cf_check cpu_down_helper(void *data);
> -
> long cf_check core_parking_helper(void *data);
> bool core_parking_remove(unsigned int cpu);
> uint32_t get_cur_idle_nums(void);
> diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
> index cd4f0ae5e5..e745151790 100644
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -735,6 +735,12 @@ ret_t do_platform_op(
> {
> int cpu = op->u.cpu_ol.cpuid;
>
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + {
> + ret = -EOPNOTSUPP;
> + break;
> + }
> +
> ret = xsm_resource_plug_core(XSM_HOOK);
> if ( ret )
> break;
> @@ -761,6 +767,12 @@ ret_t do_platform_op(
> {
> int cpu = op->u.cpu_ol.cpuid;
>
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + {
> + ret = -EOPNOTSUPP;
> + break;
> + }
> +
> ret = xsm_resource_unplug_core(XSM_HOOK);
> if ( ret )
> break;
> diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
> index 7936294f5f..b781e933f2 100644
> --- a/xen/arch/x86/smp.c
> +++ b/xen/arch/x86/smp.c
> @@ -418,35 +418,8 @@ void cf_check call_function_interrupt(void)
> smp_call_function_interrupt();
> }
>
> -long cf_check cpu_up_helper(void *data)
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> {
> - unsigned int cpu = (unsigned long)data;
> - int ret = cpu_up(cpu);
> -
> - /* Have one more go on EBUSY. */
> - if ( ret == -EBUSY )
> - ret = cpu_up(cpu);
> -
> - if ( !ret && !opt_smt &&
> - cpu_data[cpu].compute_unit_id == INVALID_CUID &&
> - cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
> - {
> - ret = cpu_down_helper(data);
> - if ( ret )
> - printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
> - else
> - ret = -EPERM;
> - }
> -
> - return ret;
> -}
> -
> -long cf_check cpu_down_helper(void *data)
> -{
> - int cpu = (unsigned long)data;
> - int ret = cpu_down(cpu);
> - /* Have one more go on EBUSY. */
> - if ( ret == -EBUSY )
> - ret = cpu_down(cpu);
> - return ret;
> + return opt_smt || cpu_data[cpu].compute_unit_id != INVALID_CUID ||
> + cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) <= 1;
> }
> diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
> index 1b04947516..35239b73c1 100644
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -49,6 +49,7 @@ static void cf_check l3_cache_get(void *arg)
>
> static long cf_check smt_up_down_helper(void *data)
> {
> + #ifdef CONFIG_CPU_HOTPLUG
> bool up = (bool)data;
> unsigned int cpu, sibling_mask = boot_cpu_data.x86_num_siblings - 1;
> int ret = 0;
> @@ -89,6 +90,8 @@ static long cf_check smt_up_down_helper(void *data)
> up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
>
> return ret;
> + #endif /* CONFIG_CPU_HOTPLUG */
> + return 0;
> }
>
> void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
> @@ -115,24 +118,24 @@ long arch_do_sysctl(
>
> case XEN_SYSCTL_cpu_hotplug:
> {
> - unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
> unsigned int op = sysctl->u.cpu_hotplug.op;
> bool plug;
> long (*fn)(void *data);
> void *hcpu;
>
> - switch ( op )
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> {
> - case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
> - plug = true;
> - fn = cpu_up_helper;
> - hcpu = _p(cpu);
> + ret = -EOPNOTSUPP;
> break;
> + }
>
> + switch ( op )
> + {
> + case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
> case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
> - plug = false;
> - fn = cpu_down_helper;
> - hcpu = _p(cpu);
> + /* Handled by common code */
> + ASSERT_UNREACHABLE();
> + ret = -EOPNOTSUPP;
> break;
>
> case XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE:
> diff --git a/xen/common/Kconfig b/xen/common/Kconfig
> index d7e79e752a..bb73990355 100644
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -637,6 +637,12 @@ config SYSTEM_SUSPEND
>
> If unsure, say N.
>
> +config CPU_HOTPLUG
> + bool "Enable CPU hotplug"
> + depends on (X86 || ARM_64) && !FFA && !TEE && !HAS_ITS
> + default y
I do not see this as something that should be enabled on arm or
depend on any of this.
FFA could work, conditions depend more on the rest of the system
than on FFA code, same for tee which depends on TEE (and actually
FFA depends on TEE).
So I would rather see this as default n on ARM64 and only depend
on HAS_ITS and maybe flag this feature as EXPERT on arm64.
What are others thinking here?
Cheers
Bertrand
> +
> +
> menu "Supported hypercall interfaces"
> visible if EXPERT
>
> diff --git a/xen/common/smp.c b/xen/common/smp.c
> index a011f541f1..e2bf82856e 100644
> --- a/xen/common/smp.c
> +++ b/xen/common/smp.c
> @@ -16,6 +16,7 @@
> * GNU General Public License for more details.
> */
>
> +#include <xen/cpu.h>
> #include <asm/hardirq.h>
> #include <asm/processor.h>
> #include <xen/spinlock.h>
> @@ -104,6 +105,40 @@ void smp_call_function_interrupt(void)
> irq_exit();
> }
>
> +#ifdef CONFIG_CPU_HOTPLUG
> +long cf_check cpu_up_helper(void *data)
> +{
> + unsigned int cpu = (unsigned long)data;
> + int ret = cpu_up(cpu);
> +
> + /* Have one more go on EBUSY. */
> + if ( ret == -EBUSY )
> + ret = cpu_up(cpu);
> +
> + if ( !ret && !arch_cpu_can_stay_online(cpu) )
> + {
> + ret = cpu_down_helper(data);
> + if ( ret )
> + printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
> + else
> + ret = -EPERM;
> + }
> +
> + return ret;
> +}
> +
> +long cf_check cpu_down_helper(void *data)
> +{
> + unsigned int cpu = (unsigned long)data;
> + int ret = cpu_down(cpu);
> +
> + /* Have one more go on EBUSY. */
> + if ( ret == -EBUSY )
> + ret = cpu_down(cpu);
> + return ret;
> +}
> +#endif /* CONFIG_CPU_HOTPLUG */
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
> index 5207664252..51a3dd699a 100644
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -483,6 +483,52 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> copyback = 1;
> break;
>
> + case XEN_SYSCTL_cpu_hotplug:
> + {
> + unsigned int cpu = op->u.cpu_hotplug.cpu;
> + unsigned int hp_op = op->u.cpu_hotplug.op;
> + bool plug;
> + long (*fn)(void *data);
> + void *hcpu;
> +
> + ret = -EOPNOTSUPP;
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + break;
> +
> + switch ( hp_op )
> + {
> + case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
> + plug = true;
> + fn = cpu_up_helper;
> + hcpu = _p(cpu);
> + break;
> +
> + case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
> + plug = false;
> + fn = cpu_down_helper;
> + hcpu = _p(cpu);
> + break;
> +
> + default:
> + fn = NULL;
> + break;
> + }
> +
> + if ( fn )
> + {
> + ret = plug ? xsm_resource_plug_core(XSM_HOOK)
> + : xsm_resource_unplug_core(XSM_HOOK);
> +
> + if ( !ret )
> + ret = continue_hypercall_on_cpu(0, fn, hcpu);
> +
> + break;
> + }
> +
> + /* Use the arch handler for cases not handled here */
> + fallthrough;
> + }
> +
> default:
> ret = arch_do_sysctl(op, u_sysctl);
> copyback = 0;
> diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h
> index 2ca9ff1bfc..04530738c9 100644
> --- a/xen/include/xen/smp.h
> +++ b/xen/include/xen/smp.h
> @@ -76,4 +76,8 @@ extern void *stack_base[NR_CPUS];
> void initialize_cpu_data(unsigned int cpu);
> int setup_cpu_root_pgt(unsigned int cpu);
>
> +bool arch_cpu_can_stay_online(unsigned int cpu);
> +long cf_check cpu_up_helper(void *data);
> +long cf_check cpu_down_helper(void *data);
> +
> #endif /* __XEN_SMP_H__ */
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index b250b27065..36d357cae8 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -835,7 +835,7 @@ static int cf_check flask_sysctl(int cmd)
> case XEN_SYSCTL_getdomaininfolist:
> case XEN_SYSCTL_page_offline_op:
> case XEN_SYSCTL_scheduler_op:
> -#ifdef CONFIG_X86
> +#ifdef CONFIG_CPU_HOTPLUG
> case XEN_SYSCTL_cpu_hotplug:
> #endif
> return 0;
> --
> 2.51.2
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops
2026-03-12 9:39 ` [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops Mykyta Poturai
2026-03-18 8:23 ` Bertrand Marquis
@ 2026-03-23 11:09 ` Jan Beulich
2026-03-27 10:39 ` Mykyta Poturai
1 sibling, 1 reply; 15+ messages in thread
From: Jan Beulich @ 2026-03-23 11:09 UTC (permalink / raw)
To: Mykyta Poturai
Cc: Stefano Stabellini, Julien Grall, Bertrand Marquis, Michal Orzel,
Volodymyr Babchuk, Andrew Cooper, Anthony PERARD,
Roger Pau Monné, Timothy Pearson, Alistair Francis,
Connor Davis, Oleksii Kurochko, Daniel P. Smith,
xen-devel@lists.xenproject.org
On 12.03.2026 10:39, Mykyta Poturai wrote:
> --- a/xen/arch/arm/smp.c
> +++ b/xen/arch/arm/smp.c
> @@ -44,6 +44,15 @@ void smp_send_call_function_mask(const cpumask_t *mask)
> }
> }
>
> +/*
> + * We currently don't support SMT on ARM so we don't need any special logic for
> + * CPU disabling
> + */
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> +{
> + return true;
> +}
Something as simple as this would be nice to be an inline function (or, less
desirably, a macro).
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -735,6 +735,12 @@ ret_t do_platform_op(
> {
> int cpu = op->u.cpu_ol.cpuid;
>
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + {
> + ret = -EOPNOTSUPP;
> + break;
> + }
> +
> ret = xsm_resource_plug_core(XSM_HOOK);
> if ( ret )
> break;
> @@ -761,6 +767,12 @@ ret_t do_platform_op(
> {
> int cpu = op->u.cpu_ol.cpuid;
>
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + {
> + ret = -EOPNOTSUPP;
> + break;
> + }
> +
> ret = xsm_resource_unplug_core(XSM_HOOK);
> if ( ret )
> break;
I wonder whether on x86 this really should become an optional thing (and
if so, whether that wouldn't better be a separate change with proper
justification). See also the comment on common/Kconfig further down - by
the name of the option, and given the support status the change above may
be legitimate, but not some of the similar restrictions added elsewhere.
> --- a/xen/arch/x86/smp.c
> +++ b/xen/arch/x86/smp.c
> @@ -418,35 +418,8 @@ void cf_check call_function_interrupt(void)
> smp_call_function_interrupt();
> }
>
> -long cf_check cpu_up_helper(void *data)
> +bool arch_cpu_can_stay_online(unsigned int cpu)
> {
> - unsigned int cpu = (unsigned long)data;
> - int ret = cpu_up(cpu);
> -
> - /* Have one more go on EBUSY. */
> - if ( ret == -EBUSY )
> - ret = cpu_up(cpu);
> -
> - if ( !ret && !opt_smt &&
> - cpu_data[cpu].compute_unit_id == INVALID_CUID &&
> - cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
> - {
> - ret = cpu_down_helper(data);
> - if ( ret )
> - printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
> - else
> - ret = -EPERM;
> - }
> -
> - return ret;
> -}
> -
> -long cf_check cpu_down_helper(void *data)
> -{
> - int cpu = (unsigned long)data;
> - int ret = cpu_down(cpu);
> - /* Have one more go on EBUSY. */
> - if ( ret == -EBUSY )
> - ret = cpu_down(cpu);
> - return ret;
> + return opt_smt || cpu_data[cpu].compute_unit_id != INVALID_CUID ||
> + cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) <= 1;
> }
Unlike for Arm, this may indeed better be an out-of-line function.
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -49,6 +49,7 @@ static void cf_check l3_cache_get(void *arg)
>
> static long cf_check smt_up_down_helper(void *data)
> {
> + #ifdef CONFIG_CPU_HOTPLUG
> bool up = (bool)data;
> unsigned int cpu, sibling_mask = boot_cpu_data.x86_num_siblings - 1;
> int ret = 0;
> @@ -89,6 +90,8 @@ static long cf_check smt_up_down_helper(void *data)
> up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
>
> return ret;
> + #endif /* CONFIG_CPU_HOTPLUG */
> + return 0;
> }
The #-es or pre-processor directives want to be in the very first column.
Sharing "return ret" would also be nice, imo. Would require ret's decl to
move ahead of the #ifdef. Actually - is there anything preventing
if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
return 0;
at the top of the function? Perhaps even with ASSERT_UNREACHABLE() added
in?
> @@ -115,24 +118,24 @@ long arch_do_sysctl(
>
> case XEN_SYSCTL_cpu_hotplug:
> {
> - unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
> unsigned int op = sysctl->u.cpu_hotplug.op;
> bool plug;
> long (*fn)(void *data);
> void *hcpu;
>
> - switch ( op )
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> {
> - case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
> - plug = true;
> - fn = cpu_up_helper;
> - hcpu = _p(cpu);
> + ret = -EOPNOTSUPP;
> break;
ASSERT_UNREACHABLE() looks to also be valid to be added here, seeing how
do_sysctl() now works.
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -637,6 +637,12 @@ config SYSTEM_SUSPEND
>
> If unsure, say N.
>
> +config CPU_HOTPLUG
> + bool "Enable CPU hotplug"
I'm not happy with this prompt. For x86 SUPPORT.md declares (ACPI) CPU
hotplug as experimental. That's physical hotplug. The code you're
fiddling with, however, is also used for soft-{off,on}lining. Which,
e.g. to disable SMT on x86, may need to be used for security purposes.
> + depends on (X86 || ARM_64) && !FFA && !TEE && !HAS_ITS
What if on x86 FFA, TEE, or ITS gain a meaning?
> + default y
> +
> +
Nit: No double blank lines please.
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -483,6 +483,52 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> copyback = 1;
> break;
>
> + case XEN_SYSCTL_cpu_hotplug:
> + {
> + unsigned int cpu = op->u.cpu_hotplug.cpu;
I don't think this variable is very useful to keep. Instead use ...
> + unsigned int hp_op = op->u.cpu_hotplug.op;
> + bool plug;
> + long (*fn)(void *data);
> + void *hcpu;
void *hcpu = _p(op->u.cpu_hotplug.op);
right here, dropping the assignments further down.
> + ret = -EOPNOTSUPP;
> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> + break;
> +
> + switch ( hp_op )
> + {
> + case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
> + plug = true;
> + fn = cpu_up_helper;
> + hcpu = _p(cpu);
> + break;
> +
> + case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
> + plug = false;
> + fn = cpu_down_helper;
> + hcpu = _p(cpu);
> + break;
> +
> + default:
> + fn = NULL;
> + break;
> + }
> +
> + if ( fn )
> + {
> + ret = plug ? xsm_resource_plug_core(XSM_HOOK)
> + : xsm_resource_unplug_core(XSM_HOOK);
> +
> + if ( !ret )
> + ret = continue_hypercall_on_cpu(0, fn, hcpu);
> +
> + break;
> + }
> +
> + /* Use the arch handler for cases not handled here */
> + fallthrough;
> + }
> +
> default:
> ret = arch_do_sysctl(op, u_sysctl);
> copyback = 0;
This form of falling through may be a little risky, towards someone not
looking closely enough and inserting another case label immediately ahead
of the default one. While I don't think there's a really good solution to
this, please consider
}
/* Use the arch handler for cases not handled above */
fallthrough;
default:
instead.
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -835,7 +835,7 @@ static int cf_check flask_sysctl(int cmd)
> case XEN_SYSCTL_getdomaininfolist:
> case XEN_SYSCTL_page_offline_op:
> case XEN_SYSCTL_scheduler_op:
> -#ifdef CONFIG_X86
> +#ifdef CONFIG_CPU_HOTPLUG
> case XEN_SYSCTL_cpu_hotplug:
> #endif
> return 0;
Is there a reason the #ifdef can't simply be dropped?
Jan
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops
2026-03-23 11:09 ` Jan Beulich
@ 2026-03-27 10:39 ` Mykyta Poturai
2026-03-27 11:40 ` Jan Beulich
0 siblings, 1 reply; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-27 10:39 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Julien Grall, Bertrand Marquis, Michal Orzel,
Volodymyr Babchuk, Andrew Cooper, Anthony PERARD,
Roger Pau Monné, Timothy Pearson, Alistair Francis,
Connor Davis, Oleksii Kurochko, Daniel P. Smith,
xen-devel@lists.xenproject.org
On 3/23/26 13:09, Jan Beulich wrote:
> On 12.03.2026 10:39, Mykyta Poturai wrote:
>> --- a/xen/arch/arm/smp.c
>> +++ b/xen/arch/arm/smp.c
>> @@ -44,6 +44,15 @@ void smp_send_call_function_mask(const cpumask_t *mask)
>> }
>> }
>>
>> +/*
>> + * We currently don't support SMT on ARM so we don't need any special logic for
>> + * CPU disabling
>> + */
>> +bool arch_cpu_can_stay_online(unsigned int cpu)
>> +{
>> + return true;
>> +}
>
> Something as simple as this would be nice to be an inline function (or, less
> desirably, a macro).
>
>> --- a/xen/arch/x86/platform_hypercall.c
>> +++ b/xen/arch/x86/platform_hypercall.c
>> @@ -735,6 +735,12 @@ ret_t do_platform_op(
>> {
>> int cpu = op->u.cpu_ol.cpuid;
>>
>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>> + {
>> + ret = -EOPNOTSUPP;
>> + break;
>> + }
>> +
>> ret = xsm_resource_plug_core(XSM_HOOK);
>> if ( ret )
>> break;
>> @@ -761,6 +767,12 @@ ret_t do_platform_op(
>> {
>> int cpu = op->u.cpu_ol.cpuid;
>>
>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>> + {
>> + ret = -EOPNOTSUPP;
>> + break;
>> + }
>> +
>> ret = xsm_resource_unplug_core(XSM_HOOK);
>> if ( ret )
>> break;
>
> I wonder whether on x86 this really should become an optional thing (and
> if so, whether that wouldn't better be a separate change with proper
> justification). See also the comment on common/Kconfig further down - by
> the name of the option, and given the support status the change above may
> be legitimate, but not some of the similar restrictions added elsewhere.
>
Maybe force it to be always on like x86 then? I don't really have a
justification for making it optional on x86, it just happened as side
effect of creating a config option.
>> --- a/xen/arch/x86/smp.c
>> +++ b/xen/arch/x86/smp.c
>> @@ -418,35 +418,8 @@ void cf_check call_function_interrupt(void)
>> smp_call_function_interrupt();
>> }
>>
>> -long cf_check cpu_up_helper(void *data)
>> +bool arch_cpu_can_stay_online(unsigned int cpu)
>> {
>> - unsigned int cpu = (unsigned long)data;
>> - int ret = cpu_up(cpu);
>> -
>> - /* Have one more go on EBUSY. */
>> - if ( ret == -EBUSY )
>> - ret = cpu_up(cpu);
>> -
>> - if ( !ret && !opt_smt &&
>> - cpu_data[cpu].compute_unit_id == INVALID_CUID &&
>> - cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
>> - {
>> - ret = cpu_down_helper(data);
>> - if ( ret )
>> - printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
>> - else
>> - ret = -EPERM;
>> - }
>> -
>> - return ret;
>> -}
>> -
>> -long cf_check cpu_down_helper(void *data)
>> -{
>> - int cpu = (unsigned long)data;
>> - int ret = cpu_down(cpu);
>> - /* Have one more go on EBUSY. */
>> - if ( ret == -EBUSY )
>> - ret = cpu_down(cpu);
>> - return ret;
>> + return opt_smt || cpu_data[cpu].compute_unit_id != INVALID_CUID ||
>> + cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) <= 1;
>> }
>
> Unlike for Arm, this may indeed better be an out-of-line function.
>
>> --- a/xen/arch/x86/sysctl.c
>> +++ b/xen/arch/x86/sysctl.c
>> @@ -49,6 +49,7 @@ static void cf_check l3_cache_get(void *arg)
>>
>> static long cf_check smt_up_down_helper(void *data)
>> {
>> + #ifdef CONFIG_CPU_HOTPLUG
>> bool up = (bool)data;
>> unsigned int cpu, sibling_mask = boot_cpu_data.x86_num_siblings - 1;
>> int ret = 0;
>> @@ -89,6 +90,8 @@ static long cf_check smt_up_down_helper(void *data)
>> up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
>>
>> return ret;
>> + #endif /* CONFIG_CPU_HOTPLUG */
>> + return 0;
>> }
>
> The #-es or pre-processor directives want to be in the very first column.
>
> Sharing "return ret" would also be nice, imo. Would require ret's decl to
> move ahead of the #ifdef. Actually - is there anything preventing
>
> if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
> return 0;
>
> at the top of the function? Perhaps even with ASSERT_UNREACHABLE() added
> in?
>
>> @@ -115,24 +118,24 @@ long arch_do_sysctl(
>>
>> case XEN_SYSCTL_cpu_hotplug:
>> {
>> - unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
>> unsigned int op = sysctl->u.cpu_hotplug.op;
>> bool plug;
>> long (*fn)(void *data);
>> void *hcpu;
>>
>> - switch ( op )
>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>> {
>> - case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
>> - plug = true;
>> - fn = cpu_up_helper;
>> - hcpu = _p(cpu);
>> + ret = -EOPNOTSUPP;
>> break;
>
> ASSERT_UNREACHABLE() looks to also be valid to be added here, seeing how
> do_sysctl() now works.
>
>> --- a/xen/common/Kconfig
>> +++ b/xen/common/Kconfig
>> @@ -637,6 +637,12 @@ config SYSTEM_SUSPEND
>>
>> If unsure, say N.
>>
>> +config CPU_HOTPLUG
>> + bool "Enable CPU hotplug"
>
> I'm not happy with this prompt. For x86 SUPPORT.md declares (ACPI) CPU
> hotplug as experimental. That's physical hotplug. The code you're
> fiddling with, however, is also used for soft-{off,on}lining. Which,
> e.g. to disable SMT on x86, may need to be used for security purposes.
>
>> + depends on (X86 || ARM_64) && !FFA && !TEE && !HAS_ITS
>
> What if on x86 FFA, TEE, or ITS gain a meaning?
>
>> + default y
>> +
>> +
>
> Nit: No double blank lines please.
>
>> --- a/xen/common/sysctl.c
>> +++ b/xen/common/sysctl.c
>> @@ -483,6 +483,52 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
>> copyback = 1;
>> break;
>>
>> + case XEN_SYSCTL_cpu_hotplug:
>> + {
>> + unsigned int cpu = op->u.cpu_hotplug.cpu;
>
> I don't think this variable is very useful to keep. Instead use ...
>
>> + unsigned int hp_op = op->u.cpu_hotplug.op;
>> + bool plug;
>> + long (*fn)(void *data);
>> + void *hcpu;
>
> void *hcpu = _p(op->u.cpu_hotplug.op);
>
> right here, dropping the assignments further down.
>
>> + ret = -EOPNOTSUPP;
>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>> + break;
>> +
>> + switch ( hp_op )
>> + {
>> + case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
>> + plug = true;
>> + fn = cpu_up_helper;
>> + hcpu = _p(cpu);
>> + break;
>> +
>> + case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
>> + plug = false;
>> + fn = cpu_down_helper;
>> + hcpu = _p(cpu);
>> + break;
>> +
>> + default:
>> + fn = NULL;
>> + break;
>> + }
>> +
>> + if ( fn )
>> + {
>> + ret = plug ? xsm_resource_plug_core(XSM_HOOK)
>> + : xsm_resource_unplug_core(XSM_HOOK);
>> +
>> + if ( !ret )
>> + ret = continue_hypercall_on_cpu(0, fn, hcpu);
>> +
>> + break;
>> + }
>> +
>> + /* Use the arch handler for cases not handled here */
>> + fallthrough;
>> + }
>> +
>> default:
>> ret = arch_do_sysctl(op, u_sysctl);
>> copyback = 0;
>
> This form of falling through may be a little risky, towards someone not
> looking closely enough and inserting another case label immediately ahead
> of the default one. While I don't think there's a really good solution to
> this, please consider
>
> }
> /* Use the arch handler for cases not handled above */
> fallthrough;
> default:
>
> instead.
>
Just want to clarify if I got the idea. Is this what you meant?
switch ( op->cmd )
{
....
case XEN_SYSCTL_cpu_hotplug:
{
....
}
/* Use the arch handler for cases not handled here */
fallthrough;
default:
ret = arch_do_sysctl(op, u_sysctl);
copyback = 0;
break;
}
>> --- a/xen/xsm/flask/hooks.c
>> +++ b/xen/xsm/flask/hooks.c
>> @@ -835,7 +835,7 @@ static int cf_check flask_sysctl(int cmd)
>> case XEN_SYSCTL_getdomaininfolist:
>> case XEN_SYSCTL_page_offline_op:
>> case XEN_SYSCTL_scheduler_op:
>> -#ifdef CONFIG_X86
>> +#ifdef CONFIG_CPU_HOTPLUG
>> case XEN_SYSCTL_cpu_hotplug:
>> #endif
>> return 0;
>
> Is there a reason the #ifdef can't simply be dropped?
>
> Jan
--
Mykyta
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops
2026-03-27 10:39 ` Mykyta Poturai
@ 2026-03-27 11:40 ` Jan Beulich
0 siblings, 0 replies; 15+ messages in thread
From: Jan Beulich @ 2026-03-27 11:40 UTC (permalink / raw)
To: Mykyta Poturai
Cc: Stefano Stabellini, Julien Grall, Bertrand Marquis, Michal Orzel,
Volodymyr Babchuk, Andrew Cooper, Anthony PERARD,
Roger Pau Monné, Timothy Pearson, Alistair Francis,
Connor Davis, Oleksii Kurochko, Daniel P. Smith,
xen-devel@lists.xenproject.org
On 27.03.2026 11:39, Mykyta Poturai wrote:
> On 3/23/26 13:09, Jan Beulich wrote:
>> On 12.03.2026 10:39, Mykyta Poturai wrote:
>>> --- a/xen/arch/x86/platform_hypercall.c
>>> +++ b/xen/arch/x86/platform_hypercall.c
>>> @@ -735,6 +735,12 @@ ret_t do_platform_op(
>>> {
>>> int cpu = op->u.cpu_ol.cpuid;
>>>
>>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>>> + {
>>> + ret = -EOPNOTSUPP;
>>> + break;
>>> + }
>>> +
>>> ret = xsm_resource_plug_core(XSM_HOOK);
>>> if ( ret )
>>> break;
>>> @@ -761,6 +767,12 @@ ret_t do_platform_op(
>>> {
>>> int cpu = op->u.cpu_ol.cpuid;
>>>
>>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>>> + {
>>> + ret = -EOPNOTSUPP;
>>> + break;
>>> + }
>>> +
>>> ret = xsm_resource_unplug_core(XSM_HOOK);
>>> if ( ret )
>>> break;
>>
>> I wonder whether on x86 this really should become an optional thing (and
>> if so, whether that wouldn't better be a separate change with proper
>> justification). See also the comment on common/Kconfig further down - by
>> the name of the option, and given the support status the change above may
>> be legitimate, but not some of the similar restrictions added elsewhere.
>
> Maybe force it to be always on like x86 then? I don't really have a
> justification for making it optional on x86, it just happened as side
> effect of creating a config option.
Well, I would have asked for that if there wasn't a (presumed) use case for
making it optional - certification, where as much unused code as possible
(apparently) wants compiling out. You'll need to ask those doing prep work
for certification whether they'd be interested in it becoming optional.
>>> --- a/xen/common/sysctl.c
>>> +++ b/xen/common/sysctl.c
>>> @@ -483,6 +483,52 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
>>> copyback = 1;
>>> break;
>>>
>>> + case XEN_SYSCTL_cpu_hotplug:
>>> + {
>>> + unsigned int cpu = op->u.cpu_hotplug.cpu;
>>
>> I don't think this variable is very useful to keep. Instead use ...
>>
>>> + unsigned int hp_op = op->u.cpu_hotplug.op;
>>> + bool plug;
>>> + long (*fn)(void *data);
>>> + void *hcpu;
>>
>> void *hcpu = _p(op->u.cpu_hotplug.op);
>>
>> right here, dropping the assignments further down.
>>
>>> + ret = -EOPNOTSUPP;
>>> + if ( !IS_ENABLED(CONFIG_CPU_HOTPLUG) )
>>> + break;
>>> +
>>> + switch ( hp_op )
>>> + {
>>> + case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
>>> + plug = true;
>>> + fn = cpu_up_helper;
>>> + hcpu = _p(cpu);
>>> + break;
>>> +
>>> + case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
>>> + plug = false;
>>> + fn = cpu_down_helper;
>>> + hcpu = _p(cpu);
>>> + break;
>>> +
>>> + default:
>>> + fn = NULL;
>>> + break;
>>> + }
>>> +
>>> + if ( fn )
>>> + {
>>> + ret = plug ? xsm_resource_plug_core(XSM_HOOK)
>>> + : xsm_resource_unplug_core(XSM_HOOK);
>>> +
>>> + if ( !ret )
>>> + ret = continue_hypercall_on_cpu(0, fn, hcpu);
>>> +
>>> + break;
>>> + }
>>> +
>>> + /* Use the arch handler for cases not handled here */
>>> + fallthrough;
>>> + }
>>> +
>>> default:
>>> ret = arch_do_sysctl(op, u_sysctl);
>>> copyback = 0;
>>
>> This form of falling through may be a little risky, towards someone not
>> looking closely enough and inserting another case label immediately ahead
>> of the default one. While I don't think there's a really good solution to
>> this, please consider
>>
>> }
>> /* Use the arch handler for cases not handled above */
>> fallthrough;
>> default:
>>
>> instead.
>>
>
> Just want to clarirfy if I got the idea. Is this what you meant?
>
> switch ( op->cmd )
> {
> ....
> case XEN_SYSCTL_cpu_hotplug:
> {
> ....
> }
>
> /* Use the arch handler for cases not handled here */
> fallthrough;
> default:
> ret = arch_do_sysctl(op, u_sysctl);
> copyback = 0;
> break;
> }
I can't spot anything different from what I wrote, so (presumably) yes.
Jan
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH v6 4/5] tools: Allow building xen-hptool without CONFIG_MIGRATE
2026-03-12 9:39 [PATCH v6 0/5] Implement CPU hotplug on Arm Mykyta Poturai
` (2 preceding siblings ...)
2026-03-12 9:39 ` [PATCH v6 3/5] arm/sysctl: Implement cpu hotplug ops Mykyta Poturai
@ 2026-03-12 9:39 ` Mykyta Poturai
2026-03-12 9:39 ` [PATCH v6 5/5] docs: Document CPU hotplug Mykyta Poturai
4 siblings, 0 replies; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-12 9:39 UTC (permalink / raw)
To: xen-devel@lists.xenproject.org
Cc: Mykyta Poturai, Anthony PERARD, Juergen Gross
With CPU hotplug sysctls implemented on Arm it becomes useful to have a
tool for calling them.
According to the commit history it seems that putting hptool under
config MIGRATE was a measure to fix IA64 build. As IA64 is no longer
supported it can now be brought back. So build it unconditionally.
Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* don't change order in Makefile
v4->v5:
* make hptool always build
v3->v4:
* no changes
v2->v3:
* no changes
v1->v2:
* switch to configure from legacy config
---
tools/libs/guest/Makefile.common | 2 +-
tools/misc/Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/libs/guest/Makefile.common b/tools/libs/guest/Makefile.common
index b928a4a246..03dfcee7fa 100644
--- a/tools/libs/guest/Makefile.common
+++ b/tools/libs/guest/Makefile.common
@@ -7,6 +7,7 @@ OBJS-y += xg_private.o
OBJS-y += xg_domain.o
OBJS-y += xg_suspend.o
OBJS-y += xg_resume.o
+OBJS-y += xg_offline_page.o
ifeq ($(CONFIG_MIGRATE),y)
OBJS-y += xg_sr_common.o
OBJS-$(CONFIG_X86) += xg_sr_common_x86.o
@@ -17,7 +18,6 @@ OBJS-$(CONFIG_X86) += xg_sr_save_x86_pv.o
OBJS-$(CONFIG_X86) += xg_sr_save_x86_hvm.o
OBJS-y += xg_sr_restore.o
OBJS-y += xg_sr_save.o
-OBJS-y += xg_offline_page.o
else
OBJS-y += xg_nomigrate.o
endif
diff --git a/tools/misc/Makefile b/tools/misc/Makefile
index 6ee783f43e..5a206133f7 100644
--- a/tools/misc/Makefile
+++ b/tools/misc/Makefile
@@ -16,7 +16,7 @@ INSTALL_BIN += xencov_split
INSTALL_BIN += $(INSTALL_BIN-y)
# Everything to be installed in regular sbin/
-INSTALL_SBIN-$(CONFIG_MIGRATE) += xen-hptool
+INSTALL_SBIN += xen-hptool
INSTALL_SBIN-$(CONFIG_X86) += xen-hvmcrash
INSTALL_SBIN-$(CONFIG_X86) += xen-hvmctx
INSTALL_SBIN-$(CONFIG_X86) += xen-lowmemd
--
2.51.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH v6 5/5] docs: Document CPU hotplug
2026-03-12 9:39 [PATCH v6 0/5] Implement CPU hotplug on Arm Mykyta Poturai
` (3 preceding siblings ...)
2026-03-12 9:39 ` [PATCH v6 4/5] tools: Allow building xen-hptool without CONFIG_MIGRATE Mykyta Poturai
@ 2026-03-12 9:39 ` Mykyta Poturai
2026-03-18 8:28 ` Bertrand Marquis
4 siblings, 1 reply; 15+ messages in thread
From: Mykyta Poturai @ 2026-03-12 9:39 UTC (permalink / raw)
To: xen-devel@lists.xenproject.org
Cc: Mykyta Poturai, Andrew Cooper, Anthony PERARD, Michal Orzel,
Jan Beulich, Julien Grall, Roger Pau Monné,
Stefano Stabellini
Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* no changes
v4->v5:
* s/supported/implemented/
* update SUPPORT.md
v3->v4:
* update configuration section
v2->v3:
* patch introduced
---
SUPPORT.md | 1 +
docs/misc/cpu-hotplug.txt | 50 +++++++++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+)
create mode 100644 docs/misc/cpu-hotplug.txt
diff --git a/SUPPORT.md b/SUPPORT.md
index d441bccf37..7b93ae69e7 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -52,6 +52,7 @@ For the Cortex A77 r0p0 - r1p0, see Errata 1508412.
### ACPI CPU Hotplug
Status, x86: Experimental
+ Status, Arm64: Experimental
### Physical Memory
diff --git a/docs/misc/cpu-hotplug.txt b/docs/misc/cpu-hotplug.txt
new file mode 100644
index 0000000000..45f20c2002
--- /dev/null
+++ b/docs/misc/cpu-hotplug.txt
@@ -0,0 +1,50 @@
+CPU Hotplug
+===========
+
+CPU hotplug is a feature that allows pCPU cores to be added to or removed from a
+running system without requiring a reboot. It is implemented on x86 and Arm64
+architectures.
+
+Implementation Details
+----------------------
+
+CPU hotplug is implemented through the `XEN_SYSCTL_CPU_HOTPLUG_*` sysctl calls.
+The specific calls are:
+
+- `XEN_SYSCTL_CPU_HOTPLUG_ONLINE`: Brings a pCPU online
+- `XEN_SYSCTL_CPU_HOTPLUG_OFFLINE`: Takes a pCPU offline
+- `XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE`: Enables SMT threads (x86 only)
+- `XEN_SYSCTL_CPU_HOTPLUG_SMT_DISABLE`: Disables SMT threads (x86 only)
+
+All cores can be disabled, assuming hardware support, except for the boot core.
+Sysctl calls are routed to the boot core before doing any actual up/down
+operations on other cores.
+
+Configuration
+-------------
+
+The presence of the feature is controlled by CONFIG_CPU_HOTPLUG option. It is
+enabled by default on x86 architecture. On Arm64, the option is enabled by
+default when ITS, FFA, and TEE configs are disabled.
+xen-hptool userspace tool is built unconditionally.
+
+Usage
+-----
+
+Disable core:
+
+$ xen-hptool cpu-offline 2
+Prepare to offline CPU 2
+(XEN) Removing cpu 2 from runqueue 0
+CPU 2 offlined successfully
+
+Enable core:
+
+$ xen-hptool cpu-online 2
+Prepare to online CPU 2
+(XEN) Bringing up CPU2
+(XEN) GICv3: CPU2: Found redistributor in region 0 @00000a004005c000
+(XEN) CPU2: Guest atomics will try 1 times before pausing the domain
+(XEN) CPU 2 booted.
+(XEN) Adding cpu 2 to runqueue 0
+CPU 2 onlined successfully
--
2.51.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH v6 5/5] docs: Document CPU hotplug
2026-03-12 9:39 ` [PATCH v6 5/5] docs: Document CPU hotplug Mykyta Poturai
@ 2026-03-18 8:28 ` Bertrand Marquis
0 siblings, 0 replies; 15+ messages in thread
From: Bertrand Marquis @ 2026-03-18 8:28 UTC (permalink / raw)
To: Mykyta Poturai
Cc: xen-devel@lists.xenproject.org, Andrew Cooper, Anthony PERARD,
Michal Orzel, Jan Beulich, Julien Grall, Roger Pau Monné,
Stefano Stabellini
Hi Mykyta,
> On 12 Mar 2026, at 10:39, Mykyta Poturai <Mykyta_Poturai@epam.com> wrote:
>
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
> ---
>
> v5->v6:
> * no changes
>
> v4->v5:
> * s/supported/implemented/
> * update SUPPORT.md
>
> v3->v4:
> * update configuration section
>
> v2->v3:
> * patch introduced
> ---
> SUPPORT.md | 1 +
> docs/misc/cpu-hotplug.txt | 50 +++++++++++++++++++++++++++++++++++++++
> 2 files changed, 51 insertions(+)
> create mode 100644 docs/misc/cpu-hotplug.txt
>
> diff --git a/SUPPORT.md b/SUPPORT.md
> index d441bccf37..7b93ae69e7 100644
> --- a/SUPPORT.md
> +++ b/SUPPORT.md
> @@ -52,6 +52,7 @@ For the Cortex A77 r0p0 - r1p0, see Errata 1508412.
> ### ACPI CPU Hotplug
>
> Status, x86: Experimental
> + Status, Arm64: Experimental
>
> ### Physical Memory
>
> diff --git a/docs/misc/cpu-hotplug.txt b/docs/misc/cpu-hotplug.txt
> new file mode 100644
> index 0000000000..45f20c2002
> --- /dev/null
> +++ b/docs/misc/cpu-hotplug.txt
> @@ -0,0 +1,50 @@
> +CPU Hotplug
> +===========
> +
> +CPU hotplug is a feature that allows pCPU cores to be added to or removed from a
> +running system without requiring a reboot. It is implemented on x86 and Arm64
> +architectures.
> +
> +Implementation Details
> +----------------------
> +
> +CPU hotplug is implemented through the `XEN_SYSCTL_CPU_HOTPLUG_*` sysctl calls.
> +The specific calls are:
> +
> +- `XEN_SYSCTL_CPU_HOTPLUG_ONLINE`: Brings a pCPU online
> +- `XEN_SYSCTL_CPU_HOTPLUG_OFFLINE`: Takes a pCPU offline
> +- `XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE`: Enables SMT threads (x86 only)
> +- `XEN_SYSCTL_CPU_HOTPLUG_SMT_DISABLE`: Disables SMT threads (x86 only)
> +
> +All cores can be disabled, assuming hardware support, except for the boot core.
> +Sysctl calls are routed to the boot core before doing any actual up/down
> +operations on other cores.
> +
> +Configuration
> +-------------
> +
> +The presence of the feature is controlled by CONFIG_CPU_HOTPLUG option. It is
> +enabled by default on x86 architecture. On Arm64, the option is enabled by
> +default when ITS, FFA, and TEE configs are disabled.
> +xen-hptool userspace tool is built unconditionally.
> +
> +Usage
> +-----
> +
> +Disable core:
> +
> +$ xen-hptool cpu-offline 2
> +Prepare to offline CPU 2
> +(XEN) Removing cpu 2 from runqueue 0
> +CPU 2 offlined successfully
> +
> +Enable core:
> +
> +$ xen-hptool cpu-online 2
> +Prepare to online CPU 2
> +(XEN) Bringing up CPU2
> +(XEN) GICv3: CPU2: Found redistributor in region 0 @00000a004005c000
> +(XEN) CPU2: Guest atomics will try 1 times before pausing the domain
> +(XEN) CPU 2 booted.
> +(XEN) Adding cpu 2 to runqueue 0
> +CPU 2 onlined successfully
It would be good to mention here what has been tested and what was out of scope
or not supported at all (ITS for example).
As part of this list I would mention:
- TEE in general (FFA or optee)
- passthrough (unless you did some tests, but you did not answer my question
about that in the previous version of the series)
And maybe even describe quickly how the tests were executed.
Just so that people are aware of what scope is currently realistic to try.
Cheers
Bertrand
> --
> 2.51.2
>
^ permalink raw reply [flat|nested] 15+ messages in thread