public inbox for linux-arm-kernel@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection
@ 2026-04-22 10:02 Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}() Marc Zyngier
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-04-22 10:02 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: Deepanshu Kartikey, Joey Gouly, Suzuki K Poulose, Oliver Upton,
	Zenghui Yu

This is the second version of this series aiming at fixing issues
with vgic-v2 being initialised from non-preemptible context.

* From v1 [1]:

  - Repaint kvm_timer_irq_can_fire() to kvm_timer_enabled()

  - Drop duplicate kvm_timer_update_status() call

  - Force lazy init on the irqfd slow-path for SPIs

[1] https://lore.kernel.org/r/20260417124612.2770268-1-maz@kernel.org

Marc Zyngier (4):
  KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to
    kvm_timer_{pending,enabled}()
  KVM: arm64: timer: Kill the per-timer level cache
  KVM: arm64: vgic-v2: Force vgic init on injection outside the run loop
  KVM: arm64: vgic-v2: Don't init the vgic on in-kernel interrupt
    injection

 arch/arm64/kvm/arch_timer.c      | 59 +++++++++++++++-----------------
 arch/arm64/kvm/arm.c             |  7 ++++
 arch/arm64/kvm/vgic/vgic-irqfd.c |  6 ++++
 arch/arm64/kvm/vgic/vgic.c       |  6 ++--
 include/kvm/arm_arch_timer.h     |  5 ---
 5 files changed, 43 insertions(+), 40 deletions(-)

-- 
2.47.3



^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}()
  2026-04-22 10:02 [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection Marc Zyngier
@ 2026-04-22 10:02 ` Marc Zyngier
  2026-04-22 13:30   ` Joey Gouly
  2026-04-22 10:02 ` [PATCH v2 2/4] KVM: arm64: timer: Kill the per-timer level cache Marc Zyngier
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 6+ messages in thread
From: Marc Zyngier @ 2026-04-22 10:02 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: Deepanshu Kartikey, Joey Gouly, Suzuki K Poulose, Oliver Upton,
	Zenghui Yu

kvm_timer_should_fire() seems to date back to a time where the author
of the timer code didn't seem to have made the word "pending" part of
their vocabulary.

Having since slightly improved on that front, let's rename this predicate
to kvm_timer_pending(), which clearly indicates whether the timer
interrupt is pending or not.

Similarly, kvm_timer_irq_can_fire() is renamed to kvm_timer_enabled().

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/arch_timer.c | 49 ++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index cbea4d9ee9552..22e79ecb34bc4 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -39,10 +39,9 @@ static const u8 default_ppi[] = {
 	[TIMER_HVTIMER] = 28,
 };
 
-static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 				 struct arch_timer_context *timer_ctx);
-static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
+static bool kvm_timer_pending(struct arch_timer_context *timer_ctx);
 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 				struct arch_timer_context *timer,
 				enum kvm_arch_timer_regs treg,
@@ -224,7 +223,7 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	else
 		ctx = map.direct_ptimer;
 
-	if (kvm_timer_should_fire(ctx))
+	if (kvm_timer_pending(ctx))
 		kvm_timer_update_irq(vcpu, true, ctx);
 
 	if (userspace_irqchip(vcpu->kvm) &&
@@ -257,7 +256,7 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
 }
 
-static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+static bool kvm_timer_enabled(struct arch_timer_context *timer_ctx)
 {
 	WARN_ON(timer_ctx && timer_ctx->loaded);
 	return timer_ctx &&
@@ -294,7 +293,7 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
 		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
 
 		WARN(ctx->loaded, "timer %d loaded\n", i);
-		if (kvm_timer_irq_can_fire(ctx))
+		if (kvm_timer_enabled(ctx))
 			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
 	}
 
@@ -358,7 +357,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
 	return HRTIMER_NORESTART;
 }
 
-static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
+static bool kvm_timer_pending(struct arch_timer_context *timer_ctx)
 {
 	enum kvm_arch_timers index;
 	u64 cval, now;
@@ -391,7 +390,7 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
 	}
 
-	if (!kvm_timer_irq_can_fire(timer_ctx))
+	if (!kvm_timer_enabled(timer_ctx))
 		return false;
 
 	cval = timer_get_cval(timer_ctx);
@@ -417,9 +416,9 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
 	/* Populate the device bitmap with the timer states */
 	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
 				    KVM_ARM_DEV_EL1_PTIMER);
-	if (kvm_timer_should_fire(vtimer))
+	if (kvm_timer_pending(vtimer))
 		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
-	if (kvm_timer_should_fire(ptimer))
+	if (kvm_timer_pending(ptimer))
 		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
 }
 
@@ -473,21 +472,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 /* Only called for a fully emulated timer */
 static void timer_emulate(struct arch_timer_context *ctx)
 {
-	bool should_fire = kvm_timer_should_fire(ctx);
+	bool pending = kvm_timer_pending(ctx);
 
-	trace_kvm_timer_emulate(ctx, should_fire);
+	trace_kvm_timer_emulate(ctx, pending);
 
-	if (should_fire != ctx->irq.level)
-		kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);
+	if (pending != ctx->irq.level)
+		kvm_timer_update_irq(timer_context_to_vcpu(ctx), pending, ctx);
 
-	kvm_timer_update_status(ctx, should_fire);
+	kvm_timer_update_status(ctx, pending);
 
 	/*
 	 * If the timer can fire now, we don't need to have a soft timer
 	 * scheduled for the future.  If the timer cannot fire at all,
 	 * then we also don't need a soft timer.
 	 */
-	if (should_fire || !kvm_timer_irq_can_fire(ctx))
+	if (pending || !kvm_timer_enabled(ctx))
 		return;
 
 	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
@@ -594,10 +593,10 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 	 * If no timers are capable of raising interrupts (disabled or
 	 * masked), then there's no more work for us to do.
 	 */
-	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
-	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
-	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
-	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
+	if (!kvm_timer_enabled(map.direct_vtimer) &&
+	    !kvm_timer_enabled(map.direct_ptimer) &&
+	    !kvm_timer_enabled(map.emul_vtimer) &&
+	    !kvm_timer_enabled(map.emul_ptimer) &&
 	    !vcpu_has_wfit_active(vcpu))
 		return;
 
@@ -685,7 +684,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 	 * this point and the register restoration, we'll take the
 	 * interrupt anyway.
 	 */
-	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);
+	kvm_timer_update_irq(vcpu, kvm_timer_pending(ctx), ctx);
 
 	if (irqchip_in_kernel(vcpu->kvm))
 		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
@@ -706,7 +705,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 	 * this point and the register restoration, we'll take the
 	 * interrupt anyway.
 	 */
-	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
+	kvm_timer_update_irq(vcpu, kvm_timer_pending(vtimer), vtimer);
 
 	/*
 	 * When using a userspace irqchip with the architected timers and a
@@ -917,8 +916,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
 	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
 	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
 
-	return kvm_timer_should_fire(vtimer) != vlevel ||
-	       kvm_timer_should_fire(ptimer) != plevel;
+	return kvm_timer_pending(vtimer) != vlevel ||
+	       kvm_timer_pending(ptimer) != plevel;
 }
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1006,7 +1005,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (!kvm_timer_should_fire(vtimer)) {
+	if (!kvm_timer_pending(vtimer)) {
 		kvm_timer_update_irq(vcpu, false, vtimer);
 		if (static_branch_likely(&has_gic_active_state))
 			set_timer_irq_phys_active(vtimer, false);
@@ -1579,7 +1578,7 @@ static bool kvm_arch_timer_get_input_level(int vintid)
 
 		ctx = vcpu_get_timer(vcpu, i);
 		if (timer_irq(ctx) == vintid)
-			return kvm_timer_should_fire(ctx);
+			return kvm_timer_pending(ctx);
 	}
 
 	/* A timer IRQ has fired, but no matching timer was found? */
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 2/4] KVM: arm64: timer: Kill the per-timer level cache
  2026-04-22 10:02 [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}() Marc Zyngier
@ 2026-04-22 10:02 ` Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 3/4] KVM: arm64: vgic-v2: Force vgic init on injection outside the run loop Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 4/4] KVM: arm64: vgic-v2: Don't init the vgic on in-kernel interrupt injection Marc Zyngier
  3 siblings, 0 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-04-22 10:02 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: Deepanshu Kartikey, Joey Gouly, Suzuki K Poulose, Oliver Upton,
	Zenghui Yu

The timer code makes use of a per-timer irq level cache, which
looks like a very minor optimisation to avoid taking a lock upon
updating the GIC view of the interrupt when it is unchanged from
the previous state.

This is getting in the way of more important correctness issues,
so get rid of the cache, which simplifies a couple of minor things.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/arch_timer.c  | 20 +++++++++-----------
 include/kvm/arm_arch_timer.h |  5 -----
 2 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 22e79ecb34bc4..5cfe223da2996 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -445,9 +445,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 {
 	kvm_timer_update_status(timer_ctx, new_level);
 
-	timer_ctx->irq.level = new_level;
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
-				   timer_ctx->irq.level);
+				   new_level);
 
 	if (userspace_irqchip(vcpu->kvm))
 		return;
@@ -465,7 +464,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 
 	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
 			    timer_irq(timer_ctx),
-			    timer_ctx->irq.level,
+			    new_level,
 			    timer_ctx);
 }
 
@@ -476,10 +475,7 @@ static void timer_emulate(struct arch_timer_context *ctx)
 
 	trace_kvm_timer_emulate(ctx, pending);
 
-	if (pending != ctx->irq.level)
-		kvm_timer_update_irq(timer_context_to_vcpu(ctx), pending, ctx);
-
-	kvm_timer_update_status(ctx, pending);
+	kvm_timer_update_irq(timer_context_to_vcpu(ctx), pending, ctx);
 
 	/*
 	 * If the timer can fire now, we don't need to have a soft timer
@@ -676,6 +672,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo
 static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 {
 	struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
+	bool pending = kvm_timer_pending(ctx);
 	bool phys_active = false;
 
 	/*
@@ -684,12 +681,12 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 	 * this point and the register restoration, we'll take the
 	 * interrupt anyway.
 	 */
-	kvm_timer_update_irq(vcpu, kvm_timer_pending(ctx), ctx);
+	kvm_timer_update_irq(vcpu, pending, ctx);
 
 	if (irqchip_in_kernel(vcpu->kvm))
 		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
 
-	phys_active |= ctx->irq.level;
+	phys_active |= pending;
 	phys_active |= vgic_is_v5(vcpu->kvm);
 
 	set_timer_irq_phys_active(ctx, phys_active);
@@ -698,6 +695,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+	bool pending = kvm_timer_pending(vtimer);
 
 	/*
 	 * Update the timer output so that it is likely to match the
@@ -705,7 +703,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 	 * this point and the register restoration, we'll take the
 	 * interrupt anyway.
 	 */
-	kvm_timer_update_irq(vcpu, kvm_timer_pending(vtimer), vtimer);
+	kvm_timer_update_irq(vcpu, pending, vtimer);
 
 	/*
 	 * When using a userspace irqchip with the architected timers and a
@@ -717,7 +715,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 	 * being de-asserted, we unmask the interrupt again so that we exit
 	 * from the guest when the timer fires.
 	 */
-	if (vtimer->irq.level)
+	if (pending)
 		disable_percpu_irq(host_vtimer_irq);
 	else
 		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index bf8cc9589bd09..2c26d457c3510 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -66,11 +66,6 @@ struct arch_timer_context {
 	 */
 	bool				loaded;
 
-	/* Output level of the timer IRQ */
-	struct {
-		bool			level;
-	} irq;
-
 	/* Who am I? */
 	enum kvm_arch_timers		timer_id;
 
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 3/4] KVM: arm64: vgic-v2: Force vgic init on injection outside the run loop
  2026-04-22 10:02 [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}() Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 2/4] KVM: arm64: timer: Kill the per-timer level cache Marc Zyngier
@ 2026-04-22 10:02 ` Marc Zyngier
  2026-04-22 10:02 ` [PATCH v2 4/4] KVM: arm64: vgic-v2: Don't init the vgic on in-kernel interrupt injection Marc Zyngier
  3 siblings, 0 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-04-22 10:02 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: Deepanshu Kartikey, Joey Gouly, Suzuki K Poulose, Oliver Upton,
	Zenghui Yu

Make sure that any attempt to inject an interrupt from userspace
or an irqfd results in the GICv2 lazy init to take place.

This is not currently necessary as the init is also performed on
*any* interrupt injection. But as we're about to remove that,
let's introduce it here.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/arm.c             | 7 +++++++
 arch/arm64/kvm/vgic/vgic-irqfd.c | 6 ++++++
 2 files changed, 13 insertions(+)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 176cbe8baad30..e856cf4099f42 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -51,6 +51,7 @@
 
 #include <linux/irqchip/arm-gic-v5.h>
 
+#include "vgic/vgic.h"
 #include "sys_regs.h"
 
 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
@@ -1475,6 +1476,12 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 
 	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
 
+	if (irqchip_in_kernel(kvm)) {
+		int ret = vgic_lazy_init(kvm);
+		if (ret)
+			return ret;
+	}
+
 	switch (irq_type) {
 	case KVM_ARM_IRQ_TYPE_CPU:
 		if (irqchip_in_kernel(kvm))
diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c
index b9b86e3a6c862..19a1094536e6a 100644
--- a/arch/arm64/kvm/vgic/vgic-irqfd.c
+++ b/arch/arm64/kvm/vgic/vgic-irqfd.c
@@ -20,9 +20,15 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
 			int level, bool line_status)
 {
 	unsigned int spi_id = e->irqchip.pin + VGIC_NR_PRIVATE_IRQS;
+	int ret;
 
 	if (!vgic_valid_spi(kvm, spi_id))
 		return -EINVAL;
+
+	ret = vgic_lazy_init(kvm);
+	if (ret)
+		return ret;
+
 	return kvm_vgic_inject_irq(kvm, NULL, spi_id, level, NULL);
 }
 
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 4/4] KVM: arm64: vgic-v2: Don't init the vgic on in-kernel interrupt injection
  2026-04-22 10:02 [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection Marc Zyngier
                   ` (2 preceding siblings ...)
  2026-04-22 10:02 ` [PATCH v2 3/4] KVM: arm64: vgic-v2: Force vgic init on injection outside the run loop Marc Zyngier
@ 2026-04-22 10:02 ` Marc Zyngier
  3 siblings, 0 replies; 6+ messages in thread
From: Marc Zyngier @ 2026-04-22 10:02 UTC (permalink / raw)
  To: kvmarm, linux-arm-kernel
  Cc: Deepanshu Kartikey, Joey Gouly, Suzuki K Poulose, Oliver Upton,
	Zenghui Yu

We now have the lazy init on three paths:

- on first run of a vcpu
- on first injection of an interrupt from userspace and irqfd
- on first injection of an interrupt from kernel space as
  part of the device emulation (timers, PMU, vgic MI)

Given that we recompute the state of each in-kernel interrupt
every time we are about to enter the guest, we can drop the lazy
init from the kernel injection path.

This solves a bunch of issues related to vgic_lazy_init() being called
in non-preemptible context, such as vcpu reset.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/vgic/vgic.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 1e9fe8764584d..9e29f03d3463c 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -534,11 +534,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 {
 	struct vgic_irq *irq;
 	unsigned long flags;
-	int ret;
 
-	ret = vgic_lazy_init(kvm);
-	if (ret)
-		return ret;
+	if (unlikely(!vgic_initialized(kvm)))
+		return 0;
 
 	if (!vcpu && irq_is_private(kvm, intid))
 		return -EINVAL;
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}()
  2026-04-22 10:02 ` [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}() Marc Zyngier
@ 2026-04-22 13:30   ` Joey Gouly
  0 siblings, 0 replies; 6+ messages in thread
From: Joey Gouly @ 2026-04-22 13:30 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: kvmarm, linux-arm-kernel, Deepanshu Kartikey, Suzuki K Poulose,
	Oliver Upton, Zenghui Yu

On Wed, Apr 22, 2026 at 11:02:07AM +0100, Marc Zyngier wrote:
> kvm_timer_should_fire() seems to date back to a time where the author
> of the timer code didn't seem to have made the word "pending" part of
> their vocabulary.
> 
> Having since slightly improved on that front, let's rename this predicate
> to kvm_timer_pending(), which clearly indicates whether the timer
> interrupt is pending or not.
> 
> Similarly, kvm_timer_irq_can_fire() is renamed to kvm_timer_enabled().
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>

Minor comment below.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>

> ---
>  arch/arm64/kvm/arch_timer.c | 49 ++++++++++++++++++-------------------
>  1 file changed, 24 insertions(+), 25 deletions(-)
> 
> diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
> index cbea4d9ee9552..22e79ecb34bc4 100644
> --- a/arch/arm64/kvm/arch_timer.c
> +++ b/arch/arm64/kvm/arch_timer.c
> @@ -39,10 +39,9 @@ static const u8 default_ppi[] = {
>  	[TIMER_HVTIMER] = 28,
>  };
>  
> -static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
>  static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
>  				 struct arch_timer_context *timer_ctx);
> -static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
> +static bool kvm_timer_pending(struct arch_timer_context *timer_ctx);
>  static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
>  				struct arch_timer_context *timer,
>  				enum kvm_arch_timer_regs treg,
> @@ -224,7 +223,7 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
>  	else
>  		ctx = map.direct_ptimer;
>  
> -	if (kvm_timer_should_fire(ctx))
> +	if (kvm_timer_pending(ctx))
>  		kvm_timer_update_irq(vcpu, true, ctx);
>  
>  	if (userspace_irqchip(vcpu->kvm) &&
> @@ -257,7 +256,7 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
>  	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
>  }
>  
> -static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
> +static bool kvm_timer_enabled(struct arch_timer_context *timer_ctx)
>  {
>  	WARN_ON(timer_ctx && timer_ctx->loaded);
>  	return timer_ctx &&
> @@ -294,7 +293,7 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
>  		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
>  
>  		WARN(ctx->loaded, "timer %d loaded\n", i);
> -		if (kvm_timer_irq_can_fire(ctx))
> +		if (kvm_timer_enabled(ctx))
>  			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
>  	}
>  
> @@ -358,7 +357,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
>  	return HRTIMER_NORESTART;
>  }
>  
> -static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
> +static bool kvm_timer_pending(struct arch_timer_context *timer_ctx)
>  {
>  	enum kvm_arch_timers index;
>  	u64 cval, now;
> @@ -391,7 +390,7 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
>  		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
>  	}
>  
> -	if (!kvm_timer_irq_can_fire(timer_ctx))
> +	if (!kvm_timer_enabled(timer_ctx))
>  		return false;
>  
>  	cval = timer_get_cval(timer_ctx);
> @@ -417,9 +416,9 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
>  	/* Populate the device bitmap with the timer states */
>  	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
>  				    KVM_ARM_DEV_EL1_PTIMER);
> -	if (kvm_timer_should_fire(vtimer))
> +	if (kvm_timer_pending(vtimer))
>  		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
> -	if (kvm_timer_should_fire(ptimer))
> +	if (kvm_timer_pending(ptimer))
>  		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
>  }
>  
> @@ -473,21 +472,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
>  /* Only called for a fully emulated timer */
>  static void timer_emulate(struct arch_timer_context *ctx)
>  {
> -	bool should_fire = kvm_timer_should_fire(ctx);
> +	bool pending = kvm_timer_pending(ctx);
>  
> -	trace_kvm_timer_emulate(ctx, should_fire);
> +	trace_kvm_timer_emulate(ctx, pending);
>  
> -	if (should_fire != ctx->irq.level)
> -		kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);
> +	if (pending != ctx->irq.level)
> +		kvm_timer_update_irq(timer_context_to_vcpu(ctx), pending, ctx);
>  
> -	kvm_timer_update_status(ctx, should_fire);
> +	kvm_timer_update_status(ctx, pending);
>  
>  	/*
>  	 * If the timer can fire now, we don't need to have a soft timer
>  	 * scheduled for the future.  If the timer cannot fire at all,
>  	 * then we also don't need a soft timer.
>  	 */

Could update this comment to use pending/enabled now?

> -	if (should_fire || !kvm_timer_irq_can_fire(ctx))
> +	if (pending || !kvm_timer_enabled(ctx))
>  		return;
>  
>  	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
> @@ -594,10 +593,10 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
>  	 * If no timers are capable of raising interrupts (disabled or
>  	 * masked), then there's no more work for us to do.
>  	 */
> -	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
> -	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
> -	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
> -	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
> +	if (!kvm_timer_enabled(map.direct_vtimer) &&
> +	    !kvm_timer_enabled(map.direct_ptimer) &&
> +	    !kvm_timer_enabled(map.emul_vtimer) &&
> +	    !kvm_timer_enabled(map.emul_ptimer) &&
>  	    !vcpu_has_wfit_active(vcpu))
>  		return;
>  
> @@ -685,7 +684,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
>  	 * this point and the register restoration, we'll take the
>  	 * interrupt anyway.
>  	 */
> -	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);
> +	kvm_timer_update_irq(vcpu, kvm_timer_pending(ctx), ctx);
>  
>  	if (irqchip_in_kernel(vcpu->kvm))
>  		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
> @@ -706,7 +705,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
>  	 * this point and the register restoration, we'll take the
>  	 * interrupt anyway.
>  	 */
> -	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
> +	kvm_timer_update_irq(vcpu, kvm_timer_pending(vtimer), vtimer);
>  
>  	/*
>  	 * When using a userspace irqchip with the architected timers and a
> @@ -917,8 +916,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
>  	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
>  	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
>  
> -	return kvm_timer_should_fire(vtimer) != vlevel ||
> -	       kvm_timer_should_fire(ptimer) != plevel;
> +	return kvm_timer_pending(vtimer) != vlevel ||
> +	       kvm_timer_pending(ptimer) != plevel;
>  }
>  
>  void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
> @@ -1006,7 +1005,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
>  {
>  	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
>  
> -	if (!kvm_timer_should_fire(vtimer)) {
> +	if (!kvm_timer_pending(vtimer)) {
>  		kvm_timer_update_irq(vcpu, false, vtimer);
>  		if (static_branch_likely(&has_gic_active_state))
>  			set_timer_irq_phys_active(vtimer, false);
> @@ -1579,7 +1578,7 @@ static bool kvm_arch_timer_get_input_level(int vintid)
>  
>  		ctx = vcpu_get_timer(vcpu, i);
>  		if (timer_irq(ctx) == vintid)
> -			return kvm_timer_should_fire(ctx);
> +			return kvm_timer_pending(ctx);
>  	}
>  
>  	/* A timer IRQ has fired, but no matching timer was found? */
> -- 
> 2.47.3
> 


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2026-04-22 13:30 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-22 10:02 [PATCH v2 0/4] KVM: arm64: Don't perform vgic-v2 lazy init on timer injection Marc Zyngier
2026-04-22 10:02 ` [PATCH v2 1/4] KVM: arm64: timer: Repaint kvm_timer_{should,irq_can}_fire() to kvm_timer_{pending,enabled}() Marc Zyngier
2026-04-22 13:30   ` Joey Gouly
2026-04-22 10:02 ` [PATCH v2 2/4] KVM: arm64: timer: Kill the per-timer level cache Marc Zyngier
2026-04-22 10:02 ` [PATCH v2 3/4] KVM: arm64: vgic-v2: Force vgic init on injection outside the run loop Marc Zyngier
2026-04-22 10:02 ` [PATCH v2 4/4] KVM: arm64: vgic-v2: Don't init the vgic on in-kernel interrupt injection Marc Zyngier

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox