* [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 9:11 ` Zenghui Yu
2023-09-20 18:17 ` [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id Marc Zyngier
` (11 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Passing a vcpu_id to kvm_vgic_inject_irq() is silly for two reasons:
- we often confuse vcpu_id and vcpu_idx
- we eventually have to convert it back to a vcpu
- we can't count
Instead, pass a vcpu pointer, which is unambiguous. A NULL vcpu
is also allowed for interrupts that are not private to a vcpu
(such as SPIs).
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/arch_timer.c | 2 +-
arch/arm64/kvm/arm.c | 20 ++++++++++----------
arch/arm64/kvm/pmu-emul.c | 2 +-
arch/arm64/kvm/vgic/vgic-irqfd.c | 2 +-
arch/arm64/kvm/vgic/vgic.c | 12 +++++-------
include/kvm/arm_vgic.h | 4 ++--
6 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 6dcdae4d38cb..1f828f3b854c 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -458,7 +458,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 4866b3f7b4ea..872679a0cbd7 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1134,27 +1134,27 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
u32 irq = irq_level->irq;
- unsigned int irq_type, vcpu_idx, irq_num;
+ unsigned int irq_type, vcpu_id, irq_num;
int nrcpus = atomic_read(&kvm->online_vcpus);
struct kvm_vcpu *vcpu = NULL;
bool level = irq_level->level;
irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
- vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
- vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
+ vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+ vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
- trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+ trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
switch (irq_type) {
case KVM_ARM_IRQ_TYPE_CPU:
if (irqchip_in_kernel(kvm))
return -ENXIO;
- if (vcpu_idx >= nrcpus)
+ if (vcpu_id >= nrcpus)
return -EINVAL;
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -EINVAL;
@@ -1166,17 +1166,17 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (!irqchip_in_kernel(kvm))
return -ENXIO;
- if (vcpu_idx >= nrcpus)
+ if (vcpu_id >= nrcpus)
return -EINVAL;
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -EINVAL;
if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
+ return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
@@ -1184,7 +1184,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
+ return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
}
return -EINVAL;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6b066e04dc5d..3afb281ed8d2 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -348,7 +348,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
pmu->irq_level = overflow;
if (likely(irqchip_in_kernel(vcpu->kvm))) {
- int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
pmu->irq_num, overflow, pmu);
WARN_ON(ret);
}
diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c
index 475059bacedf..8c711deb25aa 100644
--- a/arch/arm64/kvm/vgic/vgic-irqfd.c
+++ b/arch/arm64/kvm/vgic/vgic-irqfd.c
@@ -23,7 +23,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
if (!vgic_valid_spi(kvm, spi_id))
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);
+ return kvm_vgic_inject_irq(kvm, NULL, spi_id, level, NULL);
}
/**
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 8be4c1ebdec2..db2a95762b1b 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -422,7 +422,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
/**
* kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
* @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
+ * @vcpu: The CPU for PPIs or NULL for global interrupts
* @intid: The INTID to inject a new state to.
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
@@ -436,24 +436,22 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
* level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner)
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner)
{
- struct kvm_vcpu *vcpu;
struct vgic_irq *irq;
unsigned long flags;
int ret;
- trace_vgic_update_irq_pending(cpuid, intid, level);
-
ret = vgic_lazy_init(kvm);
if (ret)
return ret;
- vcpu = kvm_get_vcpu(kvm, cpuid);
if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
+ trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
+
irq = vgic_get_irq(kvm, vcpu, intid);
if (!irq)
return -EINVAL;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 5b27f94d4fad..8cc38e836f54 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer
2023-09-20 18:17 ` [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer Marc Zyngier
@ 2023-09-21 9:11 ` Zenghui Yu
2023-09-21 11:20 ` Marc Zyngier
0 siblings, 1 reply; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 9:11 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023/9/21 2:17, Marc Zyngier wrote:
> Passing a vcpu_id to kvm_vgic_inject_irq() is silly for two reasons:
>
> - we often confuse vcpu_id and vcpu_idx
> - we eventually have to convert it back to a vcpu
> - we can't count
>
> Instead, pass a vcpu pointer, which is unambiguous. A NULL vcpu
> is also allowed for interrupts that are not private to a vcpu
> (such as SPIs).
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/arch_timer.c | 2 +-
> arch/arm64/kvm/arm.c | 20 ++++++++++----------
> arch/arm64/kvm/pmu-emul.c | 2 +-
> arch/arm64/kvm/vgic/vgic-irqfd.c | 2 +-
> arch/arm64/kvm/vgic/vgic.c | 12 +++++-------
> include/kvm/arm_vgic.h | 4 ++--
> 6 files changed, 20 insertions(+), 22 deletions(-)
>
> diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
> index 6dcdae4d38cb..1f828f3b854c 100644
> --- a/arch/arm64/kvm/arch_timer.c
> +++ b/arch/arm64/kvm/arch_timer.c
> @@ -458,7 +458,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
> timer_ctx->irq.level);
>
> if (!userspace_irqchip(vcpu->kvm)) {
> - ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
> + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
> timer_irq(timer_ctx),
> timer_ctx->irq.level,
> timer_ctx);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 4866b3f7b4ea..872679a0cbd7 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1134,27 +1134,27 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
> bool line_status)
> {
> u32 irq = irq_level->irq;
> - unsigned int irq_type, vcpu_idx, irq_num;
> + unsigned int irq_type, vcpu_id, irq_num;
> int nrcpus = atomic_read(&kvm->online_vcpus);
> struct kvm_vcpu *vcpu = NULL;
> bool level = irq_level->level;
>
> irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
> - vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
> - vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
> + vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
> + vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
> irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
>
> - trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
> + trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
>
> switch (irq_type) {
> case KVM_ARM_IRQ_TYPE_CPU:
> if (irqchip_in_kernel(kvm))
> return -ENXIO;
>
> - if (vcpu_idx >= nrcpus)
> + if (vcpu_id >= nrcpus)
> return -EINVAL;
What we actually need to check is 'vcpu->vcpu_idx >= nrcpus' and this is
covered by the 'if (!vcpu)' statement below. Let's just drop this
_incorrect_ checking?
>
> - vcpu = kvm_get_vcpu(kvm, vcpu_idx);
> + vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
> if (!vcpu)
> return -EINVAL;
>
> @@ -1166,17 +1166,17 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
> if (!irqchip_in_kernel(kvm))
> return -ENXIO;
>
> - if (vcpu_idx >= nrcpus)
> + if (vcpu_id >= nrcpus)
> return -EINVAL;
Same here.
Thanks,
Zenghui
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer
2023-09-21 9:11 ` Zenghui Yu
@ 2023-09-21 11:20 ` Marc Zyngier
0 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-21 11:20 UTC (permalink / raw)
To: Zenghui Yu
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On Thu, 21 Sep 2023 10:11:00 +0100,
Zenghui Yu <yuzenghui@huawei.com> wrote:
>
> On 2023/9/21 2:17, Marc Zyngier wrote:
> > Passing a vcpu_id to kvm_vgic_inject_irq() is silly for two reasons:
> >
> > - we often confuse vcpu_id and vcpu_idx
> > - we eventually have to convert it back to a vcpu
> > - we can't count
> >
> > Instead, pass a vcpu pointer, which is unambiguous. A NULL vcpu
> > is also allowed for interrupts that are not private to a vcpu
> > (such as SPIs).
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> > arch/arm64/kvm/arch_timer.c | 2 +-
> > arch/arm64/kvm/arm.c | 20 ++++++++++----------
> > arch/arm64/kvm/pmu-emul.c | 2 +-
> > arch/arm64/kvm/vgic/vgic-irqfd.c | 2 +-
> > arch/arm64/kvm/vgic/vgic.c | 12 +++++-------
> > include/kvm/arm_vgic.h | 4 ++--
> > 6 files changed, 20 insertions(+), 22 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
> > index 6dcdae4d38cb..1f828f3b854c 100644
> > --- a/arch/arm64/kvm/arch_timer.c
> > +++ b/arch/arm64/kvm/arch_timer.c
> > @@ -458,7 +458,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
> > timer_ctx->irq.level);
> > if (!userspace_irqchip(vcpu->kvm)) {
> > - ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
> > + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
> > timer_irq(timer_ctx),
> > timer_ctx->irq.level,
> > timer_ctx);
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 4866b3f7b4ea..872679a0cbd7 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -1134,27 +1134,27 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
> > bool line_status)
> > {
> > u32 irq = irq_level->irq;
> > - unsigned int irq_type, vcpu_idx, irq_num;
> > + unsigned int irq_type, vcpu_id, irq_num;
> > int nrcpus = atomic_read(&kvm->online_vcpus);
> > struct kvm_vcpu *vcpu = NULL;
> > bool level = irq_level->level;
> > irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) &
> > KVM_ARM_IRQ_TYPE_MASK;
> > - vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
> > - vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
> > + vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
> > + vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
> > irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
> > - trace_kvm_irq_line(irq_type, vcpu_idx, irq_num,
> > irq_level->level);
> > + trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
> > switch (irq_type) {
> > case KVM_ARM_IRQ_TYPE_CPU:
> > if (irqchip_in_kernel(kvm))
> > return -ENXIO;
> > - if (vcpu_idx >= nrcpus)
> > + if (vcpu_id >= nrcpus)
> > return -EINVAL;
>
> What we actually need to check is 'vcpu->vcpu_idx >= nrcpus' and this is
> covered by the 'if (!vcpu)' statement below. Let's just drop this
> _incorrect_ checking?
Good point. Let me fix this.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 9:14 ` Zenghui Yu
2023-09-20 18:17 ` [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation Marc Zyngier
` (10 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Since our emulated ITS advertises GITS_TYPER.PTA=0, the target
address associated to a collection is a PE number and not
an address. So far, so good. However, the PE number is what userspace
has provided us (aka the vcpu_id), and not the internal vcpu
index.
Make sure we consistently retrieve the vcpu by ID rather than
by index, adding a helper that deals with most of the cases.
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/vgic/vgic-its.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 5fe2365a629f..4aadcd24f6f6 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
return ret;
}
+static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
+ struct its_collection *col)
+{
+ return kvm_get_vcpu_by_id(kvm, col->target_addr);
+}
+
/*
* Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
* is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
if (!its_is_collection_mapped(ite->collection))
return;
- vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+ vcpu = collection_to_vcpu(kvm, ite->collection);
update_affinity(ite->irq, vcpu);
}
@@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
if (!ite || !its_is_collection_mapped(ite->collection))
return E_ITS_INT_UNMAPPED_INTERRUPT;
- vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+ vcpu = collection_to_vcpu(kvm, ite->collection);
if (!vcpu)
return E_ITS_INT_UNMAPPED_INTERRUPT;
@@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
return E_ITS_MOVI_UNMAPPED_COLLECTION;
ite->collection = collection;
- vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+ vcpu = collection_to_vcpu(kvm, collection);
vgic_its_invalidate_cache(kvm);
@@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
}
if (its_is_collection_mapped(collection))
- vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+ vcpu = collection_to_vcpu(kvm, collection);
irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
if (IS_ERR(irq)) {
@@ -1382,7 +1388,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
if (!its_is_collection_mapped(collection))
return E_ITS_INVALL_UNMAPPED_COLLECTION;
- vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+ vcpu = collection_to_vcpu(kvm, collection);
vgic_its_invall(vcpu);
return 0;
@@ -1413,8 +1419,9 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
if (target1_addr == target2_addr)
return 0;
- vcpu1 = kvm_get_vcpu(kvm, target1_addr);
- vcpu2 = kvm_get_vcpu(kvm, target2_addr);
+ /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+ vcpu1 = kvm_get_vcpu_by_id(kvm, target1_addr);
+ vcpu2 = kvm_get_vcpu_by_id(kvm, target2_addr);
irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
if (irq_count < 0)
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id
2023-09-20 18:17 ` [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id Marc Zyngier
@ 2023-09-21 9:14 ` Zenghui Yu
2023-09-21 11:46 ` Marc Zyngier
0 siblings, 1 reply; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 9:14 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023/9/21 2:17, Marc Zyngier wrote:
> Since our emulated ITS advertises GITS_TYPER.PTA=0, the target
> address associated to a collection is a PE number and not
> an address. So far, so good. However, the PE number is what userspace
> has provided given us (aka the vcpu_id), and not the internal vcpu
> index.
>
> Make sure we consistently retrieve the vcpu by ID rather than
> by index, adding a helper that deals with most of the cases.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
Looks good, with 2 more points:
- Like patch#1, we should have a go at all
'target_addr >= kvm->online_vcpus' comparisons in vgic-its.c
- There is still a remaining kvm_get_vcpu() in vgic_its_restore_ite()
which needs to be fixed
Thanks,
Zenghui
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id
2023-09-21 9:14 ` Zenghui Yu
@ 2023-09-21 11:46 ` Marc Zyngier
2023-09-21 13:12 ` Zenghui Yu
0 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-21 11:46 UTC (permalink / raw)
To: Zenghui Yu
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On Thu, 21 Sep 2023 10:14:55 +0100,
Zenghui Yu <yuzenghui@huawei.com> wrote:
>
> On 2023/9/21 2:17, Marc Zyngier wrote:
> > Since our emulated ITS advertises GITS_TYPER.PTA=0, the target
> > address associated to a collection is a PE number and not
> > an address. So far, so good. However, the PE number is what userspace
> > has provided given us (aka the vcpu_id), and not the internal vcpu
> > index.
> >
> > Make sure we consistently retrieve the vcpu by ID rather than
> > by index, adding a helper that deals with most of the cases.
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
>
> Looks good, with 2 more points:
>
> - Like patch#1, we should have a go at all
> 'target_addr >= kvm->online_vcpus' comparisons in vgic-its.c
> - There is still a remaining kvm_get_vcpu() in vgic_its_restore_ite()
> which needs to be fixed
Yup, well spotted. I have this additional hack which I plan to put on
top.
Thanks,
M.
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 4aadcd24f6f6..6ec9dd970cbb 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -1248,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
u16 coll_id;
- u32 target_addr;
struct its_collection *collection;
bool valid;
valid = its_cmd_get_validbit(its_cmd);
coll_id = its_cmd_get_collection(its_cmd);
- target_addr = its_cmd_get_target_addr(its_cmd);
-
- if (target_addr >= atomic_read(&kvm->online_vcpus))
- return E_ITS_MAPC_PROCNUM_OOR;
if (!valid) {
vgic_its_free_collection(its, coll_id);
vgic_its_invalidate_cache(kvm);
} else {
+ struct kvm_vcpu *vcpu;
+
+ vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+ if (!vcpu)
+ return E_ITS_MAPC_PROCNUM_OOR;
+
collection = find_collection(its, coll_id);
if (!collection) {
@@ -1276,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
coll_id);
if (ret)
return ret;
- collection->target_addr = target_addr;
+ collection->target_addr = vcpu->vcpu_id;
} else {
- collection->target_addr = target_addr;
+ collection->target_addr = vcpu->vcpu_id;
update_affinity_collection(kvm, its, collection);
}
}
@@ -1405,24 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
- u32 target1_addr = its_cmd_get_target_addr(its_cmd);
- u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
struct kvm_vcpu *vcpu1, *vcpu2;
struct vgic_irq *irq;
u32 *intids;
int irq_count, i;
- if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
- target2_addr >= atomic_read(&kvm->online_vcpus))
+ /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+ vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+ vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
+
+ if (!vcpu1 || !vcpu2)
return E_ITS_MOVALL_PROCNUM_OOR;
- if (target1_addr == target2_addr)
+ if (vcpu1 == vcpu2)
return 0;
- /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
- vcpu1 = kvm_get_vcpu_by_id(kvm, target1_addr);
- vcpu2 = kvm_get_vcpu_by_id(kvm, target2_addr);
-
irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
if (irq_count < 0)
return irq_count;
@@ -2265,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
return PTR_ERR(ite);
if (its_is_collection_mapped(collection))
- vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+ vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
irq = vgic_add_lpi(kvm, lpi_id, vcpu);
if (IS_ERR(irq)) {
@@ -2580,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
coll_id = val & KVM_ITS_CTE_ICID_MASK;
if (target_addr != COLLECTION_NOT_MAPPED &&
- target_addr >= atomic_read(&kvm->online_vcpus))
+ !kvm_get_vcpu_by_id(kvm, target_addr))
return -EINVAL;
collection = find_collection(its, coll_id);
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id
2023-09-21 11:46 ` Marc Zyngier
@ 2023-09-21 13:12 ` Zenghui Yu
0 siblings, 0 replies; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 13:12 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023/9/21 19:46, Marc Zyngier wrote:
> I have this additional hack which I plan to put on top.
Looks good. :-)
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 01/11] KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 02/11] KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 9:42 ` Joey Gouly
2023-09-20 18:17 ` [PATCH v2 04/11] KVM: arm64: vgic-v2: Use cpuid from userspace as vcpu_id Marc Zyngier
` (9 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
As we're about to change the way SGIs are sent, start by splitting
out some of the basic functionality: instead of intermingling
the broadcast and non-broadcast cases with the actual SGI generation,
perform the following cleanups:
- move the SGI queuing into its own helper
- split the broadcast code from the affinity-driven code
- replace the mask/shift combinations with FIELD_GET()
The result is much more readable, and paves the way for further
optimisations.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/vgic/vgic-mmio-v3.c | 110 ++++++++++++++++-------------
1 file changed, 59 insertions(+), 51 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 188d2187eede..88b8d4524854 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -1052,6 +1052,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ /*
+ * An access targeting Group0 SGIs can only generate
+ * those, while an access targeting Group1 SGIs can
+ * generate interrupts of either group.
+ */
+ if (!irq->group || allow_group1) {
+ if (!irq->hw) {
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ } else {
+ /* HW SGI? Ask the GIC to inject it */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ true);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+ } else {
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+
+ vgic_put_irq(vcpu->kvm, irq);
+}
+
/**
* vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
* @vcpu: The VCPU requesting a SGI
@@ -1070,19 +1102,30 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *c_vcpu;
- u16 target_cpus;
+ unsigned long target_cpus;
u64 mpidr;
- int sgi;
- int vcpu_id = vcpu->vcpu_id;
- bool broadcast;
- unsigned long c, flags;
-
- sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
- broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
- target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+ u32 sgi;
+ unsigned long c;
+
+ sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
+
+ /* Broadcast */
+ if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+ /* Don't signal the calling VCPU */
+ if (c_vcpu == vcpu)
+ continue;
+
+ vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+ }
+
+ return;
+ }
+
mpidr = SGI_AFFINITY_LEVEL(reg, 3);
mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+ target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
/*
* We iterate over all VCPUs to find the MPIDRs matching the request.
@@ -1091,54 +1134,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
* VCPUs when most of the times we just signal a single VCPU.
*/
kvm_for_each_vcpu(c, c_vcpu, kvm) {
- struct vgic_irq *irq;
+ int level0;
/* Exit early if we have dealt with all requested CPUs */
- if (!broadcast && target_cpus == 0)
+ if (target_cpus == 0)
break;
-
- /* Don't signal the calling VCPU */
- if (broadcast && c == vcpu_id)
+ level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+ if (level0 == -1)
continue;
- if (!broadcast) {
- int level0;
-
- level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
- if (level0 == -1)
- continue;
-
- /* remove this matching VCPU from the mask */
- target_cpus &= ~BIT(level0);
- }
+ /* remove this matching VCPU from the mask */
+ target_cpus &= ~BIT(level0);
- irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
-
- raw_spin_lock_irqsave(&irq->irq_lock, flags);
-
- /*
- * An access targeting Group0 SGIs can only generate
- * those, while an access targeting Group1 SGIs can
- * generate interrupts of either group.
- */
- if (!irq->group || allow_group1) {
- if (!irq->hw) {
- irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
- } else {
- /* HW SGI? Ask the GIC to inject it */
- int err;
- err = irq_set_irqchip_state(irq->host_irq,
- IRQCHIP_STATE_PENDING,
- true);
- WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
- }
- } else {
- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
- }
-
- vgic_put_irq(vcpu->kvm, irq);
+ vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
}
}
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation
2023-09-20 18:17 ` [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation Marc Zyngier
@ 2023-09-21 9:42 ` Joey Gouly
2023-09-21 11:13 ` Marc Zyngier
0 siblings, 1 reply; 24+ messages in thread
From: Joey Gouly @ 2023-09-21 9:42 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Zenghui Yu, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
Hi Marc, Oliver,
On Wed, Sep 20, 2023 at 07:17:23PM +0100, Marc Zyngier wrote:
> As we're about to change the way SGIs are sent, start by splitting
> out some of the basic functionality: instead of intermingling
> the broadcast and non-broadcast cases with the actual SGI generation,
> perform the following cleanups:
>
> - move the SGI queuing into its own helper
> - split the broadcast code from the affinity-driven code
> - replace the mask/shift combinations with FIELD_GET()
>
> The result is much more readable, and paves the way for further
> optimisations.
>
> Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Want to point out that I didn't review this code, I only reviewed patches 1-3
from the original series.
https://lore.kernel.org/linux-arm-kernel/20230907100931.1186690-1-maz@kernel.org/
Since it seems Oliver is picking it up, can you remove my r-b tag from this patch.
Thanks,
Joey
> Tested-by: Joey Gouly <joey.gouly@arm.com>
> Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/vgic/vgic-mmio-v3.c | 110 ++++++++++++++++-------------
> 1 file changed, 59 insertions(+), 51 deletions(-)
>
> diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
> index 188d2187eede..88b8d4524854 100644
> --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
> +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
> @@ -1052,6 +1052,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
> ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
> >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
>
> +static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
> +{
> + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
> + unsigned long flags;
> +
> + raw_spin_lock_irqsave(&irq->irq_lock, flags);
> +
> + /*
> + * An access targeting Group0 SGIs can only generate
> + * those, while an access targeting Group1 SGIs can
> + * generate interrupts of either group.
> + */
> + if (!irq->group || allow_group1) {
> + if (!irq->hw) {
> + irq->pending_latch = true;
> + vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> + } else {
> + /* HW SGI? Ask the GIC to inject it */
> + int err;
> + err = irq_set_irqchip_state(irq->host_irq,
> + IRQCHIP_STATE_PENDING,
> + true);
> + WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> + raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> + }
> + } else {
> + raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> + }
> +
> + vgic_put_irq(vcpu->kvm, irq);
> +}
> +
> /**
> * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
> * @vcpu: The VCPU requesting a SGI
> @@ -1070,19 +1102,30 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
> {
> struct kvm *kvm = vcpu->kvm;
> struct kvm_vcpu *c_vcpu;
> - u16 target_cpus;
> + unsigned long target_cpus;
> u64 mpidr;
> - int sgi;
> - int vcpu_id = vcpu->vcpu_id;
> - bool broadcast;
> - unsigned long c, flags;
> -
> - sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
> - broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
> - target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
> + u32 sgi;
> + unsigned long c;
> +
> + sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
> +
> + /* Broadcast */
> + if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
> + kvm_for_each_vcpu(c, c_vcpu, kvm) {
> + /* Don't signal the calling VCPU */
> + if (c_vcpu == vcpu)
> + continue;
> +
> + vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
> + }
> +
> + return;
> + }
> +
> mpidr = SGI_AFFINITY_LEVEL(reg, 3);
> mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
> mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
> + target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
>
> /*
> * We iterate over all VCPUs to find the MPIDRs matching the request.
> @@ -1091,54 +1134,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
> * VCPUs when most of the times we just signal a single VCPU.
> */
> kvm_for_each_vcpu(c, c_vcpu, kvm) {
> - struct vgic_irq *irq;
> + int level0;
>
> /* Exit early if we have dealt with all requested CPUs */
> - if (!broadcast && target_cpus == 0)
> + if (target_cpus == 0)
> break;
> -
> - /* Don't signal the calling VCPU */
> - if (broadcast && c == vcpu_id)
> + level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
> + if (level0 == -1)
> continue;
>
> - if (!broadcast) {
> - int level0;
> -
> - level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
> - if (level0 == -1)
> - continue;
> -
> - /* remove this matching VCPU from the mask */
> - target_cpus &= ~BIT(level0);
> - }
> + /* remove this matching VCPU from the mask */
> + target_cpus &= ~BIT(level0);
>
> - irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
> -
> - raw_spin_lock_irqsave(&irq->irq_lock, flags);
> -
> - /*
> - * An access targeting Group0 SGIs can only generate
> - * those, while an access targeting Group1 SGIs can
> - * generate interrupts of either group.
> - */
> - if (!irq->group || allow_group1) {
> - if (!irq->hw) {
> - irq->pending_latch = true;
> - vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> - } else {
> - /* HW SGI? Ask the GIC to inject it */
> - int err;
> - err = irq_set_irqchip_state(irq->host_irq,
> - IRQCHIP_STATE_PENDING,
> - true);
> - WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
> - raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> - }
> - } else {
> - raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
> - }
> -
> - vgic_put_irq(vcpu->kvm, irq);
> + vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
> }
> }
>
> --
> 2.34.1
>
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation
2023-09-21 9:42 ` Joey Gouly
@ 2023-09-21 11:13 ` Marc Zyngier
0 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-21 11:13 UTC (permalink / raw)
To: Joey Gouly
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Zenghui Yu, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023-09-21 10:42, Joey Gouly wrote:
> Hi Marc, Oliver,
>
> On Wed, Sep 20, 2023 at 07:17:23PM +0100, Marc Zyngier wrote:
>> As we're about to change the way SGIs are sent, start by splitting
>> out some of the basic functionality: instead of intermingling
>> the broadcast and non-broadcast cases with the actual SGI generation,
>> perform the following cleanups:
>>
>> - move the SGI queuing into its own helper
>> - split the broadcast code from the affinity-driven code
>> - replace the mask/shift combinations with FIELD_GET()
>>
>> The result is much more readable, and paves the way for further
>> optimisations.
>>
>> Reviewed-by: Joey Gouly <joey.gouly@arm.com>
>
> Want to point out that I didn't review this code, I only reviewed
> patches 1-3
> from the original series.
Ah crap. I try to make sure I was only tagging the right patches,
but obviously failed... :-( Really sorry about that.
> https://lore.kernel.org/linux-arm-kernel/20230907100931.1186690-1-maz@kernel.org/
>
> Since it seems Oliver is picking it up, can you remove my r-b tag from
> this patch.
Well, Zenghui has found a couple of issues, so there will be a third
revision for sure. I'll amend the patch right away.
Thanks,
M.
--
Jazz is not dead. It just smells funny...
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v2 04/11] KVM: arm64: vgic-v2: Use cpuid from userspace as vcpu_id
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (2 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 03/11] KVM: arm64: vgic-v3: Refactor GICv3 SGI generation Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 05/11] KVM: arm64: vgic: Use vcpu_idx for the debug information Marc Zyngier
` (8 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/vgic/vgic-kvm-device.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 212b73a715c1..82b264ad68c4 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -345,7 +345,7 @@ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
return -EINVAL;
- reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
return 0;
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* [PATCH v2 05/11] KVM: arm64: vgic: Use vcpu_idx for the debug information
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (3 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 04/11] KVM: arm64: vgic-v2: Use cpuid from userspace as vcpu_id Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking Marc Zyngier
` (7 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/vgic/vgic-debug.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index 07aa0437125a..85606a531dc3 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -166,7 +166,7 @@ static void print_header(struct seq_file *s, struct vgic_irq *irq,
if (vcpu) {
hdr = "VCPU";
- id = vcpu->vcpu_id;
+ id = vcpu->vcpu_idx;
}
seq_printf(s, "\n");
@@ -212,7 +212,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
" %2d "
"\n",
type, irq->intid,
- (irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
+ (irq->target_vcpu) ? irq->target_vcpu->vcpu_idx : -1,
pending,
irq->line_level,
irq->active,
@@ -224,7 +224,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
irq->mpidr,
irq->source,
irq->priority,
- (irq->vcpu) ? irq->vcpu->vcpu_id : -1);
+ (irq->vcpu) ? irq->vcpu->vcpu_idx : -1);
}
static int vgic_debug_show(struct seq_file *s, void *v)
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (4 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 05/11] KVM: arm64: vgic: Use vcpu_idx for the debug information Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 9:16 ` Zenghui Yu
2023-09-20 18:17 ` [PATCH v2 07/11] KVM: arm64: Simplify kvm_vcpu_get_mpidr_aff() Marc Zyngier
` (6 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/arm.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 872679a0cbd7..23c22dbd1969 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -438,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* We might get preempted before the vCPU actually runs, but
* over-invalidation doesn't affect correctness.
*/
- if (*last_ran != vcpu->vcpu_id) {
+ if (*last_ran != vcpu->vcpu_idx) {
kvm_call_hyp(__kvm_flush_cpu_context, mmu);
- *last_ran = vcpu->vcpu_id;
+ *last_ran = vcpu->vcpu_idx;
}
vcpu->cpu = cpu;
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking
2023-09-20 18:17 ` [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking Marc Zyngier
@ 2023-09-21 9:16 ` Zenghui Yu
2023-09-21 12:58 ` Marc Zyngier
0 siblings, 1 reply; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 9:16 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023/9/21 2:17, Marc Zyngier wrote:
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
> arch/arm64/kvm/arm.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 872679a0cbd7..23c22dbd1969 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -438,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> * We might get preempted before the vCPU actually runs, but
> * over-invalidation doesn't affect correctness.
> */
> - if (*last_ran != vcpu->vcpu_id) {
> + if (*last_ran != vcpu->vcpu_idx) {
> kvm_call_hyp(__kvm_flush_cpu_context, mmu);
> - *last_ran = vcpu->vcpu_id;
> + *last_ran = vcpu->vcpu_idx;
> }
>
> vcpu->cpu = cpu;
Isn't the original code (using vcpu_id) enough to detect a different
previously run VCPU? What am I missing?
Thanks,
Zenghui
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking
2023-09-21 9:16 ` Zenghui Yu
@ 2023-09-21 12:58 ` Marc Zyngier
0 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-21 12:58 UTC (permalink / raw)
To: Zenghui Yu
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On Thu, 21 Sep 2023 10:16:42 +0100,
Zenghui Yu <yuzenghui@huawei.com> wrote:
>
> On 2023/9/21 2:17, Marc Zyngier wrote:
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> > arch/arm64/kvm/arm.c | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 872679a0cbd7..23c22dbd1969 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -438,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> > * We might get preempted before the vCPU actually runs, but
> > * over-invalidation doesn't affect correctness.
> > */
> > - if (*last_ran != vcpu->vcpu_id) {
> > + if (*last_ran != vcpu->vcpu_idx) {
> > kvm_call_hyp(__kvm_flush_cpu_context, mmu);
> > - *last_ran = vcpu->vcpu_id;
> > + *last_ran = vcpu->vcpu_idx;
> > }
> > vcpu->cpu = cpu;
>
> Isn't the original code (using vcpu_id) enough to detect a different
> previously run VCPU? What am I missing?
It is in theory enough. However, I couldn't convince myself of the
*unicity* of the vcpu_id field. It really feels like something as
crucial as the TLB invalidation shouldn't rely on something that is
controlled by userspace.
And I really should write a commit message to capture this.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v2 07/11] KVM: arm64: Simplify kvm_vcpu_get_mpidr_aff()
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (5 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 06/11] KVM: arm64: Use vcpu_idx for invalidation tracking Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 08/11] KVM: arm64: Build MPIDR to vcpu index cache at runtime Marc Zyngier
` (5 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
By definition, MPIDR_EL1 cannot be modified by the guest. This
means it is pointless to check whether this is loaded on the CPU.
Simplify the kvm_vcpu_get_mpidr_aff() helper to directly access
the in-memory value.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/include/asm/kvm_emulate.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3d6725ff0bf6..b66ef77cf49e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -465,7 +465,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+ return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* [PATCH v2 08/11] KVM: arm64: Build MPIDR to vcpu index cache at runtime
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (6 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 07/11] KVM: arm64: Simplify kvm_vcpu_get_mpidr_aff() Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 09/11] KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available Marc Zyngier
` (4 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
The MPIDR_EL1 register contains a unique value that identifies
the CPU. The only problem with it is that it is stupidly large
(32 bits, once the useless stuff is removed).
Trying to obtain a vcpu from an MPIDR value is a fairly common,
yet costly operation: we iterate over all the vcpus until we
find the correct one. While this is cheap for small VMs, it is
pretty expensive on large ones, specially if you are trying to
get to the one that's at the end of the list...
In order to help with this, it is important to realise that
the MPIDR values are actually structured, and that implementations
tend to use a small number of significant bits in the 32bit space.
We can use this fact to our advantage by computing a small hash
table that uses the "compression" of the significant MPIDR bits
as an index, giving us the vcpu index as a result.
Given that the MPIDR values can be supplied by userspace, and
that an evil VMM could decide to make *all* bits significant,
resulting in a 4G-entry table, we only use this method if the
resulting table fits in a single page. Otherwise, we fallback
to the good old iterative method.
Nothing uses that table just yet, but keep your eyes peeled.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/include/asm/kvm_host.h | 28 ++++++++++++++++
arch/arm64/kvm/arm.c | 54 +++++++++++++++++++++++++++++++
2 files changed, 82 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index af06ccb7ee34..4b5a57a60b6a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -202,6 +202,31 @@ struct kvm_protected_vm {
struct kvm_hyp_memcache teardown_mc;
};
+struct kvm_mpidr_data {
+ u64 mpidr_mask;
+ DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
+};
+
+static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
+{
+ unsigned long mask = data->mpidr_mask;
+ u64 aff = mpidr & MPIDR_HWID_BITMASK;
+ int nbits, bit, bit_idx = 0;
+ u16 index = 0;
+
+ /*
+ * If this looks like RISC-V's BEXT or x86's PEXT
+ * instructions, it isn't by accident.
+ */
+ nbits = fls(mask);
+ for_each_set_bit(bit, &mask, nbits) {
+ index |= (aff & BIT(bit)) >> (bit - bit_idx);
+ bit_idx++;
+ }
+
+ return index;
+}
+
struct kvm_arch {
struct kvm_s2_mmu mmu;
@@ -248,6 +273,9 @@ struct kvm_arch {
/* VM-wide vCPU feature set */
DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
+ /* MPIDR to vcpu index mapping, optional */
+ struct kvm_mpidr_data *mpidr_data;
+
/*
* VM-wide PMU filter, implemented as a bitmap and big enough for
* up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 23c22dbd1969..d9de4d3a339c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -205,6 +205,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
+ kfree(kvm->arch.mpidr_data);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -578,6 +579,57 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}
+static void kvm_init_mpidr_data(struct kvm *kvm)
+{
+ struct kvm_mpidr_data *data = NULL;
+ unsigned long c, mask, nr_entries;
+ u64 aff_set = 0, aff_clr = ~0UL;
+ struct kvm_vcpu *vcpu;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+ goto out;
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+ aff_set |= aff;
+ aff_clr &= aff;
+ }
+
+ /*
+ * A significant bit can be either 0 or 1, and will only appear in
+ * aff_set. Use aff_clr to weed out the useless stuff.
+ */
+ mask = aff_set ^ aff_clr;
+ nr_entries = BIT_ULL(hweight_long(mask));
+
+ /*
+ * Don't let userspace fool us. If we need more than a single page
+ * to describe the compressed MPIDR array, just fall back to the
+ * iterative method. Single vcpu VMs do not need this either.
+ */
+ if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
+ data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
+ GFP_KERNEL_ACCOUNT);
+
+ if (!data)
+ goto out;
+
+ data->mpidr_mask = mask;
+
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
+ u16 index = kvm_mpidr_index(data, aff);
+
+ data->cmpidr_to_idx[index] = c;
+ }
+
+ kvm->arch.mpidr_data = data;
+out:
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
/*
* Handle both the initialisation that is being done when the vcpu is
* run for the first time, as well as the updates that must be
@@ -601,6 +653,8 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (likely(vcpu_has_run_once(vcpu)))
return 0;
+ kvm_init_mpidr_data(kvm);
+
kvm_arm_vcpu_init_debug(vcpu);
if (likely(irqchip_in_kernel(kvm))) {
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* [PATCH v2 09/11] KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (7 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 08/11] KVM: arm64: Build MPIDR to vcpu index cache at runtime Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-20 18:17 ` [PATCH v2 10/11] KVM: arm64: vgic-v3: Optimize affinity-based SGI injection Marc Zyngier
` (3 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
If our fancy little table is present when calling kvm_mpidr_to_vcpu(),
use it to recover the corresponding vcpu.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/arm.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d9de4d3a339c..28f3940fa724 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2395,6 +2395,18 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
+
+ if (kvm->arch.mpidr_data) {
+ u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+
+ vcpu = kvm_get_vcpu(kvm,
+ kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+ if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
+ vcpu = NULL;
+
+ return vcpu;
+ }
+
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
return vcpu;
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* [PATCH v2 10/11] KVM: arm64: vgic-v3: Optimize affinity-based SGI injection
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (8 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 09/11] KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 9:32 ` Zenghui Yu
2023-09-20 18:17 ` [PATCH v2 11/11] KVM: arm64: Clarify the ordering requirements for vcpu/RD creation Marc Zyngier
` (2 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
Our affinity-based SGI injection code is a bit daft. We iterate
over all the CPUs trying to match the set of affinities that the
guest is trying to reach, leading to some very bad behaviours
if the selected targets are at a high vcpu index.
Instead, we can now use the fact that we have an optimised
MPIDR to vcpu mapping, and only look at the relevant values.
This results in a much faster injection for large VMs, and
in a near constant time, irrespective of the position in the
vcpu index space.
As a bonus, this is mostly deleting a lot of hard-to-read
code. Nobody will complain about that.
Suggested-by: Xu Zhao <zhaoxu.35@bytedance.com>
Tested-by: Joey Gouly <joey.gouly@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
arch/arm64/kvm/vgic/vgic-mmio-v3.c | 56 ++++--------------------------
1 file changed, 6 insertions(+), 50 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 88b8d4524854..c7337a0fd242 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -1013,35 +1013,6 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
return 0;
}
-/*
- * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
- * generation register ICC_SGI1R_EL1) with a given VCPU.
- * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
- * return -1.
- */
-static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
-{
- unsigned long affinity;
- int level0;
-
- /*
- * Split the current VCPU's MPIDR into affinity level 0 and the
- * rest as this is what we have to compare against.
- */
- affinity = kvm_vcpu_get_mpidr_aff(vcpu);
- level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
- affinity &= ~MPIDR_LEVEL_MASK;
-
- /* bail out if the upper three levels don't match */
- if (sgi_aff != affinity)
- return -1;
-
- /* Is this VCPU's bit set in the mask ? */
- if (!(sgi_cpu_mask & BIT(level0)))
- return -1;
-
- return level0;
-}
/*
* The ICC_SGI* registers encode the affinity differently from the MPIDR,
@@ -1104,7 +1075,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
struct kvm_vcpu *c_vcpu;
unsigned long target_cpus;
u64 mpidr;
- u32 sgi;
+ u32 sgi, aff0;
unsigned long c;
sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
@@ -1122,31 +1093,16 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
return;
}
+ /* We iterate over affinities to find the corresponding vcpus */
mpidr = SGI_AFFINITY_LEVEL(reg, 3);
mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
- /*
- * We iterate over all VCPUs to find the MPIDRs matching the request.
- * If we have handled one CPU, we clear its bit to detect early
- * if we are already finished. This avoids iterating through all
- * VCPUs when most of the times we just signal a single VCPU.
- */
- kvm_for_each_vcpu(c, c_vcpu, kvm) {
- int level0;
-
- /* Exit early if we have dealt with all requested CPUs */
- if (target_cpus == 0)
- break;
- level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
- if (level0 == -1)
- continue;
-
- /* remove this matching VCPU from the mask */
- target_cpus &= ~BIT(level0);
-
- vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+ for_each_set_bit(aff0, &target_cpus, hweight_long(ICC_SGI1R_TARGET_LIST_MASK)) {
+ c_vcpu = kvm_mpidr_to_vcpu(kvm, mpidr | aff0);
+ if (c_vcpu)
+ vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
}
}
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 10/11] KVM: arm64: vgic-v3: Optimize affinity-based SGI injection
2023-09-20 18:17 ` [PATCH v2 10/11] KVM: arm64: vgic-v3: Optimize affinity-based SGI injection Marc Zyngier
@ 2023-09-21 9:32 ` Zenghui Yu
0 siblings, 0 replies; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 9:32 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
On 2023/9/21 2:17, Marc Zyngier wrote:
> Our affinity-based SGI injection code is a bit daft. We iterate
> over all the CPUs trying to match the set of affinities that the
> guest is trying to reach, leading to some very bad behaviours
> if the selected targets are at a high vcpu index.
>
> Instead, we can now use the fact that we have an optimised
> MPIDR to vcpu mapping, and only look at the relevant values.
>
> This results in a much faster injection for large VMs, and
> in a near constant time, irrespective of the position in the
> vcpu index space.
>
> As a bonus, this is mostly deleting a lot of hard-to-read
> code. Nobody will complain about that.
>
> Suggested-by: Xu Zhao <zhaoxu.35@bytedance.com>
> Tested-by: Joey Gouly <joey.gouly@arm.com>
> Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
Probably update the comment on top of vgic_v3_dispatch_sgi() to reflect
the new approach.
| * If the interrupt routing mode bit is not set, we iterate over all
VCPUs to
| * check for matching ones. If this bit is set, we signal all, but not the
| * calling VCPU.
Zenghui
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v2 11/11] KVM: arm64: Clarify the ordering requirements for vcpu/RD creation
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (9 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 10/11] KVM: arm64: vgic-v3: Optimize affinity-based SGI injection Marc Zyngier
@ 2023-09-20 18:17 ` Marc Zyngier
2023-09-21 2:11 ` [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Oliver Upton
2023-09-21 9:39 ` Zenghui Yu
12 siblings, 0 replies; 24+ messages in thread
From: Marc Zyngier @ 2023-09-20 18:17 UTC (permalink / raw)
To: kvmarm, linux-arm-kernel, kvm
Cc: James Morse, Suzuki K Poulose, Oliver Upton, Zenghui Yu,
Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao, Eric Auger
It goes without saying, but it is probably better to spell it out:
If userspace tries to restore a VM, but creates vcpus and/or RDs
in a different order, the vcpu/RD mapping will be different. Yes,
our API is an ugly piece of crap and I can't believe that we missed
this.
If we want to relax the above, we'll need to define a new userspace
API that allows the mapping to be specified, rather than relying
on the kernel to perform the mapping on its own.
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
Documentation/virt/kvm/devices/arm-vgic-v3.rst | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.rst b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
index 51e5e5762571..5817edb4e046 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-v3.rst
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
@@ -59,6 +59,13 @@ Groups:
It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes.
+ Note that to obtain reproducible results (the same VCPU being associated
+ with the same redistributor across a save/restore operation), VCPU creation
+ order, redistributor region creation order as well as the respective
+ interleaves of VCPU and region creation MUST be preserved. Any change in
+ either ordering may result in a different vcpu_id/redistributor association,
+ resulting in a VM that will fail to run at restore time.
+
Errors:
======= =============================================================
--
2.34.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 24+ messages in thread* Re: [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes)
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (10 preceding siblings ...)
2023-09-20 18:17 ` [PATCH v2 11/11] KVM: arm64: Clarify the ordering requirements for vcpu/RD creation Marc Zyngier
@ 2023-09-21 2:11 ` Oliver Upton
2023-09-21 9:39 ` Zenghui Yu
12 siblings, 0 replies; 24+ messages in thread
From: Oliver Upton @ 2023-09-21 2:11 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Zenghui Yu, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
Heh, "and other fixes" is quite the understatement :)
This looks good to me, only issues I've spotted so far are a few typos
that are easy enough to fix when I apply the series. I'll give it a
couple days for folks to have a look, but otherwise intend on picking
this up for 6.7.
On Wed, Sep 20, 2023 at 07:17:20PM +0100, Marc Zyngier wrote:
> This is a follow-up on [1], which contains the original patches, only
> augmented with a bunch of fixes after Zenghui pointed out that I was
> inadvertently fixing bugs (yay!), but that there were plenty more.
>
> The core of the issue is that we tend to confuse vcpu_id with
> vcpu_idx. The first is a userspace-provided value, from which we
> derive the default MPIDR_EL1 value, while the second is something that
> used to represent the position in the vcpu array, and is now more of
> an internal identifier.
--
Thanks,
Oliver
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes)
2023-09-20 18:17 [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Marc Zyngier
` (11 preceding siblings ...)
2023-09-21 2:11 ` [PATCH v2 00/11] KVM: arm64: Accelerate lookup of vcpus by MPIDR values (and other fixes) Oliver Upton
@ 2023-09-21 9:39 ` Zenghui Yu
12 siblings, 0 replies; 24+ messages in thread
From: Zenghui Yu @ 2023-09-21 9:39 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, linux-arm-kernel, kvm, James Morse, Suzuki K Poulose,
Oliver Upton, Joey Gouly, Shameerali Kolothum Thodi, Xu Zhao,
Eric Auger
Hi Marc,
On 2023/9/21 2:17, Marc Zyngier wrote:
> So this series, on top of trying to optimise SGI injection, also aims
> at disambiguating this mess, and documenting some of the implicit
> requirements for userspace.
Apart from the minor comments, this series looks good.
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 24+ messages in thread