* [PATCH v6 1/4] arm64: Modify _midr_range() functions to read MIDR/REVIDR internally
2025-02-05 13:22 [PATCH v6 0/4] KVM: arm64: Errata management for VM Live migration Shameer Kolothum
@ 2025-02-05 13:22 ` Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 2/4] KVM: arm64: Introduce hypercall support for retrieving target implementations Shameer Kolothum
` (2 subsequent siblings)
3 siblings, 0 replies; 13+ messages in thread
From: Shameer Kolothum @ 2025-02-05 13:22 UTC (permalink / raw)
To: kvmarm, maz, oliver.upton
Cc: catalin.marinas, will, mark.rutland, cohuck, eric.auger, sebott,
yuzenghui, wangzhou1, jiangkunkun, jonathan.cameron,
anthony.jebson, linux-arm-kernel, linuxarm
These changes lay the groundwork for adding support for guest kernels,
allowing them to leverage target CPU implementations provided by the
VMM.
No functional changes intended.
Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
arch/arm64/include/asm/cputype.h | 28 ++++++++++++++--------------
arch/arm64/include/asm/mmu.h | 3 +--
arch/arm64/kernel/cpu_errata.c | 23 +++++++++++++----------
arch/arm64/kernel/cpufeature.c | 6 +++---
arch/arm64/kernel/proton-pack.c | 17 ++++++++---------
arch/arm64/kvm/vgic/vgic-v3.c | 2 +-
drivers/clocksource/arm_arch_timer.c | 2 +-
7 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 6f3f4142e214..2a76f0e30006 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -231,6 +231,16 @@
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(MIDR_EL1);
+}
+
/*
* Represent a range of MIDR values for a given CPU model and a
* range of variant/revision values.
@@ -266,31 +276,21 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
return _model == model && rv >= rv_min && rv <= rv_max;
}
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+static inline bool is_midr_in_range(struct midr_range const *range)
{
- return midr_is_cpu_model_range(midr, range->model,
+ return midr_is_cpu_model_range(read_cpuid_id(), range->model,
range->rv_min, range->rv_max);
}
static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
+is_midr_in_range_list(struct midr_range const *ranges)
{
while (ranges->model)
- if (is_midr_in_range(midr, ranges++))
+ if (is_midr_in_range(ranges++))
return true;
return false;
}
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
- return read_cpuid(MIDR_EL1);
-}
-
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(MPIDR_EL1);
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 662471cfc536..30a29e88994b 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -101,8 +101,7 @@ static inline bool kaslr_requires_kpti(void)
if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
extern const struct midr_range cavium_erratum_27456_cpus[];
- if (is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus))
+ if (is_midr_in_range_list(cavium_erratum_27456_cpus))
return false;
}
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7ce555862895..99b55893fc4e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -15,30 +15,34 @@
#include <asm/smp_plat.h>
static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+ u32 midr, u32 revidr)
{
const struct arm64_midr_revidr *fix;
- u32 midr = read_cpuid_id(), revidr;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- if (!is_midr_in_range(midr, &entry->midr_range))
+ if (!is_midr_in_range(&entry->midr_range))
return false;
midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
- revidr = read_cpuid(REVIDR_EL1);
for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
return false;
-
return true;
}
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return __is_affected_midr_range(entry, read_cpuid_id(),
+ read_cpuid(REVIDR_EL1));
+}
+
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
int scope)
{
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+ return is_midr_in_range_list(entry->midr_range_list);
}
static bool __maybe_unused
@@ -186,12 +190,11 @@ static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u32 midr = read_cpuid_id();
bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range(midr, &range) && has_dic;
+ return is_midr_in_range(&range) && has_dic;
}
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4eb7c6698ae4..72e876f37cd4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1792,7 +1792,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
char const *str = "kpti command line option";
bool meltdown_safe;
- meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+ meltdown_safe = is_midr_in_range_list(kpti_safe_list);
/* Defer to CPU feature registers */
if (has_cpuid_feature(entry, scope))
@@ -1862,7 +1862,7 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
!(has_cpuid_feature(entry, scope) ||
- is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
+ is_midr_in_range_list(nv1_ni_list)));
}
#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
@@ -2045,7 +2045,7 @@ static bool cpu_has_broken_dbm(void)
{},
};
- return is_midr_in_range_list(read_cpuid_id(), cpus);
+ return is_midr_in_range_list(cpus);
}
static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index da53722f95d4..a573fa40d4b6 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
return SPECTRE_UNAFFECTED;
/* Alternatively, we have a list of unaffected CPUs */
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ if (is_midr_in_range_list(spectre_v2_safe_list))
return SPECTRE_UNAFFECTED;
return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
};
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+ return is_midr_in_range_list(spectre_v3a_unsafe_list);
}
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{ /* sentinel */ },
};
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+ if (is_midr_in_range_list(spectre_v4_safe_list))
return SPECTRE_UNAFFECTED;
/* CPU features are detected first */
@@ -878,13 +878,13 @@ u8 spectre_bhb_loop_affected(int scope)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+ if (is_midr_in_range_list(spectre_bhb_k32_list))
k = 32;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+ else if (is_midr_in_range_list(spectre_bhb_k24_list))
k = 24;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+ else if (is_midr_in_range_list(spectre_bhb_k11_list))
k = 11;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+ else if (is_midr_in_range_list(spectre_bhb_k8_list))
k = 8;
max_bhb_k = max(max_bhb_k, k);
@@ -926,8 +926,7 @@ static bool is_spectre_bhb_fw_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
- bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
- spectre_bhb_firmware_mitigated_list);
+ bool cpu_in_list = is_midr_in_range_list(spectre_bhb_firmware_mitigated_list);
if (scope != SCOPE_LOCAL_CPU)
return system_affected;
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index d7233ab982d0..87a8a96c7a41 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -633,7 +633,7 @@ static const struct midr_range broken_seis[] = {
static bool vgic_v3_broken_seis(void)
{
return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
- is_midr_in_range_list(read_cpuid_id(), broken_seis));
+ is_midr_in_range_list(broken_seis));
}
/**
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 808f259781fd..981a578043a5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ if (is_midr_in_range_list(broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
--
2.47.0
^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v6 2/4] KVM: arm64: Introduce hypercall support for retrieving target implementations
2025-02-05 13:22 [PATCH v6 0/4] KVM: arm64: Errata management for VM Live migration Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 1/4] arm64: Modify _midr_range() functions to read MIDR/REVIDR internally Shameer Kolothum
@ 2025-02-05 13:22 ` Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs Shameer Kolothum
3 siblings, 0 replies; 13+ messages in thread
From: Shameer Kolothum @ 2025-02-05 13:22 UTC (permalink / raw)
To: kvmarm, maz, oliver.upton
Cc: catalin.marinas, will, mark.rutland, cohuck, eric.auger, sebott,
yuzenghui, wangzhou1, jiangkunkun, jonathan.cameron,
anthony.jebson, linux-arm-kernel, linuxarm
If the Guest requires migration to multiple targets, these hypercalls
will provide a way to retrieve the target CPU implementations from
the user space VMM.
Subsequent patch will use this to enable the associated errata.
Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Suggested-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
Documentation/virt/kvm/arm/hypercalls.rst | 59 +++++++++++++++++++++++
include/linux/arm-smccc.h | 15 ++++++
2 files changed, 74 insertions(+)
diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst
index af7bc2c2e0cb..7400a89aabf8 100644
--- a/Documentation/virt/kvm/arm/hypercalls.rst
+++ b/Documentation/virt/kvm/arm/hypercalls.rst
@@ -142,3 +142,62 @@ region is equal to the memory protection granule advertised by
| | | +---------------------------------------------+
| | | | ``INVALID_PARAMETER (-3)`` |
+---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID``
+-------------------------------------------------------
+Request the target CPU implementation version information and the number of target
+implementations for the Guest VM.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; KVM/ARM64 Guests only |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000040 |
++---------------------+----------+--------------------------------------------------+
+| Arguments: | None |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``NOT_SUPPORTED (-1)`` |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R1 | Bits [63:32] Reserved/Must be zero |
+| | | +---------------------------------------------+
+| | | | Bits [31:16] Major version |
+| | | +---------------------------------------------+
+| | | | Bits [15:0] Minor version |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Number of target implementations |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID``
+-------------------------------------------------------
+
+Request the target CPU implementation information for the Guest VM. The Guest kernel
+will use this information to enable the associated errata.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; KVM/ARM64 Guests only |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000041 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | selected implementation index |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``INVALID_PARAMETER (-3)`` |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R1 | MIDR_EL1 of the selected implementation |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | REVIDR_EL1 of the selected implementation |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | AIDR_EL1 of the selected implementation |
++---------------------+----------+----+---------------------------------------------+
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 67f6fdf2e7cd..f19be5754090 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -179,6 +179,9 @@
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
/* End of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER 64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS 65
+
#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
#define ARM_SMCCC_KVM_NUM_FUNCS 128
@@ -225,6 +228,18 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1
--
2.47.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls
2025-02-05 13:22 [PATCH v6 0/4] KVM: arm64: Errata management for VM Live migration Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 1/4] arm64: Modify _midr_range() functions to read MIDR/REVIDR internally Shameer Kolothum
2025-02-05 13:22 ` [PATCH v6 2/4] KVM: arm64: Introduce hypercall support for retrieving target implementations Shameer Kolothum
@ 2025-02-05 13:22 ` Shameer Kolothum
2025-02-07 18:21 ` Oliver Upton
2025-02-05 13:22 ` [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs Shameer Kolothum
3 siblings, 1 reply; 13+ messages in thread
From: Shameer Kolothum @ 2025-02-05 13:22 UTC (permalink / raw)
To: kvmarm, maz, oliver.upton
Cc: catalin.marinas, will, mark.rutland, cohuck, eric.auger, sebott,
yuzenghui, wangzhou1, jiangkunkun, jonathan.cameron,
anthony.jebson, linux-arm-kernel, linuxarm
Currently ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID returns the
bitmap corresponding to KVM_REG_ARM_VENDOR_HYP_BMAP and it only
returns _KVM_FEATURES_FUNC_ID and _KVM_PTP_FUNC_ID. Change that
to return all the KVM/arm64-specific hypercalls exposed by
KVM/arm64 to guest operating systems.
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
arch/arm64/kvm/hypercalls.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 27ce4cb44904..5cef2590ffdf 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -359,7 +359,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
break;
case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
- val[0] = smccc_feat->vendor_hyp_bmap;
+ val[0] = GENMASK(ARM_SMCCC_KVM_FUNC_PTP,
+ ARM_SMCCC_KVM_FUNC_FEATURES);
+ /* Function numbers 2-63 are reserved for pKVM for now */
+ val[2] = GENMASK((ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS - 64),
+ (ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER - 64));
break;
case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
kvm_ptp_get_time(vcpu, val);
--
2.47.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls
2025-02-05 13:22 ` [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls Shameer Kolothum
@ 2025-02-07 18:21 ` Oliver Upton
2025-02-07 18:24 ` Oliver Upton
0 siblings, 1 reply; 13+ messages in thread
From: Oliver Upton @ 2025-02-07 18:21 UTC (permalink / raw)
To: Shameer Kolothum
Cc: kvmarm, maz, catalin.marinas, will, mark.rutland, cohuck,
eric.auger, sebott, yuzenghui, wangzhou1, jiangkunkun,
jonathan.cameron, anthony.jebson, linux-arm-kernel, linuxarm
On Wed, Feb 05, 2025 at 01:22:21PM +0000, Shameer Kolothum wrote:
> Currently ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID returns the
> bitmap corresponding to KVM_REG_ARM_VENDOR_HYP_BMAP and it only
> returns _KVM_FEATURES_FUNC_ID and _KVM_PTP_FUNC_ID. Change that
> to return all the KVM/arm64-specific hypercalls exposed by
> KVM/arm64 to guest operating systems.
>
> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> ---
> arch/arm64/kvm/hypercalls.c | 6 +++++-
> 1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
> index 27ce4cb44904..5cef2590ffdf 100644
> --- a/arch/arm64/kvm/hypercalls.c
> +++ b/arch/arm64/kvm/hypercalls.c
> @@ -359,7 +359,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
> val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
> break;
> case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
> - val[0] = smccc_feat->vendor_hyp_bmap;
> + val[0] = GENMASK(ARM_SMCCC_KVM_FUNC_PTP,
> + ARM_SMCCC_KVM_FUNC_FEATURES);
> + /* Function numbers 2-63 are reserved for pKVM for now */
> + val[2] = GENMASK((ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS - 64),
> + (ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER - 64));
> break;
This isn't right, vendor_hyp_bmap is very much load bearing. We have a
documented UAPI that allows userspace to control the hypercalls exposed
to the guest.
The idea being a user wants kernel rollback safety and doesn't expose
hypercalls that could potentially be revoked.
https://docs.kernel.org/virt/kvm/arm/fw-pseudo-registers.html#bitmap-feature-firmware-registers
--
Thanks,
Oliver
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls
2025-02-07 18:21 ` Oliver Upton
@ 2025-02-07 18:24 ` Oliver Upton
2025-02-10 10:36 ` Shameerali Kolothum Thodi
0 siblings, 1 reply; 13+ messages in thread
From: Oliver Upton @ 2025-02-07 18:24 UTC (permalink / raw)
To: Shameer Kolothum
Cc: kvmarm, maz, catalin.marinas, will, mark.rutland, cohuck,
eric.auger, sebott, yuzenghui, wangzhou1, jiangkunkun,
jonathan.cameron, anthony.jebson, linux-arm-kernel, linuxarm
On Fri, Feb 07, 2025 at 10:21:13AM -0800, Oliver Upton wrote:
> On Wed, Feb 05, 2025 at 01:22:21PM +0000, Shameer Kolothum wrote:
> > Currently ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID returns the
> > bitmap corresponding to KVM_REG_ARM_VENDOR_HYP_BMAP and it only
> > returns _KVM_FEATURES_FUNC_ID and _KVM_PTP_FUNC_ID. Change that
> > to return all the KVM/arm64-specific hypercalls exposed by
> > KVM/arm64 to guest operating systems.
> >
> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
> > ---
> > arch/arm64/kvm/hypercalls.c | 6 +++++-
> > 1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
> > index 27ce4cb44904..5cef2590ffdf 100644
> > --- a/arch/arm64/kvm/hypercalls.c
> > +++ b/arch/arm64/kvm/hypercalls.c
> > @@ -359,7 +359,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
> > val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
> > break;
> > case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
> > - val[0] = smccc_feat->vendor_hyp_bmap;
> > + val[0] = GENMASK(ARM_SMCCC_KVM_FUNC_PTP,
> > + ARM_SMCCC_KVM_FUNC_FEATURES);
> > + /* Function numbers 2-63 are reserved for pKVM for now */
> > + val[2] = GENMASK((ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS - 64),
> > + (ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER - 64));
> > break;
>
> This isn't right, vendor_hyp_bmap is very much load bearing. We have a
> documented UAPI that allows userspace to control the hypercalls exposed
> to the guest.
>
> The idea being a user wants kernel rollback safety and doesn't expose
> hypercalls that could potentially be revoked.
>
> https://docs.kernel.org/virt/kvm/arm/fw-pseudo-registers.html#bitmap-feature-firmware-registers
To add:
KVM cannot advertise the DISCOVER_IMPL* stuff unconditionally, since the
expectation is that userspace implements these hypercalls. These bits
may need to be writable from userspace but have a reset value of 0.
--
Thanks,
Oliver
^ permalink raw reply [flat|nested] 13+ messages in thread
* RE: [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls
2025-02-07 18:24 ` Oliver Upton
@ 2025-02-10 10:36 ` Shameerali Kolothum Thodi
2025-02-10 18:57 ` Oliver Upton
0 siblings, 1 reply; 13+ messages in thread
From: Shameerali Kolothum Thodi @ 2025-02-10 10:36 UTC (permalink / raw)
To: Oliver Upton
Cc: kvmarm@lists.linux.dev, maz@kernel.org, catalin.marinas@arm.com,
will@kernel.org, mark.rutland@arm.com, cohuck@redhat.com,
eric.auger@redhat.com, sebott@redhat.com, yuzenghui, Wangzhou (B),
jiangkunkun, Jonathan Cameron, Anthony Jebson,
linux-arm-kernel@lists.infradead.org, Linuxarm
> -----Original Message-----
> From: Oliver Upton <oliver.upton@linux.dev>
> Sent: Friday, February 7, 2025 6:24 PM
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Cc: kvmarm@lists.linux.dev; maz@kernel.org; catalin.marinas@arm.com;
> will@kernel.org; mark.rutland@arm.com; cohuck@redhat.com;
> eric.auger@redhat.com; sebott@redhat.com; yuzenghui
> <yuzenghui@huawei.com>; Wangzhou (B) <wangzhou1@hisilicon.com>;
> jiangkunkun <jiangkunkun@huawei.com>; Jonathan Cameron
> <jonathan.cameron@huawei.com>; Anthony Jebson
> <anthony.jebson@huawei.com>; linux-arm-kernel@lists.infradead.org;
> Linuxarm <linuxarm@huawei.com>
> Subject: Re: [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific
> hypercalls
>
> On Fri, Feb 07, 2025 at 10:21:13AM -0800, Oliver Upton wrote:
> > On Wed, Feb 05, 2025 at 01:22:21PM +0000, Shameer Kolothum wrote:
> > > Currently ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID returns
> the
> > > bitmap corresponding to KVM_REG_ARM_VENDOR_HYP_BMAP and it
> only
> > > returns _KVM_FEATURES_FUNC_ID and _KVM_PTP_FUNC_ID. Change
> that
> > > to return all the KVM/arm64-specific hypercalls exposed by
> > > KVM/arm64 to guest operating systems.
> > >
> > > Signed-off-by: Shameer Kolothum
> <shameerali.kolothum.thodi@huawei.com>
> > > ---
> > > arch/arm64/kvm/hypercalls.c | 6 +++++-
> > > 1 file changed, 5 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
> > > index 27ce4cb44904..5cef2590ffdf 100644
> > > --- a/arch/arm64/kvm/hypercalls.c
> > > +++ b/arch/arm64/kvm/hypercalls.c
> > > @@ -359,7 +359,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu
> *vcpu)
> > > val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
> > > break;
> > > case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
> > > - val[0] = smccc_feat->vendor_hyp_bmap;
> > > + val[0] = GENMASK(ARM_SMCCC_KVM_FUNC_PTP,
> > > + ARM_SMCCC_KVM_FUNC_FEATURES);
> > > + /* Function numbers 2-63 are reserved for pKVM for now */
> > > + val[2] =
> GENMASK((ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS - 64),
> > > +
> (ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER - 64));
> > > break;
> >
> > This isn't right, vendor_hyp_bmap is very much load bearing. We have a
> > documented UAPI that allows userspace to control the hypercalls exposed
> > to the guest.
> >
> > The idea being a user wants kernel rollback safety and doesn't expose
> > hypercalls that could potentially be revoked.
> >
> > https://docs.kernel.org/virt/kvm/arm/fw-pseudo-registers.html#bitmap-
> feature-firmware-registers
>
> To add:
>
> KVM cannot advertise the DISCOVER_IMPL* stuff unconditionally, since the
> expectation is that userspace implements these hypercalls. These bits
> may need to be writable from userspace but have a reset value of 0.
>
Ok. So IIUC, vendor_hyp_bmap actually holds the information about which Vendor Hyp
services are available to the user space and can be get/set using the GET/SET_ONE_REG
interfaces.
Currently this bitmap is a 64 bit one and if we have to have a one to one mapping
between these bitmap and the hypercall function numbers, then that requires
some changes.
Because function numbers 2-63 are now reserved for pKVM and the new ones
introduced in this series take 64 & 65.
Maybe we can have KVM_REG_ARM_VENDOR_HYP_BMAP_2 which represents
64-127?
Or can we take the next available bits(2 & 3) for KVM_REG_ARM_VENDOR_HYP_BMAP
and then map it to the function number appropriately (64 & 65) when
ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID is handled?
Thoughts?
Thanks,
Shameer
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls
2025-02-10 10:36 ` Shameerali Kolothum Thodi
@ 2025-02-10 18:57 ` Oliver Upton
0 siblings, 0 replies; 13+ messages in thread
From: Oliver Upton @ 2025-02-10 18:57 UTC (permalink / raw)
To: Shameerali Kolothum Thodi
Cc: kvmarm@lists.linux.dev, maz@kernel.org, catalin.marinas@arm.com,
will@kernel.org, mark.rutland@arm.com, cohuck@redhat.com,
eric.auger@redhat.com, sebott@redhat.com, yuzenghui, Wangzhou (B),
jiangkunkun, Jonathan Cameron, Anthony Jebson,
linux-arm-kernel@lists.infradead.org, Linuxarm
> > > This isn't right, vendor_hyp_bmap is very much load bearing. We have a
> > > documented UAPI that allows userspace to control the hypercalls exposed
> > > to the guest.
> > >
> > > The idea being a user wants kernel rollback safety and doesn't expose
> > > hypercalls that could potentially be revoked.
> > >
> > > https://docs.kernel.org/virt/kvm/arm/fw-pseudo-registers.html#bitmap-
> > feature-firmware-registers
> >
> > To add:
> >
> > KVM cannot advertise the DISCOVER_IMPL* stuff unconditionally, since the
> > expectation is that userspace implements these hypercalls. These bits
> > may need to be writable from userspace but have a reset value of 0.
> >
>
> Ok. So IIUC, vendor_hyp_bmap actually holds the information which Vendor Hyp
> services are available to the user space and can be get/set using GET/SET _ONE_REG
> interfaces.
Right.
> Currently this bitmap is a 64 bit one and if we have to have a one to one mapping
> between these bitmap and the hypercall function numbers, then that requires
> some changes.
>
> Because function numbers 2-63 are now reserved for pKVM and the new ones
> introduced in this series take 64 & 65.
>
> May be we can have KVM_REG_ARM_VENDOR_HYP_BMAP_2 which represents
> 64-127?
>
> Or can we take the next available bits(2 & 3) for KVM_REG_ARM_VENDOR_HYP_BMAP
> and then map it to the function number appropriately (64 & 65) when
> ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID is handled?
>
> Thoughts?
Adding a second register seems reasonable so we can preserve the mapping
of bit position / hypercall #.
--
Thanks,
Oliver
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs
2025-02-05 13:22 [PATCH v6 0/4] KVM: arm64: Errata management for VM Live migration Shameer Kolothum
` (2 preceding siblings ...)
2025-02-05 13:22 ` [PATCH v6 3/4] KVM: arm64: Report all the KVM/arm64-specific hypercalls Shameer Kolothum
@ 2025-02-05 13:22 ` Shameer Kolothum
2025-02-07 14:08 ` Catalin Marinas
3 siblings, 1 reply; 13+ messages in thread
From: Shameer Kolothum @ 2025-02-05 13:22 UTC (permalink / raw)
To: kvmarm, maz, oliver.upton
Cc: catalin.marinas, will, mark.rutland, cohuck, eric.auger, sebott,
yuzenghui, wangzhou1, jiangkunkun, jonathan.cameron,
anthony.jebson, linux-arm-kernel, linuxarm
Retrieve any migration target implementation CPUs using the hypercall
and enable associated errata.
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
arch/arm64/include/asm/cputype.h | 24 ++++++++++-
arch/arm64/include/asm/hypervisor.h | 1 +
arch/arm64/kernel/cpu_errata.c | 20 +++++++--
arch/arm64/kernel/cpufeature.c | 2 +
arch/arm64/kernel/image-vars.h | 2 +
drivers/firmware/smccc/kvm_guest.c | 63 +++++++++++++++++++++++++++++
6 files changed, 107 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 2a76f0e30006..975fbdfc9354 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -267,6 +267,15 @@ struct midr_range {
#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
+
+extern u32 target_impl_cpu_num;
+extern struct target_impl_cpu *target_impl_cpus;
+
static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
u32 rv_max)
{
@@ -278,8 +287,19 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
static inline bool is_midr_in_range(struct midr_range const *range)
{
- return midr_is_cpu_model_range(read_cpuid_id(), range->model,
- range->rv_min, range->rv_max);
+ int i;
+
+ if (!target_impl_cpu_num)
+ return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+ range->rv_min, range->rv_max);
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+ range->model,
+ range->rv_min, range->rv_max))
+ return true;
+ }
+ return false;
}
static inline bool
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 409e239834d1..a12fd897c877 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -6,6 +6,7 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
#ifdef CONFIG_ARM_PKVM_GUEST
void pkvm_init_hyp_services(void);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 99b55893fc4e..1177a3094cf2 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -14,6 +14,9 @@
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
+u32 target_impl_cpu_num;
+struct target_impl_cpu *target_impl_cpus;
+
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
u32 midr, u32 revidr)
@@ -32,9 +35,20 @@ __is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return __is_affected_midr_range(entry, read_cpuid_id(),
- read_cpuid(REVIDR_EL1));
+ int i;
+
+ if (!target_impl_cpu_num) {
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return __is_affected_midr_range(entry, read_cpuid_id(),
+ read_cpuid(REVIDR_EL1));
+ }
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+ target_impl_cpus[i].revidr))
+ return true;
+ }
+ return false;
}
static bool __maybe_unused
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 72e876f37cd4..5c61d9d9f097 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -86,6 +86,7 @@
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
+#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -3679,6 +3680,7 @@ unsigned long cpu_get_elf_hwcap3(void)
static void __init setup_boot_cpu_capabilities(void)
{
+ kvm_arm_target_impl_cpu_init();
/*
* The boot CPU's feature register values have been recorded. Detect
* boot cpucaps and local cpucaps for the boot CPU, then enable and
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index ef3a69cc398e..a6926750faaf 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -49,6 +49,8 @@ PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
#ifdef CONFIG_CAVIUM_ERRATUM_27456
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
+PROVIDE(__pi_target_impl_cpu_num = target_impl_cpu_num);
+PROVIDE(__pi_target_impl_cpus = target_impl_cpus);
#endif
PROVIDE(__pi__ctype = _ctype);
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..f0b0154150b5 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,63 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+
+ /* Check we have already set targets */
+ if (target_impl_cpu_num)
+ return;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target_impl_cpus = memblock_alloc(sizeof(*target_impl_cpus) * max_cpus,
+ __alignof__(*target_impl_cpus));
+ if (!target_impl_cpus) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ memblock_free(target_impl_cpus,
+ sizeof(*target_impl_cpus) * max_cpus);
+ target_impl_cpus = NULL;
+ pr_warn("Discovering target implementation CPUs failed\n");
+ return;
+ }
+ target_impl_cpus[i].midr = res.a1;
+ target_impl_cpus[i].revidr = res.a2;
+ target_impl_cpus[i].aidr = res.a3;
+ }
+
+ target_impl_cpu_num = max_cpus;
+ pr_info("Number of target implementation CPUs is %d\n", target_impl_cpu_num);
+}
--
2.47.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs
2025-02-05 13:22 ` [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs Shameer Kolothum
@ 2025-02-07 14:08 ` Catalin Marinas
2025-02-07 14:31 ` Marc Zyngier
0 siblings, 1 reply; 13+ messages in thread
From: Catalin Marinas @ 2025-02-07 14:08 UTC (permalink / raw)
To: Shameer Kolothum
Cc: kvmarm, maz, oliver.upton, will, mark.rutland, cohuck, eric.auger,
sebott, yuzenghui, wangzhou1, jiangkunkun, jonathan.cameron,
anthony.jebson, linux-arm-kernel, linuxarm
On Wed, Feb 05, 2025 at 01:22:22PM +0000, Shameer Kolothum wrote:
> static inline bool is_midr_in_range(struct midr_range const *range)
> {
> - return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> - range->rv_min, range->rv_max);
> + int i;
> +
> + if (!target_impl_cpu_num)
> + return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> + range->rv_min, range->rv_max);
> +
> + for (i = 0; i < target_impl_cpu_num; i++) {
> + if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
> + range->model,
> + range->rv_min, range->rv_max))
> + return true;
> + }
> + return false;
> }
It's an interesting approach but how does this work in practice if an
erratum requires a firmware counterpart? Do we expect firmwares on all
machines involved to have workarounds for the other machines? Or is KVM
going to intercept those SMCs and pretend the EL3 counterpart is there?
--
Catalin
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs
2025-02-07 14:08 ` Catalin Marinas
@ 2025-02-07 14:31 ` Marc Zyngier
2025-02-07 18:10 ` Catalin Marinas
0 siblings, 1 reply; 13+ messages in thread
From: Marc Zyngier @ 2025-02-07 14:31 UTC (permalink / raw)
To: Catalin Marinas
Cc: Shameer Kolothum, kvmarm, oliver.upton, will, mark.rutland,
cohuck, eric.auger, sebott, yuzenghui, wangzhou1, jiangkunkun,
jonathan.cameron, anthony.jebson, linux-arm-kernel, linuxarm
On Fri, 07 Feb 2025 14:08:44 +0000,
Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> On Wed, Feb 05, 2025 at 01:22:22PM +0000, Shameer Kolothum wrote:
> > static inline bool is_midr_in_range(struct midr_range const *range)
> > {
> > - return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > - range->rv_min, range->rv_max);
> > + int i;
> > +
> > + if (!target_impl_cpu_num)
> > + return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > + range->rv_min, range->rv_max);
> > +
> > + for (i = 0; i < target_impl_cpu_num; i++) {
> > + if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
> > + range->model,
> > + range->rv_min, range->rv_max))
> > + return true;
> > + }
> > + return false;
> > }
>
> It's a interesting approach but how does this work in practice if an
> erratum requires a firmware counterpart? Do we expect firmwares on all
> machines involved to have workarounds for the other machines? Or is KVM
> going to intercept those SMCs and pretend the EL3 counterpart is there?
KVM already traps SMCs, and could do something on behalf of the guest
(such as pretending that the mitigation has happened if not on the
correct host) *IF* the mitigation is architected (à la WA{1,2,3}).
If it is implementation specific, then we can immediately stop
pretending that a guest running on those systems can be migrated.
The only thing it helps a bit is big-little.
Overall, this isn't meant to be foolproof at all. It is only giving
people enough ammunition to make it plain that the cross-platform
story is a bit of a sad joke right now.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs
2025-02-07 14:31 ` Marc Zyngier
@ 2025-02-07 18:10 ` Catalin Marinas
2025-02-07 18:17 ` Marc Zyngier
0 siblings, 1 reply; 13+ messages in thread
From: Catalin Marinas @ 2025-02-07 18:10 UTC (permalink / raw)
To: Marc Zyngier
Cc: Shameer Kolothum, kvmarm, oliver.upton, will, mark.rutland,
cohuck, eric.auger, sebott, yuzenghui, wangzhou1, jiangkunkun,
jonathan.cameron, anthony.jebson, linux-arm-kernel, linuxarm
On Fri, Feb 07, 2025 at 02:31:12PM +0000, Marc Zyngier wrote:
> On Fri, 07 Feb 2025 14:08:44 +0000,
> Catalin Marinas <catalin.marinas@arm.com> wrote:
> > On Wed, Feb 05, 2025 at 01:22:22PM +0000, Shameer Kolothum wrote:
> > > static inline bool is_midr_in_range(struct midr_range const *range)
> > > {
> > > - return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > > - range->rv_min, range->rv_max);
> > > + int i;
> > > +
> > > + if (!target_impl_cpu_num)
> > > + return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > > + range->rv_min, range->rv_max);
> > > +
> > > + for (i = 0; i < target_impl_cpu_num; i++) {
> > > + if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
> > > + range->model,
> > > + range->rv_min, range->rv_max))
> > > + return true;
> > > + }
> > > + return false;
> > > }
> >
> > It's a interesting approach but how does this work in practice if an
> > erratum requires a firmware counterpart? Do we expect firmwares on all
> > machines involved to have workarounds for the other machines? Or is KVM
> > going to intercept those SMCs and pretend the EL3 counterpart is there?
>
> KVM already traps SMCs, and could do something on behalf of the guest
> (such as pretending that the mitigation has happened if not on the
> correct host) *IF* the mitigation is architected (à la WA{1,2,3}).
That's the main thing I had in mind. I don't think we have any other
errata that requires firmware run-time discovery and interaction, though
you never know when we'll add new one.
> If it is implementation specific, then we can immediately stop
> pretending that a guest running on those systems can be migrated.
Makes sense.
> The only thing it helps a bit is big-little.
It does help a bit or, at least, we have some code for handling these
variations that can be extended. However, with this patchset, the host
only probes the availability of the workarounds on the SoC it booted. It
has no idea about the extra MIDRs the VMM picks and what the other
machines in the clouds support.
Anyway, let's hope the VMs only migrate between platforms that are
equally broken.
--
Catalin
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [PATCH v6 4/4] arm64: paravirt: Enable errata based on implementation CPUs
2025-02-07 18:10 ` Catalin Marinas
@ 2025-02-07 18:17 ` Marc Zyngier
0 siblings, 0 replies; 13+ messages in thread
From: Marc Zyngier @ 2025-02-07 18:17 UTC (permalink / raw)
To: Catalin Marinas
Cc: Shameer Kolothum, kvmarm, oliver.upton, will, mark.rutland,
cohuck, eric.auger, sebott, yuzenghui, wangzhou1, jiangkunkun,
jonathan.cameron, anthony.jebson, linux-arm-kernel, linuxarm
On Fri, 07 Feb 2025 18:10:08 +0000,
Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> On Fri, Feb 07, 2025 at 02:31:12PM +0000, Marc Zyngier wrote:
> > On Fri, 07 Feb 2025 14:08:44 +0000,
> > Catalin Marinas <catalin.marinas@arm.com> wrote:
> > > On Wed, Feb 05, 2025 at 01:22:22PM +0000, Shameer Kolothum wrote:
> > > > static inline bool is_midr_in_range(struct midr_range const *range)
> > > > {
> > > > - return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > > > - range->rv_min, range->rv_max);
> > > > + int i;
> > > > +
> > > > + if (!target_impl_cpu_num)
> > > > + return midr_is_cpu_model_range(read_cpuid_id(), range->model,
> > > > + range->rv_min, range->rv_max);
> > > > +
> > > > + for (i = 0; i < target_impl_cpu_num; i++) {
> > > > + if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
> > > > + range->model,
> > > > + range->rv_min, range->rv_max))
> > > > + return true;
> > > > + }
> > > > + return false;
> > > > }
> > >
> > > It's a interesting approach but how does this work in practice if an
> > > erratum requires a firmware counterpart? Do we expect firmwares on all
> > > machines involved to have workarounds for the other machines? Or is KVM
> > > going to intercept those SMCs and pretend the EL3 counterpart is there?
> >
> > KVM already traps SMCs, and could do something on behalf of the guest
> > (such as pretending that the mitigation has happened if not on the
> > correct host) *IF* the mitigation is architected (à la WA{1,2,3}).
>
> That's the main thing I had in mind. I don't think we have any other
> errata that requires firmware run-time discovery and interaction, though
> you never know when we'll add new one.
>
> > If it is implementation specific, then we can immediately stop
> > pretending that a guest running on those systems can be migrated.
>
> Makes sense.
>
> > The only thing it helps a bit is big-little.
>
> It does help a bit or, at least, we have some code for handling these
> variations that cab be extended. However, with this patchset, the host
> only probes the availability of the workarounds on the SoC it booted. It
> has no idea about the extra MIDRs the VMM picks and what the other
> machines in the clouds support.
But that's the contract. The VMM has to be omniscient and know exactly
what it can safely migrate to. It literally says "trust me, I know
what I'm doing".
> Anyway, let's hope the VMs only migrate between platforms that are
> equally broken.
No shortage of that, I'm afraid! :)
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 13+ messages in thread