* [PATCH v3] KVM: Check the allocation of pv cpu mask
@ 2020-09-25 18:07 lihaiwei.kernel
2020-09-29 11:40 ` Vitaly Kuznetsov
0 siblings, 1 reply; 2+ messages in thread
From: lihaiwei.kernel @ 2020-09-25 18:07 UTC (permalink / raw)
To: linux-kernel, kvm, x86
Cc: pbonzini, sean.j.christopherson, vkuznets, wanpengli, jmattson,
joro, tglx, mingo, bp, hpa, Haiwei Li
From: Haiwei Li <lihaiwei@tencent.com>
Check the allocation of per-cpu __pv_cpu_mask.
Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
---
v1 -> v2:
* add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
v2 -> v3:
* always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
arch/x86/kernel/kvm.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9663ba31347c..1e5da6db519c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -553,7 +553,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
static void kvm_setup_pv_ipi(void)
{
apic->send_IPI_mask = kvm_send_ipi_mask;
- apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
pr_info("setup PV IPIs\n");
}
@@ -619,6 +618,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
struct kvm_steal_time *src;
struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+ if (unlikely(!flushmask)) {
+ native_flush_tlb_others(cpumask, info);
+ return;
+ }
+
cpumask_copy(flushmask, cpumask);
/*
* We have to call flush only on online vCPUs. And
@@ -765,6 +769,14 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
+static void kvm_free_cpumask(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
static __init int kvm_alloc_cpumask(void)
{
int cpu;
@@ -783,11 +795,20 @@ static __init int kvm_alloc_cpumask(void)
if (alloc)
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
- GFP_KERNEL, cpu_to_node(cpu));
+ if (!zalloc_cpumask_var_node(
+ per_cpu_ptr(&__pv_cpu_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu)))
+ goto zalloc_cpumask_fail;
}
+#if defined(CONFIG_SMP)
+ apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+#endif
return 0;
+
+zalloc_cpumask_fail:
+ kvm_free_cpumask();
+ return -ENOMEM;
}
arch_initcall(kvm_alloc_cpumask);
--
2.18.4
^ permalink raw reply related [flat|nested] 2+ messages in thread* Re: [PATCH v3] KVM: Check the allocation of pv cpu mask
2020-09-25 18:07 [PATCH v3] KVM: Check the allocation of pv cpu mask lihaiwei.kernel
@ 2020-09-29 11:40 ` Vitaly Kuznetsov
0 siblings, 0 replies; 2+ messages in thread
From: Vitaly Kuznetsov @ 2020-09-29 11:40 UTC (permalink / raw)
To: lihaiwei.kernel, linux-kernel
Cc: pbonzini, sean.j.christopherson, wanpengli, jmattson, joro, tglx,
mingo, bp, hpa, Haiwei Li, kvm, x86
lihaiwei.kernel@gmail.com writes:
> From: Haiwei Li <lihaiwei@tencent.com>
>
> Check the allocation of per-cpu __pv_cpu_mask.
>
> Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
> ---
> v1 -> v2:
> * add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
> v2 -> v3:
> * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
>
> arch/x86/kernel/kvm.c | 27 ++++++++++++++++++++++++---
> 1 file changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 9663ba31347c..1e5da6db519c 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -553,7 +553,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
> static void kvm_setup_pv_ipi(void)
> {
> apic->send_IPI_mask = kvm_send_ipi_mask;
> - apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
I see that kvm_send_ipi_mask_allbutself() uses per CPU __pv_cpu_mask and
kvm_send_ipi_mask doesn't but assigning send_IPI_mask here and
send_IPI_mask_allbutself in kvm_alloc_cpumask() looks weird. Personally,
I'd prefer to move apic->send_IPI_mask to kvm_alloc_cpumask() too
(probably call kvm_setup_pv_ipi() and get rid of kvm_apic_init()
completely).
Alternatively, we can save the original apic->send_IPI_mask_allbutself
value to a variable and call it from kvm_send_ipi_mask_allbutself() when
__pv_cpu_mask wasn't allocated.
> pr_info("setup PV IPIs\n");
> }
>
> @@ -619,6 +618,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
> struct kvm_steal_time *src;
> struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
>
> + if (unlikely(!flushmask)) {
> + native_flush_tlb_others(cpumask, info);
> + return;
> + }
> +
> cpumask_copy(flushmask, cpumask);
> /*
> * We have to call flush only on online vCPUs. And
> @@ -765,6 +769,14 @@ static __init int activate_jump_labels(void)
> }
> arch_initcall(activate_jump_labels);
>
> +static void kvm_free_cpumask(void)
> +{
> + unsigned int cpu;
> +
> + for_each_possible_cpu(cpu)
> + free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
> +}
> +
> static __init int kvm_alloc_cpumask(void)
> {
> int cpu;
> @@ -783,11 +795,20 @@ static __init int kvm_alloc_cpumask(void)
>
> if (alloc)
> for_each_possible_cpu(cpu) {
> - zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
> - GFP_KERNEL, cpu_to_node(cpu));
> + if (!zalloc_cpumask_var_node(
> + per_cpu_ptr(&__pv_cpu_mask, cpu),
> + GFP_KERNEL, cpu_to_node(cpu)))
> + goto zalloc_cpumask_fail;
> }
>
> +#if defined(CONFIG_SMP)
> + apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> +#endif
> return 0;
> +
> +zalloc_cpumask_fail:
> + kvm_free_cpumask();
> + return -ENOMEM;
> }
> arch_initcall(kvm_alloc_cpumask);
--
Vitaly
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2020-09-29 11:40 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2020-09-25 18:07 [PATCH v3] KVM: Check the allocation of pv cpu mask lihaiwei.kernel
2020-09-29 11:40 ` Vitaly Kuznetsov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox