From: avinashlalotra <abinashlalotra@gmail.com>
To: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, vkuznets@redhat.com,
seanjc@google.com, pbonzini@redhat.com, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
x86@kernel.org, hpa@zytor.com,
avinashlalotra <abinashsinghlalotra@gmail.com>
Subject: [RFC PATCH] KVM: x86: Dynamically allocate bitmap to fix -Wframe-larger-than error
Date: Fri, 13 Jun 2025 16:40:23 +0530 [thread overview]
Message-ID: <20250613111023.786265-1-abinashsinghlalotra@gmail.com> (raw)
Building the kernel with LLVM fails due to
a stack frame size overflow in `kvm_hv_flush_tlb()`:
arch/x86/kvm/hyperv.c:2001:12: error: stack frame size (1336) exceeds limit (1024) in 'kvm_hv_flush_tlb' [-Werror,-Wframe-larger-than]
The issue is caused by a large bitmap allocated on the stack. To resolve
this, dynamically allocate the bitmap using `bitmap_zalloc()` and free it with
`bitmap_free()` after use. This reduces the function's stack usage and avoids
the compiler error when `-Werror` is set.
New variable 'ret' is introduced to return after freeing the allocated memory.
"HV_STATUS_INSUFFICIENT_MEMORY" is returned when memory allocation fails .
I checked the functions calling this functions and It seems this error code
will not affect the existing system.
Please provide feedback about this patch. There were more warnings like this,
so if this is the correct way to fix such issues, I will submit patches for
them.
This follows similar prior fixes, such as:
https://lore.kernel.org/all/ab75a444-22a1-47f5-b3c0-253660395b5a@arm.com/
where a large on-stack `struct device` was moved to heap memory in
`arm_lpae_do_selftests()` for the same reason.
Signed-off-by: avinashlalotra <abinashsinghlalotra@gmail.com>
---
arch/x86/kvm/hyperv.c | 48 ++++++++++++++++++++++++++++++-------------
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 24f0318c50d7..78bb8d58fe94 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2005,7 +2005,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct kvm *kvm = vcpu->kvm;
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
- DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+ unsigned long *vcpu_mask;
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
/*
* Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
@@ -2019,6 +2019,11 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct kvm_vcpu *v;
unsigned long i;
bool all_cpus;
+ u64 ret;
+
+ vcpu_mask = bitmap_zalloc(KVM_MAX_VCPUS, GFP_KERNEL);
+ if (!vcpu_mask)
+ return HV_STATUS_INSUFFICIENT_MEMORY;
/*
* The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
@@ -2036,8 +2041,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
*/
if (!hc->fast && is_guest_mode(vcpu)) {
hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
- if (unlikely(hc->ingpa == INVALID_GPA))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (unlikely(hc->ingpa == INVALID_GPA)){
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
}
if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
@@ -2049,8 +2056,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
hc->consumed_xmm_halves = 1;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa,
- &flush, sizeof(flush))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ &flush, sizeof(flush)))) {
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
hc->data_offset = sizeof(flush);
}
@@ -2079,8 +2088,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
hc->consumed_xmm_halves = 2;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
- sizeof(flush_ex))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ sizeof(flush_ex)))){
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
hc->data_offset = sizeof(flush_ex);
}
@@ -2093,15 +2104,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- if (hc->var_cnt != hweight64(valid_bank_mask))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->var_cnt != hweight64(valid_bank_mask)){
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
if (!all_cpus) {
if (!hc->var_cnt)
goto ret_success;
- if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)){
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
}
/*
@@ -2122,8 +2137,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
tlb_flush_entries = NULL;
} else {
- if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries)){
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ goto out_free;
+ }
tlb_flush_entries = __tlb_flush_entries;
}
@@ -2189,8 +2206,11 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
ret_success:
/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
- return (u64)HV_STATUS_SUCCESS |
+ ret = (u64)HV_STATUS_SUCCESS |
((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
+out_free:
+ bitmap_free(vcpu_mask);
+ return ret;
}
static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
--
2.43.0
next reply other threads:[~2025-06-13 11:10 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-13 11:10 avinashlalotra [this message]
2025-06-13 11:25 ` [RFC PATCH] KVM: x86: Dynamically allocate bitmap to fix -Wframe-larger-than error Paolo Bonzini
2025-06-13 13:58 ` Sean Christopherson
2025-06-13 17:17 ` Abinash
2025-06-13 22:24 ` Sean Christopherson
2025-06-24 19:36 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250613111023.786265-1-abinashsinghlalotra@gmail.com \
--to=abinashlalotra@gmail.com \
--cc=abinashsinghlalotra@gmail.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=hpa@zytor.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=tglx@linutronix.de \
--cc=vkuznets@redhat.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox