public inbox for linux-hyperv@vger.kernel.org
 help / color / mirror / Atom feed
From: Mukesh R <mrathor@linux.microsoft.com>
To: Ard Biesheuvel <ardb@kernel.org>, linux-kernel@vger.kernel.org
Cc: x86@kernel.org, Wei Liu <wei.liu@kernel.org>,
	Uros Bizjak <ubizjak@gmail.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	linux-hyperv@vger.kernel.org
Subject: Re: [PATCH v2] x86/hyperv: Use __naked attribute to fix stackless C function
Date: Fri, 27 Feb 2026 15:03:55 -0800	[thread overview]
Message-ID: <3cd719bb-334a-d05a-d44a-f68982a76a9d@linux.microsoft.com> (raw)
In-Reply-To: <20260227224030.299993-2-ardb@kernel.org>

On 2/27/26 14:40, Ard Biesheuvel wrote:
> hv_crash_c_entry() is a C function that is entered without a stack,
> and this is only allowed for functions that have the __naked attribute,
> which informs the compiler that it must not emit the usual prologue and
> epilogue or emit any other kind of instrumentation that relies on a
> stack frame.
> 
> So split up the function, and set the __naked attribute on the initial
> part that sets up the stack, GDT, IDT and other pieces that are needed
> for ordinary C execution. Given that function calls are not permitted
> either, use the existing long return code in an asm() block to call the
> second part of the function, which is an ordinary function that is
> permitted to call other functions as usual.

Thank you for the patch. I'll start a build on the side and test it
out and let you know.

Thanks,
-Mukesh



> Cc: Mukesh Rathor <mrathor@linux.microsoft.com>
> Cc: Wei Liu <wei.liu@kernel.org>
> Cc: Uros Bizjak <ubizjak@gmail.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: linux-hyperv@vger.kernel.org
> Fixes: 94212d34618c ("x86/hyperv: Implement hypervisor RAM collection into vmcore")
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> v2: apply some asm tweaks suggested by Uros and Andrew
> 
>   arch/x86/hyperv/hv_crash.c | 79 ++++++++++----------
>   1 file changed, 41 insertions(+), 38 deletions(-)
> 
> diff --git a/arch/x86/hyperv/hv_crash.c b/arch/x86/hyperv/hv_crash.c
> index 92da1b4f2e73..1c0965eb346e 100644
> --- a/arch/x86/hyperv/hv_crash.c
> +++ b/arch/x86/hyperv/hv_crash.c
> @@ -107,14 +107,12 @@ static void __noreturn hv_panic_timeout_reboot(void)
>   		cpu_relax();
>   }
>   
> -/* This cannot be inlined as it needs stack */
> -static noinline __noclone void hv_crash_restore_tss(void)
> +static void hv_crash_restore_tss(void)
>   {
>   	load_TR_desc();
>   }
>   
> -/* This cannot be inlined as it needs stack */
> -static noinline void hv_crash_clear_kernpt(void)
> +static void hv_crash_clear_kernpt(void)
>   {
>   	pgd_t *pgd;
>   	p4d_t *p4d;
> @@ -125,6 +123,25 @@ static noinline void hv_crash_clear_kernpt(void)
>   	native_p4d_clear(p4d);
>   }
>   
> +
> +static void __noreturn hv_crash_handle(void)
> +{
> +	hv_crash_restore_tss();
> +	hv_crash_clear_kernpt();
> +
> +	/* we are now fully in devirtualized normal kernel mode */
> +	__crash_kexec(NULL);
> +
> +	hv_panic_timeout_reboot();
> +}
> +
> +/*
> + * __naked functions do not permit function calls, not even to __always_inline
> + * functions that only contain asm() blocks themselves. So use a macro instead.
> + */
> +#define hv_wrmsr(msr, val) \
> +	asm("wrmsr" :: "c"(msr), "a"((u32)val), "d"((u32)(val >> 32)) : "memory")
> +
>   /*
>    * This is the C entry point from the asm glue code after the disable hypercall.
>    * We enter here in IA32-e long mode, ie, full 64bit mode running on kernel
> @@ -133,49 +150,35 @@ static noinline void hv_crash_clear_kernpt(void)
>    * available. We restore kernel GDT, and rest of the context, and continue
>    * to kexec.
>    */
> -static asmlinkage void __noreturn hv_crash_c_entry(void)
> +static void __naked hv_crash_c_entry(void)
>   {
> -	struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
> -
>   	/* first thing, restore kernel gdt */
> -	native_load_gdt(&ctxt->gdtr);
> +	asm volatile("lgdt %0" : : "m" (hv_crash_ctxt.gdtr));
>   
> -	asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
> -	asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
> +	asm volatile("movw %0, %%ss" : : "m"(hv_crash_ctxt.ss));
> +	asm volatile("movq %0, %%rsp" : : "m"(hv_crash_ctxt.rsp));
>   
> -	asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
> -	asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
> -	asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
> -	asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
> +	asm volatile("movw %0, %%ds" : : "m"(hv_crash_ctxt.ds));
> +	asm volatile("movw %0, %%es" : : "m"(hv_crash_ctxt.es));
> +	asm volatile("movw %0, %%fs" : : "m"(hv_crash_ctxt.fs));
> +	asm volatile("movw %0, %%gs" : : "m"(hv_crash_ctxt.gs));
>   
> -	native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
> -	asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
> +	hv_wrmsr(MSR_IA32_CR_PAT, hv_crash_ctxt.pat);
> +	asm volatile("movq %0, %%cr0" : : "r"(hv_crash_ctxt.cr0));
>   
> -	asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8));
> -	asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4));
> -	asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr4));
> +	asm volatile("movq %0, %%cr8" : : "r"(hv_crash_ctxt.cr8));
> +	asm volatile("movq %0, %%cr4" : : "r"(hv_crash_ctxt.cr4));
> +	asm volatile("movq %0, %%cr2" : : "r"(hv_crash_ctxt.cr4));
>   
> -	native_load_idt(&ctxt->idtr);
> -	native_wrmsrq(MSR_GS_BASE, ctxt->gsbase);
> -	native_wrmsrq(MSR_EFER, ctxt->efer);
> +	asm volatile("lidt %0" : : "m" (hv_crash_ctxt.idtr));
> +	hv_wrmsr(MSR_GS_BASE, hv_crash_ctxt.gsbase);
> +	hv_wrmsr(MSR_EFER, hv_crash_ctxt.efer);
>   
>   	/* restore the original kernel CS now via far return */
> -	asm volatile("movzwq %0, %%rax\n\t"
> -		     "pushq %%rax\n\t"
> -		     "pushq $1f\n\t"
> -		     "lretq\n\t"
> -		     "1:nop\n\t" : : "m"(ctxt->cs) : "rax");
> -
> -	/* We are in asmlinkage without stack frame, hence make C function
> -	 * calls which will buy stack frames.
> -	 */
> -	hv_crash_restore_tss();
> -	hv_crash_clear_kernpt();
> -
> -	/* we are now fully in devirtualized normal kernel mode */
> -	__crash_kexec(NULL);
> -
> -	hv_panic_timeout_reboot();
> +	asm volatile("pushq %q0\n\t"
> +		     "pushq %q1\n\t"
> +		     "lretq"
> +		     :: "r"(hv_crash_ctxt.cs), "r"(hv_crash_handle));
>   }
>   /* Tell gcc we are using lretq long jump in the above function intentionally */
>   STACK_FRAME_NON_STANDARD(hv_crash_c_entry);


  reply	other threads:[~2026-02-27 23:03 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-27 22:40 [PATCH v2] x86/hyperv: Use __naked attribute to fix stackless C function Ard Biesheuvel
2026-02-27 23:03 ` Mukesh R [this message]
2026-02-28  3:36   ` Mukesh R
2026-02-27 23:07 ` Andrew Cooper
2026-02-28  6:46 ` Uros Bizjak
2026-02-28  9:38 ` Uros Bizjak
2026-02-28 16:37   ` Ard Biesheuvel
2026-02-28 17:15     ` Uros Bizjak
2026-02-28 10:17 ` Uros Bizjak
2026-02-28 16:40   ` Ard Biesheuvel
2026-02-28 17:34     ` Uros Bizjak
2026-02-28 17:38       ` Andrew Cooper

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=3cd719bb-334a-d05a-d44a-f68982a76a9d@linux.microsoft.com \
    --to=mrathor@linux.microsoft.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=ardb@kernel.org \
    --cc=linux-hyperv@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=ubizjak@gmail.com \
    --cc=wei.liu@kernel.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox