public inbox for linux-coco@lists.linux.dev
 help / color / mirror / Atom feed
* [PATCH 0/3] x86/sev: Carve out the SVSM support code
@ 2025-12-04 12:48 Borislav Petkov
  2025-12-04 12:48 ` [PATCH 1/3] x86/sev: Move the internal header Borislav Petkov
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 12:48 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

From: "Borislav Petkov (AMD)" <bp@alien8.de>

Hi,

I've been meaning to do this for a while now but didn't have a good idea how
to do it nicely. Using the internal header makes it almost trivial.

Thx.

Borislav Petkov (AMD) (3):
  x86/sev: Move the internal header
  x86/sev: Add internal header guards
  x86/sev: Carve out the SVSM code into a separate compilation unit

 arch/x86/boot/startup/sev-startup.c           |   3 +-
 arch/x86/coco/sev/Makefile                    |   2 +-
 arch/x86/coco/sev/core.c                      | 380 +-----------------
 .../sev-internal.h => coco/sev/internal.h}    |  32 ++
 arch/x86/coco/sev/noinstr.c                   |   3 +-
 arch/x86/coco/sev/svsm.c                      | 362 +++++++++++++++++
 arch/x86/coco/sev/vc-handle.c                 |   3 +-
 7 files changed, 403 insertions(+), 382 deletions(-)
 rename arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} (75%)
 create mode 100644 arch/x86/coco/sev/svsm.c

-- 
2.51.0


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/3] x86/sev: Move the internal header
  2025-12-04 12:48 [PATCH 0/3] x86/sev: Carve out the SVSM support code Borislav Petkov
@ 2025-12-04 12:48 ` Borislav Petkov
  2025-12-04 14:18   ` Tom Lendacky
  2025-12-04 12:48 ` [PATCH 2/3] x86/sev: Add internal header guards Borislav Petkov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 12:48 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

From: "Borislav Petkov (AMD)" <bp@alien8.de>

Move the internal header out of the usual include/asm/ include path
because having an "internal" header there doesn't really make it so
- quite the opposite.

So move it where it belongs and make it really internal.

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
---
 arch/x86/boot/startup/sev-startup.c                          | 3 ++-
 arch/x86/coco/sev/core.c                                     | 3 ++-
 arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} | 0
 arch/x86/coco/sev/noinstr.c                                  | 3 ++-
 arch/x86/coco/sev/vc-handle.c                                | 3 ++-
 5 files changed, 8 insertions(+), 4 deletions(-)
 rename arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} (100%)

diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
index 09725428d3e6..1115214429fd 100644
--- a/arch/x86/boot/startup/sev-startup.c
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -27,7 +27,6 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -41,6 +40,8 @@
 #include <asm/cpuid/api.h>
 #include <asm/cmdline.h>
 
+#include "../coco/sev/internal.h"
+
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
 
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 9ae3b11754e6..4e618e596267 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -31,7 +31,6 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -46,6 +45,8 @@
 #include <asm/cmdline.h>
 #include <asm/msr.h>
 
+#include "internal.h"
+
 /* Bitmap of SEV features supported by the hypervisor */
 u64 sev_hv_features __ro_after_init;
 SYM_PIC_ALIAS(sev_hv_features);
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/coco/sev/internal.h
similarity index 100%
rename from arch/x86/include/asm/sev-internal.h
rename to arch/x86/coco/sev/internal.h
diff --git a/arch/x86/coco/sev/noinstr.c b/arch/x86/coco/sev/noinstr.c
index b527eafb6312..9d94aca4a698 100644
--- a/arch/x86/coco/sev/noinstr.c
+++ b/arch/x86/coco/sev/noinstr.c
@@ -16,7 +16,8 @@
 #include <asm/msr.h>
 #include <asm/ptrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
+
+#include "internal.h"
 
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
index f08c7505ed82..43f264afd590 100644
--- a/arch/x86/coco/sev/vc-handle.c
+++ b/arch/x86/coco/sev/vc-handle.c
@@ -23,7 +23,6 @@
 #include <asm/init.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -35,6 +34,8 @@
 #include <asm/apic.h>
 #include <asm/cpuid/api.h>
 
+#include "internal.h"
+
 static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 					   unsigned long vaddr, phys_addr_t *paddr)
 {
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/3] x86/sev: Add internal header guards
  2025-12-04 12:48 [PATCH 0/3] x86/sev: Carve out the SVSM support code Borislav Petkov
  2025-12-04 12:48 ` [PATCH 1/3] x86/sev: Move the internal header Borislav Petkov
@ 2025-12-04 12:48 ` Borislav Petkov
  2025-12-04 12:48 ` [PATCH 3/3] x86/sev: Carve out the SVSM code into a separate compilation unit Borislav Petkov
  2025-12-04 14:53 ` [PATCH 0/3] x86/sev: Carve out the SVSM support code Tom Lendacky
  3 siblings, 0 replies; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 12:48 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

From: "Borislav Petkov (AMD)" <bp@alien8.de>

All headers need include guard ifdeffery.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
---
 arch/x86/coco/sev/internal.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/coco/sev/internal.h b/arch/x86/coco/sev/internal.h
index c58c47c68ab6..af991f1da095 100644
--- a/arch/x86/coco/sev/internal.h
+++ b/arch/x86/coco/sev/internal.h
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __X86_COCO_SEV_INTERNAL_H__
+#define __X86_COCO_SEV_INTERNAL_H__
 
 #define DR7_RESET_VALUE        0x400
 
@@ -85,3 +87,4 @@ enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt
 u64 get_hv_features(void);
 
 const struct snp_cpuid_table *snp_cpuid_get_table(void);
+#endif /* __X86_COCO_SEV_INTERNAL_H__ */
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/3] x86/sev: Carve out the SVSM code into a separate compilation unit
  2025-12-04 12:48 [PATCH 0/3] x86/sev: Carve out the SVSM support code Borislav Petkov
  2025-12-04 12:48 ` [PATCH 1/3] x86/sev: Move the internal header Borislav Petkov
  2025-12-04 12:48 ` [PATCH 2/3] x86/sev: Add internal header guards Borislav Petkov
@ 2025-12-04 12:48 ` Borislav Petkov
  2025-12-04 14:53 ` [PATCH 0/3] x86/sev: Carve out the SVSM support code Tom Lendacky
  3 siblings, 0 replies; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 12:48 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

From: "Borislav Petkov (AMD)" <bp@alien8.de>

Move the SVSM-related machinery into a separate compilation unit in
order to keep sev/core.c slim and "on-topic".

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
---
 arch/x86/coco/sev/Makefile   |   2 +-
 arch/x86/coco/sev/core.c     | 377 -----------------------------------
 arch/x86/coco/sev/internal.h |  29 +++
 arch/x86/coco/sev/svsm.c     | 362 +++++++++++++++++++++++++++++++++
 4 files changed, 392 insertions(+), 378 deletions(-)
 create mode 100644 arch/x86/coco/sev/svsm.c

diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index 3b8ae214a6a6..fb8ffedfc8b0 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-obj-y += core.o noinstr.o vc-handle.o
+obj-y += core.o noinstr.o vc-handle.o svsm.o
 
 # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
 UBSAN_SANITIZE_noinstr.o	:= n
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 4e618e596267..379e0c09c7f3 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -55,40 +55,6 @@ SYM_PIC_ALIAS(sev_hv_features);
 u64 sev_secrets_pa __ro_after_init;
 SYM_PIC_ALIAS(sev_secrets_pa);
 
-/* For early boot SVSM communication */
-struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
-SYM_PIC_ALIAS(boot_svsm_ca_page);
-
-/*
- * SVSM related information:
- *   During boot, the page tables are set up as identity mapped and later
- *   changed to use kernel virtual addresses. Maintain separate virtual and
- *   physical addresses for the CAA to allow SVSM functions to be used during
- *   early boot, both with identity mapped virtual addresses and proper kernel
- *   virtual addresses.
- */
-u64 boot_svsm_caa_pa __ro_after_init;
-SYM_PIC_ALIAS(boot_svsm_caa_pa);
-
-DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-DEFINE_PER_CPU(u64, svsm_caa_pa);
-
-static inline struct svsm_ca *svsm_get_caa(void)
-{
-	if (sev_cfg.use_cas)
-		return this_cpu_read(svsm_caa);
-	else
-		return rip_rel_ptr(&boot_svsm_ca_page);
-}
-
-static inline u64 svsm_get_caa_pa(void)
-{
-	if (sev_cfg.use_cas)
-		return this_cpu_read(svsm_caa_pa);
-	else
-		return boot_svsm_caa_pa;
-}
-
 /* AP INIT values as documented in the APM2  section "Processor Initialization State" */
 #define AP_INIT_CS_LIMIT		0xffff
 #define AP_INIT_DS_LIMIT		0xffff
@@ -218,95 +184,6 @@ static u64 __init get_jump_table_addr(void)
 	return ret;
 }
 
-static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
-{
-	struct es_em_ctxt ctxt;
-	u8 pending = 0;
-
-	vc_ghcb_invalidate(ghcb);
-
-	/*
-	 * Fill in protocol and format specifiers. This can be called very early
-	 * in the boot, so use rip-relative references as needed.
-	 */
-	ghcb->protocol_version = ghcb_version;
-	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
-
-	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
-	ghcb_set_sw_exit_info_1(ghcb, 0);
-	ghcb_set_sw_exit_info_2(ghcb, 0);
-
-	sev_es_wr_ghcb_msr(__pa(ghcb));
-
-	svsm_issue_call(call, &pending);
-
-	if (pending)
-		return -EINVAL;
-
-	switch (verify_exception_info(ghcb, &ctxt)) {
-	case ES_OK:
-		break;
-	case ES_EXCEPTION:
-		vc_forward_exception(&ctxt);
-		fallthrough;
-	default:
-		return -EINVAL;
-	}
-
-	return svsm_process_result_codes(call);
-}
-
-static int svsm_perform_call_protocol(struct svsm_call *call)
-{
-	struct ghcb_state state;
-	unsigned long flags;
-	struct ghcb *ghcb;
-	int ret;
-
-	flags = native_local_irq_save();
-
-	if (sev_cfg.ghcbs_initialized)
-		ghcb = __sev_get_ghcb(&state);
-	else if (boot_ghcb)
-		ghcb = boot_ghcb;
-	else
-		ghcb = NULL;
-
-	do {
-		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
-			   : __pi_svsm_perform_msr_protocol(call);
-	} while (ret == -EAGAIN);
-
-	if (sev_cfg.ghcbs_initialized)
-		__sev_put_ghcb(&state);
-
-	native_local_irq_restore(flags);
-
-	return ret;
-}
-
-static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
-				    int ret, u64 svsm_ret)
-{
-	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
-	     pfn, action, page_size, ret, svsm_ret);
-
-	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
-{
-	unsigned int page_size;
-	bool action;
-	u64 pfn;
-
-	pfn = pc->entry[pc->cur_index].pfn;
-	action = pc->entry[pc->cur_index].action;
-	page_size = pc->entry[pc->cur_index].page_size;
-
-	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
-}
-
 static void pval_pages(struct snp_psc_desc *desc)
 {
 	struct psc_entry *e;
@@ -343,152 +220,6 @@ static void pval_pages(struct snp_psc_desc *desc)
 	}
 }
 
-static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
-					struct svsm_pvalidate_call *pc)
-{
-	struct svsm_pvalidate_entry *pe;
-
-	/* Nothing in the CA yet */
-	pc->num_entries = 0;
-	pc->cur_index   = 0;
-
-	pe = &pc->entry[0];
-
-	while (pfn < pfn_end) {
-		pe->page_size = RMP_PG_SIZE_4K;
-		pe->action    = action;
-		pe->ignore_cf = 0;
-		pe->rsvd      = 0;
-		pe->pfn       = pfn;
-
-		pe++;
-		pfn++;
-
-		pc->num_entries++;
-		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-			break;
-	}
-
-	return pfn;
-}
-
-static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
-				       struct svsm_pvalidate_call *pc)
-{
-	struct svsm_pvalidate_entry *pe;
-	struct psc_entry *e;
-
-	/* Nothing in the CA yet */
-	pc->num_entries = 0;
-	pc->cur_index   = 0;
-
-	pe = &pc->entry[0];
-	e  = &desc->entries[desc_entry];
-
-	while (desc_entry <= desc->hdr.end_entry) {
-		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
-		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
-		pe->ignore_cf = 0;
-		pe->rsvd      = 0;
-		pe->pfn       = e->gfn;
-
-		pe++;
-		e++;
-
-		desc_entry++;
-		pc->num_entries++;
-		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-			break;
-	}
-
-	return desc_entry;
-}
-
-static void svsm_pval_pages(struct snp_psc_desc *desc)
-{
-	struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
-	unsigned int i, pv_4k_count = 0;
-	struct svsm_pvalidate_call *pc;
-	struct svsm_call call = {};
-	unsigned long flags;
-	bool action;
-	u64 pc_pa;
-	int ret;
-
-	/*
-	 * This can be called very early in the boot, use native functions in
-	 * order to avoid paravirt issues.
-	 */
-	flags = native_local_irq_save();
-
-	/*
-	 * The SVSM calling area (CA) can support processing 510 entries at a
-	 * time. Loop through the Page State Change descriptor until the CA is
-	 * full or the last entry in the descriptor is reached, at which time
-	 * the SVSM is invoked. This repeats until all entries in the descriptor
-	 * are processed.
-	 */
-	call.caa = svsm_get_caa();
-
-	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
-	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
-	/* Protocol 0, Call ID 1 */
-	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
-	call.rcx = pc_pa;
-
-	for (i = 0; i <= desc->hdr.end_entry;) {
-		i = svsm_build_ca_from_psc_desc(desc, i, pc);
-
-		do {
-			ret = svsm_perform_call_protocol(&call);
-			if (!ret)
-				continue;
-
-			/*
-			 * Check if the entry failed because of an RMP mismatch (a
-			 * PVALIDATE at 2M was requested, but the page is mapped in
-			 * the RMP as 4K).
-			 */
-
-			if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
-			    pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
-				/* Save this entry for post-processing at 4K */
-				pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
-				/* Skip to the next one unless at the end of the list */
-				pc->cur_index++;
-				if (pc->cur_index < pc->num_entries)
-					ret = -EAGAIN;
-				else
-					ret = 0;
-			}
-		} while (ret == -EAGAIN);
-
-		if (ret)
-			svsm_pval_terminate(pc, ret, call.rax_out);
-	}
-
-	/* Process any entries that failed to be validated at 2M and validate them at 4K */
-	for (i = 0; i < pv_4k_count; i++) {
-		u64 pfn, pfn_end;
-
-		action  = pv_4k[i].action;
-		pfn     = pv_4k[i].pfn;
-		pfn_end = pfn + 512;
-
-		while (pfn < pfn_end) {
-			pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
-
-			ret = svsm_perform_call_protocol(&call);
-			if (ret)
-				svsm_pval_terminate(pc, ret, call.rax_out);
-		}
-	}
-
-	native_local_irq_restore(flags);
-}
-
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
 	struct psc_entry *e;
@@ -1589,56 +1320,6 @@ static int __init report_snp_info(void)
 }
 arch_initcall(report_snp_info);
 
-static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
-{
-	/* If (new) lengths have been returned, propagate them up */
-	if (call->rcx_out != call->rcx)
-		input->manifest_buf.len = call->rcx_out;
-
-	if (call->rdx_out != call->rdx)
-		input->certificates_buf.len = call->rdx_out;
-
-	if (call->r8_out != call->r8)
-		input->report_buf.len = call->r8_out;
-}
-
-int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
-			      struct svsm_attest_call *input)
-{
-	struct svsm_attest_call *ac;
-	unsigned long flags;
-	u64 attest_call_pa;
-	int ret;
-
-	if (!snp_vmpl)
-		return -EINVAL;
-
-	local_irq_save(flags);
-
-	call->caa = svsm_get_caa();
-
-	ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
-	attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
-	*ac = *input;
-
-	/*
-	 * Set input registers for the request and set RDX and R8 to known
-	 * values in order to detect length values being returned in them.
-	 */
-	call->rax = call_id;
-	call->rcx = attest_call_pa;
-	call->rdx = -1;
-	call->r8 = -1;
-	ret = svsm_perform_call_protocol(call);
-	update_attest_input(call, input);
-
-	local_irq_restore(flags);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
-
 static int snp_issue_guest_request(struct snp_guest_req *req)
 {
 	struct snp_req_data *input = &req->input;
@@ -1703,64 +1384,6 @@ static int snp_issue_guest_request(struct snp_guest_req *req)
 	return ret;
 }
 
-/**
- * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
- *
- * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
- * which is the only request used so far.
- *
- * Return: true if the platform provides a vTPM SVSM device, false otherwise.
- */
-static bool snp_svsm_vtpm_probe(void)
-{
-	struct svsm_call call = {};
-
-	/* The vTPM device is available only if a SVSM is present */
-	if (!snp_vmpl)
-		return false;
-
-	call.caa = svsm_get_caa();
-	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
-
-	if (svsm_perform_call_protocol(&call))
-		return false;
-
-	/* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
-	return call.rcx_out & BIT_ULL(8);
-}
-
-/**
- * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
- * @buffer: A buffer used to both send the command and receive the response.
- *
- * Execute a SVSM_VTPM_CMD call as defined by
- * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
- *
- * All command request/response buffers have a common structure as specified by
- * the following table:
- *     Byte      Size       In/Out    Description
- *     Offset    (Bytes)
- *     0x000     4          In        Platform command
- *                          Out       Platform command response size
- *
- * Each command can build upon this common request/response structure to create
- * a structure specific to the command. See include/linux/tpm_svsm.h for more
- * details.
- *
- * Return: 0 on success, -errno on failure
- */
-int snp_svsm_vtpm_send_command(u8 *buffer)
-{
-	struct svsm_call call = {};
-
-	call.caa = svsm_get_caa();
-	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
-	call.rcx = __pa(buffer);
-
-	return svsm_perform_call_protocol(&call);
-}
-EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
-
 static struct platform_device sev_guest_device = {
 	.name		= "sev-guest",
 	.id		= -1,
diff --git a/arch/x86/coco/sev/internal.h b/arch/x86/coco/sev/internal.h
index af991f1da095..039326b5c799 100644
--- a/arch/x86/coco/sev/internal.h
+++ b/arch/x86/coco/sev/internal.h
@@ -66,6 +66,9 @@ extern u64 boot_svsm_caa_pa;
 
 enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
 void vc_forward_exception(struct es_em_ctxt *ctxt);
+void svsm_pval_pages(struct snp_psc_desc *desc);
+int svsm_perform_call_protocol(struct svsm_call *call);
+bool snp_svsm_vtpm_probe(void);
 
 static inline u64 sev_es_rd_ghcb_msr(void)
 {
@@ -87,4 +90,30 @@ enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt
 u64 get_hv_features(void);
 
 const struct snp_cpuid_table *snp_cpuid_get_table(void);
+
+static inline struct svsm_ca *svsm_get_caa(void)
+{
+	if (sev_cfg.use_cas)
+		return this_cpu_read(svsm_caa);
+	else
+		return rip_rel_ptr(&boot_svsm_ca_page);
+}
+
+static inline u64 svsm_get_caa_pa(void)
+{
+	if (sev_cfg.use_cas)
+		return this_cpu_read(svsm_caa_pa);
+	else
+		return boot_svsm_caa_pa;
+}
+
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+				    int ret, u64 svsm_ret)
+{
+	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+	     pfn, action, page_size, ret, svsm_ret);
+
+	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+}
+
 #endif /* __X86_COCO_SEV_INTERNAL_H__ */
diff --git a/arch/x86/coco/sev/svsm.c b/arch/x86/coco/sev/svsm.c
new file mode 100644
index 000000000000..2acf4a76afe7
--- /dev/null
+++ b/arch/x86/coco/sev/svsm.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SVSM support code
+ */
+
+#include <linux/types.h>
+
+#include <asm/sev.h>
+
+#include "internal.h"
+
+/* For early boot SVSM communication */
+struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
+SYM_PIC_ALIAS(boot_svsm_ca_page);
+
+/*
+ * SVSM related information:
+ *   During boot, the page tables are set up as identity mapped and later
+ *   changed to use kernel virtual addresses. Maintain separate virtual and
+ *   physical addresses for the CAA to allow SVSM functions to be used during
+ *   early boot, both with identity mapped virtual addresses and proper kernel
+ *   virtual addresses.
+ */
+u64 boot_svsm_caa_pa __ro_after_init;
+SYM_PIC_ALIAS(boot_svsm_caa_pa);
+
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
+{
+	struct es_em_ctxt ctxt;
+	u8 pending = 0;
+
+	vc_ghcb_invalidate(ghcb);
+
+	/*
+	 * Fill in protocol and format specifiers. This can be called very early
+	 * in the boot, so use rip-relative references as needed.
+	 */
+	ghcb->protocol_version = ghcb_version;
+	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
+
+	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
+	ghcb_set_sw_exit_info_1(ghcb, 0);
+	ghcb_set_sw_exit_info_2(ghcb, 0);
+
+	sev_es_wr_ghcb_msr(__pa(ghcb));
+
+	svsm_issue_call(call, &pending);
+
+	if (pending)
+		return -EINVAL;
+
+	switch (verify_exception_info(ghcb, &ctxt)) {
+	case ES_OK:
+		break;
+	case ES_EXCEPTION:
+		vc_forward_exception(&ctxt);
+		fallthrough;
+	default:
+		return -EINVAL;
+	}
+
+	return svsm_process_result_codes(call);
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call)
+{
+	struct ghcb_state state;
+	unsigned long flags;
+	struct ghcb *ghcb;
+	int ret;
+
+	flags = native_local_irq_save();
+
+	if (sev_cfg.ghcbs_initialized)
+		ghcb = __sev_get_ghcb(&state);
+	else if (boot_ghcb)
+		ghcb = boot_ghcb;
+	else
+		ghcb = NULL;
+
+	do {
+		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+			   : __pi_svsm_perform_msr_protocol(call);
+	} while (ret == -EAGAIN);
+
+	if (sev_cfg.ghcbs_initialized)
+		__sev_put_ghcb(&state);
+
+	native_local_irq_restore(flags);
+
+	return ret;
+}
+
+static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+					struct svsm_pvalidate_call *pc)
+{
+	struct svsm_pvalidate_entry *pe;
+
+	/* Nothing in the CA yet */
+	pc->num_entries = 0;
+	pc->cur_index   = 0;
+
+	pe = &pc->entry[0];
+
+	while (pfn < pfn_end) {
+		pe->page_size = RMP_PG_SIZE_4K;
+		pe->action    = action;
+		pe->ignore_cf = 0;
+		pe->rsvd      = 0;
+		pe->pfn       = pfn;
+
+		pe++;
+		pfn++;
+
+		pc->num_entries++;
+		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+			break;
+	}
+
+	return pfn;
+}
+
+static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
+				       struct svsm_pvalidate_call *pc)
+{
+	struct svsm_pvalidate_entry *pe;
+	struct psc_entry *e;
+
+	/* Nothing in the CA yet */
+	pc->num_entries = 0;
+	pc->cur_index   = 0;
+
+	pe = &pc->entry[0];
+	e  = &desc->entries[desc_entry];
+
+	while (desc_entry <= desc->hdr.end_entry) {
+		pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		pe->action    = e->operation == SNP_PAGE_STATE_PRIVATE;
+		pe->ignore_cf = 0;
+		pe->rsvd      = 0;
+		pe->pfn       = e->gfn;
+
+		pe++;
+		e++;
+
+		desc_entry++;
+		pc->num_entries++;
+		if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+			break;
+	}
+
+	return desc_entry;
+}
+
+static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
+{
+	unsigned int page_size;
+	bool action;
+	u64 pfn;
+
+	pfn = pc->entry[pc->cur_index].pfn;
+	action = pc->entry[pc->cur_index].action;
+	page_size = pc->entry[pc->cur_index].page_size;
+
+	__pval_terminate(pfn, action, page_size, ret, svsm_ret);
+}
+
+void svsm_pval_pages(struct snp_psc_desc *desc)
+{
+	struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
+	unsigned int i, pv_4k_count = 0;
+	struct svsm_pvalidate_call *pc;
+	struct svsm_call call = {};
+	unsigned long flags;
+	bool action;
+	u64 pc_pa;
+	int ret;
+
+	/*
+	 * This can be called very early in the boot, use native functions in
+	 * order to avoid paravirt issues.
+	 */
+	flags = native_local_irq_save();
+
+	/*
+	 * The SVSM calling area (CA) can support processing 510 entries at a
+	 * time. Loop through the Page State Change descriptor until the CA is
+	 * full or the last entry in the descriptor is reached, at which time
+	 * the SVSM is invoked. This repeats until all entries in the descriptor
+	 * are processed.
+	 */
+	call.caa = svsm_get_caa();
+
+	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+	/* Protocol 0, Call ID 1 */
+	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+	call.rcx = pc_pa;
+
+	for (i = 0; i <= desc->hdr.end_entry;) {
+		i = svsm_build_ca_from_psc_desc(desc, i, pc);
+
+		do {
+			ret = svsm_perform_call_protocol(&call);
+			if (!ret)
+				continue;
+
+			/*
+			 * Check if the entry failed because of an RMP mismatch (a
+			 * PVALIDATE at 2M was requested, but the page is mapped in
+			 * the RMP as 4K).
+			 */
+
+			if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+			    pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+				/* Save this entry for post-processing at 4K */
+				pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+				/* Skip to the next one unless at the end of the list */
+				pc->cur_index++;
+				if (pc->cur_index < pc->num_entries)
+					ret = -EAGAIN;
+				else
+					ret = 0;
+			}
+		} while (ret == -EAGAIN);
+
+		if (ret)
+			svsm_pval_terminate(pc, ret, call.rax_out);
+	}
+
+	/* Process any entries that failed to be validated at 2M and validate them at 4K */
+	for (i = 0; i < pv_4k_count; i++) {
+		u64 pfn, pfn_end;
+
+		action  = pv_4k[i].action;
+		pfn     = pv_4k[i].pfn;
+		pfn_end = pfn + 512;
+
+		while (pfn < pfn_end) {
+			pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
+
+			ret = svsm_perform_call_protocol(&call);
+			if (ret)
+				svsm_pval_terminate(pc, ret, call.rax_out);
+		}
+	}
+
+	native_local_irq_restore(flags);
+}
+
+static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
+{
+	/* If (new) lengths have been returned, propagate them up */
+	if (call->rcx_out != call->rcx)
+		input->manifest_buf.len = call->rcx_out;
+
+	if (call->rdx_out != call->rdx)
+		input->certificates_buf.len = call->rdx_out;
+
+	if (call->r8_out != call->r8)
+		input->report_buf.len = call->r8_out;
+}
+
+int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
+			      struct svsm_attest_call *input)
+{
+	struct svsm_attest_call *ac;
+	unsigned long flags;
+	u64 attest_call_pa;
+	int ret;
+
+	if (!snp_vmpl)
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	call->caa = svsm_get_caa();
+
+	ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
+	attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+	*ac = *input;
+
+	/*
+	 * Set input registers for the request and set RDX and R8 to known
+	 * values in order to detect length values being returned in them.
+	 */
+	call->rax = call_id;
+	call->rcx = attest_call_pa;
+	call->rdx = -1;
+	call->r8 = -1;
+	ret = svsm_perform_call_protocol(call);
+	update_attest_input(call, input);
+
+	local_irq_restore(flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
+
+/**
+ * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
+ * @buffer: A buffer used to both send the command and receive the response.
+ *
+ * Execute a SVSM_VTPM_CMD call as defined by
+ * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
+ *
+ * All command request/response buffers have a common structure as specified by
+ * the following table:
+ *     Byte      Size       In/Out    Description
+ *     Offset    (Bytes)
+ *     0x000     4          In        Platform command
+ *                          Out       Platform command response size
+ *
+ * Each command can build upon this common request/response structure to create
+ * a structure specific to the command. See include/linux/tpm_svsm.h for more
+ * details.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int snp_svsm_vtpm_send_command(u8 *buffer)
+{
+	struct svsm_call call = {};
+
+	call.caa = svsm_get_caa();
+	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
+	call.rcx = __pa(buffer);
+
+	return svsm_perform_call_protocol(&call);
+}
+EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
+
+/**
+ * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
+ *
+ * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
+ * which is the only request used so far.
+ *
+ * Return: true if the platform provides a vTPM SVSM device, false otherwise.
+ */
+bool snp_svsm_vtpm_probe(void)
+{
+	struct svsm_call call = {};
+
+	/* The vTPM device is available only if a SVSM is present */
+	if (!snp_vmpl)
+		return false;
+
+	call.caa = svsm_get_caa();
+	call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
+
+	if (svsm_perform_call_protocol(&call))
+		return false;
+
+	/* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
+	return call.rcx_out & BIT_ULL(8);
+}
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/3] x86/sev: Move the internal header
  2025-12-04 12:48 ` [PATCH 1/3] x86/sev: Move the internal header Borislav Petkov
@ 2025-12-04 14:18   ` Tom Lendacky
  2025-12-04 14:57     ` Borislav Petkov
  0 siblings, 1 reply; 8+ messages in thread
From: Tom Lendacky @ 2025-12-04 14:18 UTC (permalink / raw)
  To: Borislav Petkov; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

On 12/4/25 06:48, Borislav Petkov wrote:
> From: "Borislav Petkov (AMD)" <bp@alien8.de>
> 
> Move the internal header out of the usual include/asm/ include path
> because having an "internal" header there doesn't really make it so
> - quite the opposite.
> 
> So move where it belongs and make it really internal.
> 
> No functional changes.
> 
> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
> ---
>  arch/x86/boot/startup/sev-startup.c                          | 3 ++-
>  arch/x86/coco/sev/core.c                                     | 3 ++-
>  arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} | 0
>  arch/x86/coco/sev/noinstr.c                                  | 3 ++-
>  arch/x86/coco/sev/vc-handle.c                                | 3 ++-
>  5 files changed, 8 insertions(+), 4 deletions(-)
>  rename arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} (100%)
> 
> diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
> index 09725428d3e6..1115214429fd 100644
> --- a/arch/x86/boot/startup/sev-startup.c
> +++ b/arch/x86/boot/startup/sev-startup.c
> @@ -27,7 +27,6 @@
>  #include <asm/cpu_entry_area.h>
>  #include <asm/stacktrace.h>
>  #include <asm/sev.h>
> -#include <asm/sev-internal.h>
>  #include <asm/insn-eval.h>
>  #include <asm/fpu/xcr.h>
>  #include <asm/processor.h>
> @@ -41,6 +40,8 @@
>  #include <asm/cpuid/api.h>
>  #include <asm/cmdline.h>
>  
> +#include "../coco/sev/internal.h"

Shouldn't this be "../../coco/sev/internal.h" ?

What is strange is that it works with either.

Thanks,
Tom

> +
>  /* Include code shared with pre-decompression boot stage */
>  #include "sev-shared.c"
>  
> diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
> index 9ae3b11754e6..4e618e596267 100644
> --- a/arch/x86/coco/sev/core.c
> +++ b/arch/x86/coco/sev/core.c
> @@ -31,7 +31,6 @@
>  #include <asm/cpu_entry_area.h>
>  #include <asm/stacktrace.h>
>  #include <asm/sev.h>
> -#include <asm/sev-internal.h>
>  #include <asm/insn-eval.h>
>  #include <asm/fpu/xcr.h>
>  #include <asm/processor.h>
> @@ -46,6 +45,8 @@
>  #include <asm/cmdline.h>
>  #include <asm/msr.h>
>  
> +#include "internal.h"
> +
>  /* Bitmap of SEV features supported by the hypervisor */
>  u64 sev_hv_features __ro_after_init;
>  SYM_PIC_ALIAS(sev_hv_features);
> diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/coco/sev/internal.h
> similarity index 100%
> rename from arch/x86/include/asm/sev-internal.h
> rename to arch/x86/coco/sev/internal.h
> diff --git a/arch/x86/coco/sev/noinstr.c b/arch/x86/coco/sev/noinstr.c
> index b527eafb6312..9d94aca4a698 100644
> --- a/arch/x86/coco/sev/noinstr.c
> +++ b/arch/x86/coco/sev/noinstr.c
> @@ -16,7 +16,8 @@
>  #include <asm/msr.h>
>  #include <asm/ptrace.h>
>  #include <asm/sev.h>
> -#include <asm/sev-internal.h>
> +
> +#include "internal.h"
>  
>  static __always_inline bool on_vc_stack(struct pt_regs *regs)
>  {
> diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
> index f08c7505ed82..43f264afd590 100644
> --- a/arch/x86/coco/sev/vc-handle.c
> +++ b/arch/x86/coco/sev/vc-handle.c
> @@ -23,7 +23,6 @@
>  #include <asm/init.h>
>  #include <asm/stacktrace.h>
>  #include <asm/sev.h>
> -#include <asm/sev-internal.h>
>  #include <asm/insn-eval.h>
>  #include <asm/fpu/xcr.h>
>  #include <asm/processor.h>
> @@ -35,6 +34,8 @@
>  #include <asm/apic.h>
>  #include <asm/cpuid/api.h>
>  
> +#include "internal.h"
> +
>  static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
>  					   unsigned long vaddr, phys_addr_t *paddr)
>  {


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 0/3] x86/sev: Carve out the SVSM support code
  2025-12-04 12:48 [PATCH 0/3] x86/sev: Carve out the SVSM support code Borislav Petkov
                   ` (2 preceding siblings ...)
  2025-12-04 12:48 ` [PATCH 3/3] x86/sev: Carve out the SVSM code into a separate compilation unit Borislav Petkov
@ 2025-12-04 14:53 ` Tom Lendacky
  2025-12-04 15:06   ` Borislav Petkov
  3 siblings, 1 reply; 8+ messages in thread
From: Tom Lendacky @ 2025-12-04 14:53 UTC (permalink / raw)
  To: Borislav Petkov; +Cc: linux-coco, X86 ML, LKML, Borislav Petkov (AMD)

On 12/4/25 06:48, Borislav Petkov wrote:
> From: "Borislav Petkov (AMD)" <bp@alien8.de>
> 
> Hi,
> 
> I've been meaning to do this for a while now but didn't have a good idea how
> to do it nicely. Using the internal header makes it almost trivial.

Quick testing doesn't show any issues.

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
> 
> Thx.
> 
> Borislav Petkov (AMD) (3):
>   x86/sev: Move the internal header
>   x86/sev: Add internal header guards
>   x86/sev: Carve out the SVSM code into a separate compilation unit
> 
>  arch/x86/boot/startup/sev-startup.c           |   3 +-
>  arch/x86/coco/sev/Makefile                    |   2 +-
>  arch/x86/coco/sev/core.c                      | 380 +-----------------
>  .../sev-internal.h => coco/sev/internal.h}    |  32 ++
>  arch/x86/coco/sev/noinstr.c                   |   3 +-
>  arch/x86/coco/sev/svsm.c                      | 362 +++++++++++++++++
>  arch/x86/coco/sev/vc-handle.c                 |   3 +-
>  7 files changed, 403 insertions(+), 382 deletions(-)
>  rename arch/x86/{include/asm/sev-internal.h => coco/sev/internal.h} (75%)
>  create mode 100644 arch/x86/coco/sev/svsm.c
> 


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/3] x86/sev: Move the internal header
  2025-12-04 14:18   ` Tom Lendacky
@ 2025-12-04 14:57     ` Borislav Petkov
  0 siblings, 0 replies; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 14:57 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: Borislav Petkov, linux-coco, X86 ML, LKML

On Thu, Dec 04, 2025 at 08:18:42AM -0600, Tom Lendacky wrote:
> > +#include "../coco/sev/internal.h"
> 
> Shouldn't this be "../../coco/sev/internal.h" ?
> 
> What is strange is that it works with either.

Pure luck :-\

gcc cmdline is:

gcc -Wp,-MMD,arch/x86/boot/startup/.sev-startup.o.d -nostdinc
-I./arch/x86/include...

so the include path becomes:

# 1 "./arch/x86/include/../coco/sev/internal.h" 1

which is

$ readlink -f  arch/x86/include/../coco/sev/internal.h 
/mnt/kernel/kernel/linux/arch/x86/coco/sev/internal.h

And /mnt/kernel is my kernel-sources-containing SSD :-)

In any case, good catch!

I'll do the "../../" so that it is perfectly clear and future
potential include directives changes do not break this.

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 0/3] x86/sev: Carve out the SVSM support code
  2025-12-04 14:53 ` [PATCH 0/3] x86/sev: Carve out the SVSM support code Tom Lendacky
@ 2025-12-04 15:06   ` Borislav Petkov
  0 siblings, 0 replies; 8+ messages in thread
From: Borislav Petkov @ 2025-12-04 15:06 UTC (permalink / raw)
  To: Tom Lendacky; +Cc: Borislav Petkov, linux-coco, X86 ML, LKML

On Thu, Dec 04, 2025 at 08:53:01AM -0600, Tom Lendacky wrote:
> Quick testing doesn't show any issues.
> 
> Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

Much appreciated, thanks!

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2025-12-04 15:06 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-12-04 12:48 [PATCH 0/3] x86/sev: Carve out the SVSM support code Borislav Petkov
2025-12-04 12:48 ` [PATCH 1/3] x86/sev: Move the internal header Borislav Petkov
2025-12-04 14:18   ` Tom Lendacky
2025-12-04 14:57     ` Borislav Petkov
2025-12-04 12:48 ` [PATCH 2/3] x86/sev: Add internal header guards Borislav Petkov
2025-12-04 12:48 ` [PATCH 3/3] x86/sev: Carve out the SVSM code into a separate compilation unit Borislav Petkov
2025-12-04 14:53 ` [PATCH 0/3] x86/sev: Carve out the SVSM support code Tom Lendacky
2025-12-04 15:06   ` Borislav Petkov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox