From: Yan Zhao <yan.y.zhao@intel.com>
To: dave.hansen@linux.intel.com, pbonzini@redhat.com, seanjc@google.com
Cc: tglx@kernel.org, mingo@redhat.com, bp@alien8.de, kas@kernel.org,
x86@kernel.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, linux-coco@lists.linux.dev,
kai.huang@intel.com, rick.p.edgecombe@intel.com,
yan.y.zhao@intel.com, yilun.xu@linux.intel.com,
vannapurve@google.com, ackerleytng@google.com, sagis@google.com,
binbin.wu@linux.intel.com, xiaoyao.li@intel.com,
isaku.yamahata@intel.com
Subject: [PATCH v2 1/4] x86/tdx: Use PFN directly for mapping guest private memory
Date: Thu, 30 Apr 2026 09:49:29 +0800 [thread overview]
Message-ID: <20260430014929.24210-1-yan.y.zhao@intel.com> (raw)
In-Reply-To: <20260430014852.24183-1-yan.y.zhao@intel.com>
From: Sean Christopherson <seanjc@google.com>
Remove struct page assumptions/constraints in the SEAMCALL wrapper APIs for
mapping guest private memory and have them take PFN directly.
Having core TDX make assumptions that guest private memory must be backed
by struct page (and/or folio) will create subtle dependencies on how
KVM/guest_memfd allocates/manages memory (e.g., whether it uses memory
allocated from core MM, if the memory is refcounted, or if the folio is
split) that are easily avoided [1].
KVM's MMUs work with PFNs. This is very much an intentional design choice.
It ensures that the KVM MMUs remain flexible and are not too tied to the
regular CPU MMUs and the kernel code around them. Using 'struct page' for
TDX guest memory is not a good fit anywhere near the KVM MMU code [2].
Use "kvm_pfn_t pfn" for type safety. Using this KVM type is appropriate
since APIs tdh_mem_page_add() and tdh_mem_page_aug() are exported to KVM
only.
[ Yan: Replace "u64 pfn" with "kvm_pfn_t pfn" ]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Link: https://lore.kernel.org/all/aWgyhmTJphGQqO0Y@google.com [1]
Link: https://lore.kernel.org/all/ac7V0g2q2hN3dU5u@google.com [2]
---
arch/x86/include/asm/tdx.h | 6 ++++--
arch/x86/kvm/vmx/tdx.c | 7 +++----
arch/x86/virt/vmx/tdx/tdx.c | 19 ++++++++++++-------
3 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 0cb77ed4adc5..619aed134c83 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/bits.h>
#include <linux/mmzone.h>
+#include <linux/kvm_types.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -189,11 +190,12 @@ static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
-u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, kvm_pfn_t pfn, struct page *source,
+ u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, enum pg_level level, struct page *page,
u64 *ext_err1, u64 *ext_err2);
u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
-u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, enum pg_level level, struct page *page,
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, enum pg_level level, kvm_pfn_t pfn,
u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, enum pg_level level, u64 *ext_err1,
u64 *ext_err2);
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 77aea8920a4a..9b47dd257ff4 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1624,8 +1624,8 @@ static int tdx_mem_page_add(struct kvm *kvm, gfn_t gfn, enum pg_level level,
KVM_BUG_ON(!kvm_tdx->page_add_src, kvm))
return -EIO;
- err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
- kvm_tdx->page_add_src, &entry, &level_state);
+ err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn, kvm_tdx->page_add_src,
+ &entry, &level_state);
if (unlikely(tdx_operand_busy(err)))
return -EBUSY;
@@ -1639,12 +1639,11 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
enum pg_level level, kvm_pfn_t pfn)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
- struct page *page = pfn_to_page(pfn);
gpa_t gpa = gfn_to_gpa(gfn);
u64 entry, level_state;
u64 err;
- err = tdh_mem_page_aug(&kvm_tdx->td, gpa, level, page, &entry, &level_state);
+ err = tdh_mem_page_aug(&kvm_tdx->td, gpa, level, pfn, &entry, &level_state);
if (unlikely(tdx_operand_busy(err)))
return -EBUSY;
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index a6e77afafa79..b24b81cea5ea 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -30,7 +30,6 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/idr.h>
-#include <linux/kvm_types.h>
#include <asm/page.h>
#include <asm/special_insns.h>
#include <asm/msr-index.h>
@@ -1568,6 +1567,11 @@ static void tdx_clflush_page(struct page *page)
clflush_cache_range(page_to_virt(page), PAGE_SIZE);
}
+static void tdx_clflush_pfn(kvm_pfn_t pfn)
+{
+ clflush_cache_range(__va(PFN_PHYS(pfn)), PAGE_SIZE);
+}
+
static int pg_level_to_tdx_sept_level(enum pg_level level)
{
WARN_ON_ONCE(level == PG_LEVEL_NONE);
@@ -1594,17 +1598,18 @@ u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
}
EXPORT_SYMBOL_FOR_KVM(tdh_mng_addcx);
-u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2)
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, kvm_pfn_t pfn, struct page *source,
+ u64 *ext_err1, u64 *ext_err2)
{
struct tdx_module_args args = {
.rcx = gpa,
.rdx = tdx_tdr_pa(td),
- .r8 = page_to_phys(page),
+ .r8 = PFN_PHYS(pfn),
.r9 = page_to_phys(source),
};
u64 ret;
- tdx_clflush_page(page);
+ tdx_clflush_pfn(pfn);
ret = seamcall_ret(TDH_MEM_PAGE_ADD, &args);
*ext_err1 = args.rcx;
@@ -1647,16 +1652,16 @@ u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
EXPORT_SYMBOL_FOR_KVM(tdh_vp_addcx);
u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, enum pg_level level,
- struct page *page, u64 *ext_err1, u64 *ext_err2)
+ kvm_pfn_t pfn, u64 *ext_err1, u64 *ext_err2)
{
struct tdx_module_args args = {
.rcx = gpa | pg_level_to_tdx_sept_level(level),
.rdx = tdx_tdr_pa(td),
- .r8 = page_to_phys(page),
+ .r8 = PFN_PHYS(pfn),
};
u64 ret;
- tdx_clflush_page(page);
+ tdx_clflush_pfn(pfn);
ret = seamcall_ret(TDH_MEM_PAGE_AUG, &args);
*ext_err1 = args.rcx;
--
2.43.2
next prev parent reply other threads:[~2026-04-30 2:29 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-30 1:48 [PATCH v2 0/4] struct page to PFN conversion for TDX guest private memory Yan Zhao
2026-04-30 1:49 ` Yan Zhao [this message]
2026-04-30 17:43 ` [PATCH v2 1/4] x86/tdx: Use PFN directly for mapping " Ackerley Tng
2026-04-30 1:49 ` [PATCH v2 2/4] x86/tdx: Use PFN directly for unmapping " Yan Zhao
2026-04-30 18:17 ` Ackerley Tng
2026-04-30 18:38 ` Edgecombe, Rick P
2026-04-30 19:20 ` Sean Christopherson
2026-04-30 19:37 ` Dave Hansen
2026-05-01 0:57 ` Ackerley Tng
2026-05-01 0:58 ` Ackerley Tng
2026-04-30 1:50 ` [PATCH v2 3/4] x86/tdx: Drop exported function tdx_quirk_reset_page() Yan Zhao
2026-04-30 18:29 ` Ackerley Tng
2026-04-30 1:50 ` [PATCH v2 4/4] x86/virt/tdx: Move mk_keyed_paddr() to tdx.c due to no external users Yan Zhao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260430014929.24210-1-yan.y.zhao@intel.com \
--to=yan.y.zhao@intel.com \
--cc=ackerleytng@google.com \
--cc=binbin.wu@linux.intel.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=isaku.yamahata@intel.com \
--cc=kai.huang@intel.com \
--cc=kas@kernel.org \
--cc=kvm@vger.kernel.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=rick.p.edgecombe@intel.com \
--cc=sagis@google.com \
--cc=seanjc@google.com \
--cc=tglx@kernel.org \
--cc=vannapurve@google.com \
--cc=x86@kernel.org \
--cc=xiaoyao.li@intel.com \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox