From: Yan Zhao <yan.y.zhao@intel.com>
To: seanjc@google.com, pbonzini@redhat.com, dave.hansen@linux.intel.com
Cc: tglx@kernel.org, mingo@redhat.com, bp@alien8.de, kas@kernel.org,
x86@kernel.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, linux-coco@lists.linux.dev,
kai.huang@intel.com, rick.p.edgecombe@intel.com,
yan.y.zhao@intel.com, yilun.xu@linux.intel.com,
vannapurve@google.com, ackerleytng@google.com, sagis@google.com,
binbin.wu@linux.intel.com, xiaoyao.li@intel.com,
isaku.yamahata@intel.com
Subject: [PATCH 2/2] x86/virt/tdx: Use PFN directly for unmapping guest private memory
Date: Thu, 19 Mar 2026 08:58:08 +0800 [thread overview]
Message-ID: <20260319005808.9013-1-yan.y.zhao@intel.com> (raw)
In-Reply-To: <20260319005605.8965-1-yan.y.zhao@intel.com>
From: Sean Christopherson <seanjc@google.com>
Remove the completely unnecessary assumption that memory unmapped from a
TDX guest is backed by refcounted struct page memory.
The APIs tdh_phymem_page_wbinvd_hkid() and tdx_quirk_reset_page() are used
when unmapping guest private memory from S-EPT. Since mapping of guest private
memory places no requirements on how KVM and guest_memfd manage memory,
neither does guest private memory unmapping.
Rip out the misguided struct page assumptions/constraints by having the two
APIs take a PFN directly. This ensures that for future huge page support in
S-EPT, the kernel doesn't pick up even worse assumptions like "a hugepage
must be contained in a single folio".
Use "kvm_pfn_t pfn" for type safety. Using this KVM type is appropriate
since APIs tdh_phymem_page_wbinvd_hkid() and tdx_quirk_reset_page() are
exported to KVM only.
Update mk_keyed_paddr(), which is invoked by tdh_phymem_page_wbinvd_hkid(),
to take a PFN as a parameter accordingly. Opportunistically, move
mk_keyed_paddr() from tdx.h to tdx.c since there are no external users.
Have tdx_reclaim_page() continue to take a struct page parameter since it
is not currently used for removing guest private memory.
[Yan: Use kvm_pfn_t, drop reclaim API param update, move mk_keyed_paddr()]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
arch/x86/include/asm/tdx.h | 15 ++-------------
arch/x86/kvm/vmx/tdx.c | 10 +++++-----
arch/x86/virt/vmx/tdx/tdx.c | 16 +++++++++++-----
3 files changed, 18 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index f3f0b1872176..6ceb4cd9ff21 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -153,7 +153,7 @@ int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);
-void tdx_quirk_reset_page(struct page *page);
+void tdx_quirk_reset_page(kvm_pfn_t pfn);
struct tdx_td {
/* TD root structure: */
@@ -177,17 +177,6 @@ struct tdx_vp {
struct page **tdcx_pages;
};
-static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
-{
- u64 ret;
-
- ret = page_to_phys(page);
- /* KeyID bits are just above the physical address bits: */
- ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;
-
- return ret;
-}
-
static inline int pg_level_to_tdx_sept_level(enum pg_level level)
{
WARN_ON_ONCE(level == PG_LEVEL_NONE);
@@ -219,7 +208,7 @@ u64 tdh_mem_track(struct tdx_td *tdr);
u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, kvm_pfn_t pfn);
#else
static inline void tdx_init(void) { }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 1f1abc5b5655..75ad3debcd84 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -343,7 +343,7 @@ static int tdx_reclaim_page(struct page *page)
r = __tdx_reclaim_page(page);
if (!r)
- tdx_quirk_reset_page(page);
+ tdx_quirk_reset_page(page_to_pfn(page));
return r;
}
@@ -597,7 +597,7 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
if (TDX_BUG_ON(err, TDH_PHYMEM_PAGE_WBINVD, kvm))
return;
- tdx_quirk_reset_page(kvm_tdx->td.tdr_page);
+ tdx_quirk_reset_page(page_to_pfn(kvm_tdx->td.tdr_page));
__free_page(kvm_tdx->td.tdr_page);
kvm_tdx->td.tdr_page = NULL;
@@ -1776,9 +1776,9 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
enum pg_level level, u64 mirror_spte)
{
- struct page *page = pfn_to_page(spte_to_pfn(mirror_spte));
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ kvm_pfn_t pfn = spte_to_pfn(mirror_spte);
gpa_t gpa = gfn_to_gpa(gfn);
u64 err, entry, level_state;
@@ -1817,11 +1817,11 @@ static void tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
if (TDX_BUG_ON_2(err, TDH_MEM_PAGE_REMOVE, entry, level_state, kvm))
return;
- err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
+ err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, pfn);
if (TDX_BUG_ON(err, TDH_PHYMEM_PAGE_WBINVD, kvm))
return;
- tdx_quirk_reset_page(page);
+ tdx_quirk_reset_page(pfn);
}
void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index a9dd75190c67..2f9d07ad1a9a 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -730,9 +730,9 @@ static void tdx_quirk_reset_paddr(unsigned long base, unsigned long size)
mb();
}
-void tdx_quirk_reset_page(struct page *page)
+void tdx_quirk_reset_page(kvm_pfn_t pfn)
{
- tdx_quirk_reset_paddr(page_to_phys(page), PAGE_SIZE);
+ tdx_quirk_reset_paddr(PFN_PHYS(pfn), PAGE_SIZE);
}
EXPORT_SYMBOL_FOR_KVM(tdx_quirk_reset_page);
@@ -1907,21 +1907,27 @@ u64 tdh_phymem_cache_wb(bool resume)
}
EXPORT_SYMBOL_FOR_KVM(tdh_phymem_cache_wb);
+static inline u64 mk_keyed_paddr(u16 hkid, kvm_pfn_t pfn)
+{
+ /* KeyID bits are just above the physical address bits. */
+ return PFN_PHYS(pfn) | ((u64)hkid << boot_cpu_data.x86_phys_bits);
+}
+
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td)
{
struct tdx_module_args args = {};
- args.rcx = mk_keyed_paddr(tdx_global_keyid, td->tdr_page);
+ args.rcx = mk_keyed_paddr(tdx_global_keyid, page_to_pfn(td->tdr_page));
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
EXPORT_SYMBOL_FOR_KVM(tdh_phymem_page_wbinvd_tdr);
-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, kvm_pfn_t pfn)
{
struct tdx_module_args args = {};
- args.rcx = mk_keyed_paddr(hkid, page);
+ args.rcx = mk_keyed_paddr(hkid, pfn);
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
--
2.43.2
next prev parent reply other threads:[~2026-03-19 1:37 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-19 0:56 [PATCH 0/2] struct page to PFN conversion for TDX guest private memory Yan Zhao
2026-03-19 0:57 ` [PATCH 1/2] x86/virt/tdx: Use PFN directly for mapping " Yan Zhao
2026-03-19 10:39 ` Kiryl Shutsemau
2026-03-19 11:59 ` Yan Zhao
2026-03-19 12:14 ` Yan Zhao
2026-03-19 12:57 ` Kiryl Shutsemau
2026-03-19 17:27 ` Edgecombe, Rick P
2026-03-20 12:59 ` Kiryl Shutsemau
2026-03-20 17:31 ` Edgecombe, Rick P
2026-03-20 17:38 ` Dave Hansen
2026-03-20 17:48 ` Edgecombe, Rick P
2026-03-19 18:05 ` Dave Hansen
2026-03-25 9:10 ` Yan Zhao
2026-03-25 16:57 ` Edgecombe, Rick P
2026-03-27 7:03 ` Yan Zhao
2026-03-19 0:58 ` Yan Zhao [this message]
2026-03-19 3:20 ` [PATCH 2/2] x86/virt/tdx: Use PFN directly for unmapping " Xiaoyao Li
2026-03-19 6:45 ` Yan Zhao
2026-03-19 8:56 ` Xiaoyao Li
2026-03-19 8:56 ` Yan Zhao
2026-03-19 18:44 ` Edgecombe, Rick P
2026-03-19 10:48 ` Kiryl Shutsemau
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260319005808.9013-1-yan.y.zhao@intel.com \
--to=yan.y.zhao@intel.com \
--cc=ackerleytng@google.com \
--cc=binbin.wu@linux.intel.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=isaku.yamahata@intel.com \
--cc=kai.huang@intel.com \
--cc=kas@kernel.org \
--cc=kvm@vger.kernel.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=rick.p.edgecombe@intel.com \
--cc=sagis@google.com \
--cc=seanjc@google.com \
--cc=tglx@kernel.org \
--cc=vannapurve@google.com \
--cc=x86@kernel.org \
--cc=xiaoyao.li@intel.com \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox