From: Sagi Shahar <sagis@google.com>
To: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: pbonzini@redhat.com, seanjc@google.com,
	dave.hansen@linux.intel.com,  rick.p.edgecombe@intel.com,
	isaku.yamahata@intel.com, kai.huang@intel.com,
	 yan.y.zhao@intel.com, chao.gao@intel.com, tglx@linutronix.de,
	 mingo@redhat.com, bp@alien8.de, kvm@vger.kernel.org,
	x86@kernel.org,  linux-coco@lists.linux.dev,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCHv2 08/12] KVM: TDX: Handle PAMT allocation in fault path
Date: Thu, 21 Aug 2025 14:21:47 -0500
Message-ID: <CAAhR5DGGWss4jovHETYmBeK1gze04LR9c8Dcd2oMpCC3SnMDgQ@mail.gmail.com>
In-Reply-To: <20250609191340.2051741-9-kirill.shutemov@linux.intel.com>

On Mon, Jun 9, 2025 at 2:16 PM Kirill A. Shutemov
<kirill.shutemov@linux.intel.com> wrote:
>
> There are two distinct cases when the kernel needs to allocate PAMT
> memory in the fault path: for SEPT page tables in tdx_sept_link_private_spt()
> and for leaf pages in tdx_sept_set_private_spte().
>
> These code paths run in atomic context. Use a pre-allocated per-VCPU
> pool for memory allocations.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  arch/x86/include/asm/tdx.h  |  4 ++++
>  arch/x86/kvm/vmx/tdx.c      | 40 ++++++++++++++++++++++++++++++++-----
>  arch/x86/virt/vmx/tdx/tdx.c | 21 +++++++++++++------
>  virt/kvm/kvm_main.c         |  1 +
>  4 files changed, 55 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
> index 47092eb13eb3..39f8dd7e0f06 100644
> --- a/arch/x86/include/asm/tdx.h
> +++ b/arch/x86/include/asm/tdx.h
> @@ -116,6 +116,10 @@ u32 tdx_get_nr_guest_keyids(void);
>  void tdx_guest_keyid_free(unsigned int keyid);
>
>  int tdx_nr_pamt_pages(void);
> +int tdx_pamt_get(struct page *page, enum pg_level level,
> +                struct page *(alloc)(void *data), void *data);
> +void tdx_pamt_put(struct page *page, enum pg_level level);
> +
>  struct page *tdx_alloc_page(void);
>  void tdx_free_page(struct page *page);
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 36c3c9f8a62c..bc9bc393f866 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1537,11 +1537,26 @@ static int tdx_mem_page_record_premap_cnt(struct kvm *kvm, gfn_t gfn,
>         return 0;
>  }
>
> +static struct page *tdx_alloc_pamt_page_atomic(void *data)
> +{
> +       struct kvm_vcpu *vcpu = data;
> +       void *p;
> +
> +       p = kvm_mmu_memory_cache_alloc(&vcpu->arch.pamt_page_cache);
> +       return virt_to_page(p);
> +}
> +
>  int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
>                               enum pg_level level, kvm_pfn_t pfn)
>  {
> +       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
>         struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
>         struct page *page = pfn_to_page(pfn);
> +       int ret;
> +
> +       ret = tdx_pamt_get(page, level, tdx_alloc_pamt_page_atomic, vcpu);
> +       if (ret)
> +               return ret;

tdx_pamt_get() can return a non-zero value on success, e.g. it can
return 1 when tdx_pamt_add() lost the race. Shouldn't we check for
(ret < 0) here and in the cases below?
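
For illustration, something like the sketch below is what I had in
mind (just a sketch; the names are the ones already used in this
patch):

	ret = tdx_pamt_get(page, level, tdx_alloc_pamt_page_atomic, vcpu);
	if (ret < 0)
		return ret;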

>
>         /* TODO: handle large pages. */
>         if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
> @@ -1562,10 +1577,16 @@ int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
>          * barrier in tdx_td_finalize().
>          */
>         smp_rmb();
> -       if (likely(kvm_tdx->state == TD_STATE_RUNNABLE))
> -               return tdx_mem_page_aug(kvm, gfn, level, page);
>
> -       return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
> +       if (likely(kvm_tdx->state == TD_STATE_RUNNABLE))
> +               ret = tdx_mem_page_aug(kvm, gfn, level, page);
> +       else
> +               ret = tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
> +
> +       if (ret)
> +               tdx_pamt_put(page, level);
> +
> +       return ret;
>  }
>
>  static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
> @@ -1622,17 +1643,26 @@ int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
>                               enum pg_level level, void *private_spt)
>  {
>         int tdx_level = pg_level_to_tdx_sept_level(level);
> -       gpa_t gpa = gfn_to_gpa(gfn);
> +       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
>         struct page *page = virt_to_page(private_spt);
> +       gpa_t gpa = gfn_to_gpa(gfn);
>         u64 err, entry, level_state;
> +       int ret;
> +
> +       ret = tdx_pamt_get(page, PG_LEVEL_4K, tdx_alloc_pamt_page_atomic, vcpu);
> +       if (ret)
> +               return ret;
>
>         err = tdh_mem_sept_add(&to_kvm_tdx(kvm)->td, gpa, tdx_level, page, &entry,
>                                &level_state);
> -       if (unlikely(tdx_operand_busy(err)))
> +       if (unlikely(tdx_operand_busy(err))) {
> +               tdx_pamt_put(page, PG_LEVEL_4K);
>                 return -EBUSY;
> +       }
>
>         if (KVM_BUG_ON(err, kvm)) {
>                 pr_tdx_error_2(TDH_MEM_SEPT_ADD, err, entry, level_state);
> +               tdx_pamt_put(page, PG_LEVEL_4K);
>                 return -EIO;
>         }
>
> diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
> index 4f9eaba4af4a..d4b50b6428fa 100644
> --- a/arch/x86/virt/vmx/tdx/tdx.c
> +++ b/arch/x86/virt/vmx/tdx/tdx.c
> @@ -2067,10 +2067,16 @@ static void tdx_free_pamt_pages(struct list_head *pamt_pages)
>         }
>  }
>
> -static int tdx_alloc_pamt_pages(struct list_head *pamt_pages)
> +static int tdx_alloc_pamt_pages(struct list_head *pamt_pages,
> +                                struct page *(alloc)(void *data), void *data)
>  {
>         for (int i = 0; i < tdx_nr_pamt_pages(); i++) {
> -               struct page *page = alloc_page(GFP_KERNEL);
> +               struct page *page;
> +
> +               if (alloc)
> +                       page = alloc(data);
> +               else
> +                       page = alloc_page(GFP_KERNEL);
>                 if (!page)
>                         goto fail;
>                 list_add(&page->lru, pamt_pages);
> @@ -2115,7 +2121,8 @@ static int tdx_pamt_add(atomic_t *pamt_refcount, unsigned long hpa,
>         return 0;
>  }
>
> -static int tdx_pamt_get(struct page *page, enum pg_level level)
> +int tdx_pamt_get(struct page *page, enum pg_level level,
> +                struct page *(alloc)(void *data), void *data)
>  {
>         unsigned long hpa = page_to_phys(page);
>         atomic_t *pamt_refcount;
> @@ -2134,7 +2141,7 @@ static int tdx_pamt_get(struct page *page, enum pg_level level)
>         if (atomic_inc_not_zero(pamt_refcount))
>                 return 0;
>
> -       if (tdx_alloc_pamt_pages(&pamt_pages))
> +       if (tdx_alloc_pamt_pages(&pamt_pages, alloc, data))
>                 return -ENOMEM;
>
>         ret = tdx_pamt_add(pamt_refcount, hpa, &pamt_pages);
> @@ -2143,8 +2150,9 @@ static int tdx_pamt_get(struct page *page, enum pg_level level)
>
>         return ret >= 0 ? 0 : ret;
>  }
> +EXPORT_SYMBOL_GPL(tdx_pamt_get);
>
> -static void tdx_pamt_put(struct page *page, enum pg_level level)
> +void tdx_pamt_put(struct page *page, enum pg_level level)
>  {
>         unsigned long hpa = page_to_phys(page);
>         atomic_t *pamt_refcount;
> @@ -2179,6 +2187,7 @@ static void tdx_pamt_put(struct page *page, enum pg_level level)
>
>         tdx_free_pamt_pages(&pamt_pages);
>  }
> +EXPORT_SYMBOL_GPL(tdx_pamt_put);
>
>  struct page *tdx_alloc_page(void)
>  {
> @@ -2188,7 +2197,7 @@ struct page *tdx_alloc_page(void)
>         if (!page)
>                 return NULL;
>
> -       if (tdx_pamt_get(page, PG_LEVEL_4K)) {
> +       if (tdx_pamt_get(page, PG_LEVEL_4K, NULL, NULL)) {
>                 __free_page(page);
>                 return NULL;
>         }
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index eec82775c5bf..6add012532a0 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -436,6 +436,7 @@ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
>         BUG_ON(!p);
>         return p;
>  }
> +EXPORT_SYMBOL_GPL(kvm_mmu_memory_cache_alloc);
>  #endif
>
>  static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
> --
> 2.47.2
>
>


Thread overview: 90+ messages
2025-06-09 19:13 [PATCHv2 00/12] TDX: Enable Dynamic PAMT Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 01/12] x86/tdx: Consolidate TDX error handling Kirill A. Shutemov
2025-06-25 17:58   ` Dave Hansen
2025-06-25 20:58     ` Edgecombe, Rick P
2025-06-25 21:27       ` Sean Christopherson
2025-06-25 21:46         ` Edgecombe, Rick P
2025-06-26  9:25         ` kirill.shutemov
2025-06-26 14:46           ` Dave Hansen
2025-06-26 15:51             ` Sean Christopherson
2025-06-26 16:59               ` Dave Hansen
2025-06-27 10:42                 ` kirill.shutemov
2025-07-30 18:32                 ` Edgecombe, Rick P
2025-07-31 23:31                   ` Sean Christopherson
2025-07-31 23:46                     ` Edgecombe, Rick P
2025-07-31 23:53                       ` Sean Christopherson
2025-08-01 15:03                         ` Edgecombe, Rick P
2025-08-06 15:19                           ` Sean Christopherson
2025-06-26  0:05     ` Huang, Kai
2025-07-30 18:33       ` Edgecombe, Rick P
2025-06-09 19:13 ` [PATCHv2 02/12] x86/virt/tdx: Allocate page bitmap for Dynamic PAMT Kirill A. Shutemov
2025-06-25 18:06   ` Dave Hansen
2025-06-26  9:25     ` Kirill A. Shutemov
2025-07-31  1:06     ` Edgecombe, Rick P
2025-07-31  4:10       ` Huang, Kai
2025-06-26 11:08   ` Huang, Kai
2025-06-27 10:42     ` kirill.shutemov
2025-06-09 19:13 ` [PATCHv2 03/12] x86/virt/tdx: Allocate reference counters for PAMT memory Kirill A. Shutemov
2025-06-25 19:26   ` Dave Hansen
2025-06-27 11:27     ` Kirill A. Shutemov
2025-06-27 14:03       ` Dave Hansen
2025-06-26  0:53   ` Huang, Kai
2025-06-26  4:48     ` Huang, Kai
2025-06-27 11:35     ` kirill.shutemov
2025-06-09 19:13 ` [PATCHv2 04/12] x86/virt/tdx: Add tdx_alloc/free_page() helpers Kirill A. Shutemov
2025-06-10  2:36   ` Chao Gao
2025-06-10 14:51     ` [PATCHv2.1 " Kirill A. Shutemov
2025-06-25 18:01       ` Dave Hansen
2025-06-25 20:09     ` [PATCHv2 " Dave Hansen
2025-06-26  0:46       ` Chao Gao
2025-06-25 20:02   ` Dave Hansen
2025-06-27 13:00     ` Kirill A. Shutemov
2025-06-27  7:49   ` Adrian Hunter
2025-06-27 13:03     ` Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 05/12] KVM: TDX: Allocate PAMT memory in __tdx_td_init() Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 06/12] KVM: TDX: Allocate PAMT memory in tdx_td_vcpu_init() Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 07/12] KVM: TDX: Preallocate PAMT pages to be used in page fault path Kirill A. Shutemov
2025-06-26 11:21   ` Huang, Kai
2025-07-10  1:34   ` Edgecombe, Rick P
2025-07-10  7:49     ` kirill.shutemov
2025-06-09 19:13 ` [PATCHv2 08/12] KVM: TDX: Handle PAMT allocation in " Kirill A. Shutemov
2025-06-12 12:19   ` Chao Gao
2025-06-12 13:05     ` [PATCHv2.1 " Kirill A. Shutemov
2025-06-25 22:38   ` [PATCHv2 " Edgecombe, Rick P
2025-07-09 14:29     ` kirill.shutemov
2025-07-10  1:33   ` Edgecombe, Rick P
2025-07-10  8:45     ` kirill.shutemov
2025-08-21 19:21   ` Sagi Shahar [this message]
2025-08-21 19:35     ` Edgecombe, Rick P
2025-08-21 19:53       ` Sagi Shahar
2025-06-09 19:13 ` [PATCHv2 09/12] KVM: TDX: Reclaim PAMT memory Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 10/12] [NOT-FOR-UPSTREAM] x86/virt/tdx: Account PAMT memory and print it in /proc/meminfo Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 11/12] x86/virt/tdx: Enable Dynamic PAMT Kirill A. Shutemov
2025-06-09 19:13 ` [PATCHv2 12/12] Documentation/x86: Add documentation for TDX's " Kirill A. Shutemov
2025-06-25 13:25 ` [PATCHv2 00/12] TDX: Enable " Kirill A. Shutemov
2025-06-25 22:49 ` Edgecombe, Rick P
2025-06-27 13:05   ` kirill.shutemov
2025-08-08 23:18 ` Edgecombe, Rick P
2025-08-11  6:31   ` kas
2025-08-11 22:30     ` Edgecombe, Rick P
2025-08-12  2:02       ` Sean Christopherson
2025-08-12  2:31         ` Vishal Annapurve
2025-08-12  8:04           ` kas
2025-08-12 15:12             ` Edgecombe, Rick P
2025-08-12 16:15               ` Sean Christopherson
2025-08-12 18:39                 ` Edgecombe, Rick P
2025-08-12 22:00                   ` Vishal Annapurve
2025-08-12 23:34                     ` Edgecombe, Rick P
2025-08-13  0:18                       ` Vishal Annapurve
2025-08-13  0:51                         ` Edgecombe, Rick P
2025-08-12 18:44                 ` Vishal Annapurve
2025-08-13  8:09                 ` Kiryl Shutsemau
2025-08-13  7:49               ` Kiryl Shutsemau
2025-08-12  8:03         ` kas
2025-08-13 22:43         ` Edgecombe, Rick P
2025-08-13 23:31           ` Dave Hansen
2025-08-14  0:14             ` Edgecombe, Rick P
2025-08-14 10:55               ` Kiryl Shutsemau
2025-08-15  1:03                 ` Edgecombe, Rick P
2025-08-20 15:31                   ` Sean Christopherson
2025-08-20 16:35                     ` Edgecombe, Rick P
