public inbox for kvm@vger.kernel.org
* [PATCH 0/3] KVM: x86: Dynamically allocate hashed page list
@ 2025-03-15  2:40 Sean Christopherson
  2025-03-15  2:40 ` [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's " Sean Christopherson
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Sean Christopherson @ 2025-03-15  2:40 UTC (permalink / raw)
  To: Sean Christopherson, Paolo Bonzini; +Cc: kvm, linux-kernel

Allocate the hashed list of shadow pages dynamically (separate from
struct kvm), and on-demand.  The hashed list is 32KiB, i.e. absolutely
belongs in a separate allocation, and is worth skipping if KVM isn't
shadowing guest PTEs for the VM.
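
For reference, the 32KiB comes straight from the hash table's dimensions
(sizes assume x86-64, where a struct hlist_head is a single 8-byte
pointer):

	/* arch/x86/include/asm/kvm_host.h */
	#define KVM_MMU_HASH_SHIFT 12
	#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)

	/* 4096 buckets * 8 bytes = 32KiB, i.e. an order-3 allocation */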

Sean Christopherson (3):
  KVM: x86/mmu: Dynamically allocate shadow MMU's hashed page list
  KVM: x86: Allocate kvm_vmx/kvm_svm structures using kzalloc()
  KVM: x86/mmu: Defer allocation of shadow MMU's hashed page list

 arch/x86/include/asm/kvm_host.h |  6 ++---
 arch/x86/kvm/mmu/mmu.c          | 48 +++++++++++++++++++++++++++++----
 arch/x86/kvm/svm/svm.c          |  1 +
 arch/x86/kvm/vmx/vmx.c          |  1 +
 arch/x86/kvm/x86.c              |  5 +++-
 5 files changed, 52 insertions(+), 9 deletions(-)


base-commit: 7d2154117a02832ab3643fe2da4cdc9d2090dcb2
-- 
2.49.0.rc1.451.g8f38331e32-goog



* [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's hashed page list
  2025-03-15  2:40 [PATCH 0/3] KVM: x86: Dynamically allocate hashed page list Sean Christopherson
@ 2025-03-15  2:40 ` Sean Christopherson
  2025-03-17 16:37   ` Vipin Sharma
  2025-03-15  2:40 ` [PATCH 2/3] KVM: x86: Allocate kvm_vmx/kvm_svm structures using kzalloc() Sean Christopherson
  2025-03-15  2:40 ` [PATCH 3/3] KVM: x86/mmu: Defer allocation of shadow MMU's hashed page list Sean Christopherson
  2 siblings, 1 reply; 6+ messages in thread
From: Sean Christopherson @ 2025-03-15  2:40 UTC (permalink / raw)
  To: Sean Christopherson, Paolo Bonzini; +Cc: kvm, linux-kernel

Dynamically allocate the (massive) array of hashed lists used to track
shadow pages, as the array itself is 32KiB, i.e. is an order-3 allocation
all on its own, and is *exactly* an order-3 allocation.  Dynamically
allocating the array will allow allocating "struct kvm" using regular
kmalloc(), and will also allow deferring allocation of the array until
it's actually needed, i.e. until the first shadow root is allocated.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h |  4 ++--
 arch/x86/kvm/mmu/mmu.c          | 21 ++++++++++++++++++++-
 arch/x86/kvm/x86.c              |  5 ++++-
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d881e7d276b1..6ead9e57446a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1332,7 +1332,7 @@ struct kvm_arch {
 	bool has_private_mem;
 	bool has_protected_state;
 	bool pre_fault_allowed;
-	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+	struct hlist_head *mmu_page_hash;
 	struct list_head active_mmu_pages;
 	/*
 	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
@@ -1984,7 +1984,7 @@ void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 63bb77ee1bb1..b878f2e89dec 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3880,6 +3880,18 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	return r;
 }
 
+static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
+{
+	typeof(kvm->arch.mmu_page_hash) h;
+
+	h = kcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
+	if (!h)
+		return -ENOMEM;
+
+	kvm->arch.mmu_page_hash = h;
+	return 0;
+}
+
 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
@@ -6673,13 +6685,19 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
+	int r;
+
 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
+	r = kvm_mmu_alloc_page_hash(kvm);
+	if (r)
+		return r;
+
 	if (tdp_mmu_enabled)
 		kvm_mmu_init_tdp_mmu(kvm);
 
@@ -6690,6 +6708,7 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 
 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+	return 0;
 }
 
 static void mmu_free_vm_memory_caches(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 69c20a68a3f0..a1d85740d6e7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12704,7 +12704,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (ret)
 		goto out;
 
-	kvm_mmu_init_vm(kvm);
+	ret = kvm_mmu_init_vm(kvm);
+	if (ret)
+		goto out_cleanup_page_track;
 
 	ret = kvm_x86_call(vm_init)(kvm);
 	if (ret)
@@ -12757,6 +12759,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 out_uninit_mmu:
 	kvm_mmu_uninit_vm(kvm);
+out_cleanup_page_track:
 	kvm_page_track_cleanup(kvm);
 out:
 	return ret;
-- 
2.49.0.rc1.451.g8f38331e32-goog



* [PATCH 2/3] KVM: x86: Allocate kvm_vmx/kvm_svm structures using kzalloc()
  2025-03-15  2:40 [PATCH 0/3] KVM: x86: Dynamically allocate hashed page list Sean Christopherson
  2025-03-15  2:40 ` [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's " Sean Christopherson
@ 2025-03-15  2:40 ` Sean Christopherson
  2025-03-15  2:40 ` [PATCH 3/3] KVM: x86/mmu: Defer allocation of shadow MMU's hashed page list Sean Christopherson
  2 siblings, 0 replies; 6+ messages in thread
From: Sean Christopherson @ 2025-03-15  2:40 UTC (permalink / raw)
  To: Sean Christopherson, Paolo Bonzini; +Cc: kvm, linux-kernel

Now that the size of "struct kvm" is less than 2KiB, switch back to using
kzalloc() to allocate the VM structures.  Add compile-time assertions in
vendor code to ensure the size is an order-0 allocation, i.e. to prevent
unknowingly letting the size balloon in the future.
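
As a sanity check on what "order-0" means: get_order() rounds up to the
nearest power-of-two number of pages, so with 4KiB pages:

	get_order(1)    == 0	/* still a single page, i.e. order-0 */
	get_order(4096) == 0
	get_order(4097) == 1	/* would trip the BUILD_BUG_ON() */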

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/svm/svm.c          | 1 +
 arch/x86/kvm/vmx/vmx.c          | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ead9e57446a..04e6c5604bc3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1939,7 +1939,7 @@ void kvm_x86_vendor_exit(void);
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	return kzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT);
 }
 
 #define __KVM_HAVE_ARCH_VM_FREE
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8abeab91d329..589adc5f92e0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5536,6 +5536,7 @@ static int __init svm_init(void)
 	if (r)
 		goto err_kvm_init;
 
+	BUILD_BUG_ON(get_order(sizeof(struct kvm_svm)) != 0);
 	return 0;
 
 err_kvm_init:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b70ed72c1783..01264842bf45 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8755,6 +8755,7 @@ static int __init vmx_init(void)
 	if (r)
 		goto err_kvm_init;
 
+	BUILD_BUG_ON(get_order(sizeof(struct kvm_vmx)) != 0);
 	return 0;
 
 err_kvm_init:
-- 
2.49.0.rc1.451.g8f38331e32-goog



* [PATCH 3/3] KVM: x86/mmu: Defer allocation of shadow MMU's hashed page list
  2025-03-15  2:40 [PATCH 0/3] KVM: x86: Dynamically allocate hashed page list Sean Christopherson
  2025-03-15  2:40 ` [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's " Sean Christopherson
  2025-03-15  2:40 ` [PATCH 2/3] KVM: x86: Allocate kvm_vmx/kvm_svm structures using kzalloc() Sean Christopherson
@ 2025-03-15  2:40 ` Sean Christopherson
  2 siblings, 0 replies; 6+ messages in thread
From: Sean Christopherson @ 2025-03-15  2:40 UTC (permalink / raw)
  To: Sean Christopherson, Paolo Bonzini; +Cc: kvm, linux-kernel

When the TDP MMU is enabled, i.e. when the shadow MMU isn't used until a
nested TDP VM is run, defer allocation of the array of hashed lists used
to track shadow MMU pages until the first shadow root is allocated.
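
Readers are made tolerant of a not-yet-allocated table instead of taking
a lock: a reader that observes a NULL pointer gets a permanently empty
stand-in list, which is safe because shadow pages can only be created
after the table has been published.  Condensed from the diff below:

	/* reader, lock-free */
	page_hash = READ_ONCE(kvm->arch.mmu_page_hash);
	if (!page_hash)
		return &empty_page_hash;	/* no shadow pages exist yet */

	/* writer, serialized by the lock held in mmu_first_shadow_root_alloc() */
	WRITE_ONCE(kvm->arch.mmu_page_hash, h);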

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b878f2e89dec..3765d7abc2cc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1982,14 +1982,25 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
 	return true;
 }
 
+static __ro_after_init HLIST_HEAD(empty_page_hash);
+
+static struct hlist_head *kvm_get_mmu_page_hash(struct kvm *kvm, gfn_t gfn)
+{
+	struct hlist_head *page_hash = READ_ONCE(kvm->arch.mmu_page_hash);
+
+	if (!page_hash)
+		return &empty_page_hash;
+
+	return &page_hash[kvm_page_table_hashfn(gfn)];
+}
+
 #define for_each_valid_sp(_kvm, _sp, _list)				\
 	hlist_for_each_entry(_sp, _list, hash_link)			\
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
-	for_each_valid_sp(_kvm, _sp,					\
-	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
+	for_each_valid_sp(_kvm, _sp, kvm_get_mmu_page_hash(_kvm, _gfn))	\
 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
@@ -2357,6 +2368,7 @@ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
 	struct kvm_mmu_page *sp;
 	bool created = false;
 
+	BUG_ON(!kvm->arch.mmu_page_hash);
 	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
 
 	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
@@ -3884,11 +3896,14 @@ static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
 {
 	typeof(kvm->arch.mmu_page_hash) h;
 
+	if (kvm->arch.mmu_page_hash)
+		return 0;
+
 	h = kcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
 	if (!h)
 		return -ENOMEM;
 
-	kvm->arch.mmu_page_hash = h;
+	WRITE_ONCE(kvm->arch.mmu_page_hash, h);
 	return 0;
 }
 
@@ -3911,9 +3926,13 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
 	if (kvm_shadow_root_allocated(kvm))
 		goto out_unlock;
 
+	r = kvm_mmu_alloc_page_hash(kvm);
+	if (r)
+		goto out_unlock;
+
 	/*
-	 * Check if anything actually needs to be allocated, e.g. all metadata
-	 * will be allocated upfront if TDP is disabled.
+	 * Check if memslot metadata actually needs to be allocated, e.g. all
+	 * metadata will be allocated upfront if TDP is disabled.
 	 */
 	if (kvm_memslots_have_rmaps(kvm) &&
 	    kvm_page_track_write_tracking_enabled(kvm))
-- 
2.49.0.rc1.451.g8f38331e32-goog



* Re: [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's hashed page list
  2025-03-15  2:40 ` [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's " Sean Christopherson
@ 2025-03-17 16:37   ` Vipin Sharma
  2025-03-17 17:11     ` Sean Christopherson
  0 siblings, 1 reply; 6+ messages in thread
From: Vipin Sharma @ 2025-03-17 16:37 UTC (permalink / raw)
  To: Sean Christopherson; +Cc: Paolo Bonzini, kvm, linux-kernel

On 2025-03-14 19:40:08, Sean Christopherson wrote:
> Dynamically allocate the (massive) array of hashed lists used to track
> shadow pages, as the array itself is 32KiB, i.e. is an order-3 allocation
> all on its own, and is *exactly* an order-3 allocation.  Dynamically
> allocating the array will allow allocating "struct kvm" using regular
> kmalloc(), and will also allow deferring allocation of the array until
> it's actually needed, i.e. until the first shadow root is allocated.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  4 ++--
>  arch/x86/kvm/mmu/mmu.c          | 21 ++++++++++++++++++++-
>  arch/x86/kvm/x86.c              |  5 ++++-
>  3 files changed, 26 insertions(+), 4 deletions(-)
> 
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6673,13 +6685,19 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
>  		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
>  }
>  
> -void kvm_mmu_init_vm(struct kvm *kvm)
> +int kvm_mmu_init_vm(struct kvm *kvm)
>  {
> +	int r;
> +
>  	kvm->arch.shadow_mmio_value = shadow_mmio_value;
>  	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
>  	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
>  	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
>  
> +	r = kvm_mmu_alloc_page_hash(kvm);
> +	if (r)
> +		return r;
> +

In patch 3, shouldn't this be moved to the else branch of the
'if (tdp_mmu_enabled)' check below?  Otherwise, the hash array will
always get allocated, even when the TDP MMU is enabled.

>  	if (tdp_mmu_enabled)
>  		kvm_mmu_init_tdp_mmu(kvm);
>  
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -12704,7 +12704,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	if (ret)
>  		goto out;
>  
> -	kvm_mmu_init_vm(kvm);
> +	ret = kvm_mmu_init_vm(kvm);
> +	if (ret)
> +		goto out_cleanup_page_track;
>  
>  	ret = kvm_x86_call(vm_init)(kvm);
>  	if (ret)
> @@ -12757,6 +12759,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  
>  out_uninit_mmu:
>  	kvm_mmu_uninit_vm(kvm);
> +out_cleanup_page_track:

I think there is a memory leak in this series.

1. kvm_mmu_uninit_vm() does not free kvm->arch.mmu_page_hash, so in the
error case the out_uninit_mmu path will not reclaim the memory allocated
by kvm_mmu_alloc_page_hash().

2. When the VM terminates or is killed, the same thing happens: nothing
reclaims the memory.
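
E.g., something like the below (untested) in kvm_mmu_uninit_vm() would
plug both paths, since kfree() tolerates a NULL pointer:

	void kvm_mmu_uninit_vm(struct kvm *kvm)
	{
		...
		kfree(kvm->arch.mmu_page_hash);
	}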

>  	kvm_page_track_cleanup(kvm);
>  out:
>  	return ret;


* Re: [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's hashed page list
  2025-03-17 16:37   ` Vipin Sharma
@ 2025-03-17 17:11     ` Sean Christopherson
  0 siblings, 0 replies; 6+ messages in thread
From: Sean Christopherson @ 2025-03-17 17:11 UTC (permalink / raw)
  To: Vipin Sharma; +Cc: Paolo Bonzini, kvm, linux-kernel

On Mon, Mar 17, 2025, Vipin Sharma wrote:
> On 2025-03-14 19:40:08, Sean Christopherson wrote:
> > Dynamically allocate the (massive) array of hashed lists used to track
> > shadow pages, as the array itself is 32KiB, i.e. is an order-3 allocation
> > all on its own, and is *exactly* an order-3 allocation.  Dynamically
> > allocating the array will allow allocating "struct kvm" using regular
> > kmalloc(), and will also allow deferring allocation of the array until
> > it's actually needed, i.e. until the first shadow root is allocated.
> > 
> > Signed-off-by: Sean Christopherson <seanjc@google.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |  4 ++--
> >  arch/x86/kvm/mmu/mmu.c          | 21 ++++++++++++++++++++-
> >  arch/x86/kvm/x86.c              |  5 ++++-
> >  3 files changed, 26 insertions(+), 4 deletions(-)
> > 
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -6673,13 +6685,19 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
> >  		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
> >  }
> >  
> > -void kvm_mmu_init_vm(struct kvm *kvm)
> > +int kvm_mmu_init_vm(struct kvm *kvm)
> >  {
> > +	int r;
> > +
> >  	kvm->arch.shadow_mmio_value = shadow_mmio_value;
> >  	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
> >  	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
> >  	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
> >  
> > +	r = kvm_mmu_alloc_page_hash(kvm);
> > +	if (r)
> > +		return r;
> > +
> 
> In the patch 3, shouldn't this be moved to else part of the below 
> 'if (tdp_mmu_enabled)' line? Otherwise, this hash array will always get
> allocated.

Ugh, I botched the rebase, and didn't spot-test that the allocations
actually went away.

Before commit 0df9dab891ff ("KVM: x86/mmu: Stop zapping invalidated TDP MMU roots
asynchronously"), kvm_mmu_init_tdp_mmu() returned a value and so the code was:

	if (tdp_mmu_enabled)
		r = kvm_mmu_init_tdp_mmu(kvm);
	else
		r = kvm_mmu_alloc_page_hash(kvm);
	if (r < 0)
		return r;

I suppose the least ugly approach is:

	if (tdp_mmu_enabled) {
		kvm_mmu_init_tdp_mmu(kvm);
	} else {
		r = kvm_mmu_alloc_page_hash(kvm);
		if (r)
			return r;
	}

> >  	if (tdp_mmu_enabled)
> >  		kvm_mmu_init_tdp_mmu(kvm);
> >  
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -12704,7 +12704,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >  	if (ret)
> >  		goto out;
> >  
> > -	kvm_mmu_init_vm(kvm);
> > +	ret = kvm_mmu_init_vm(kvm);
> > +	if (ret)
> > +		goto out_cleanup_page_track;
> >  
> >  	ret = kvm_x86_call(vm_init)(kvm);
> >  	if (ret)
> > @@ -12757,6 +12759,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >  
> >  out_uninit_mmu:
> >  	kvm_mmu_uninit_vm(kvm);
> > +out_cleanup_page_track:
> 
> I think there is a memory leak in this series.

/facepalm

Good job, me.

Thanks for the review!


Thread overview: 6+ messages
2025-03-15  2:40 [PATCH 0/3] KVM: x86: Dynamically allocate hashed page list Sean Christopherson
2025-03-15  2:40 ` [PATCH 1/3] KVM: x86/mmu: Dynamically allocate shadow MMU's " Sean Christopherson
2025-03-17 16:37   ` Vipin Sharma
2025-03-17 17:11     ` Sean Christopherson
2025-03-15  2:40 ` [PATCH 2/3] KVM: x86: Allocate kvm_vmx/kvm_svm structures using kzalloc() Sean Christopherson
2025-03-15  2:40 ` [PATCH 3/3] KVM: x86/mmu: Defer allocation of shadow MMU's hashed page list Sean Christopherson
