From: Liang Chen <liangchen.linux@gmail.com>
To: seanjc@google.com, pbonzini@redhat.com
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
liangchen.linux@gmail.com
Subject: [PATCH] KVM: x86/mmu: Register MMU shrinker only when necessary
Date: Wed, 14 Aug 2024 16:23:02 +0800 [thread overview]
Message-ID: <20240814082302.50032-1-liangchen.linux@gmail.com> (raw)
The MMU shrinker is currently allocated unconditionally, even when the TDP
MMU is enabled, where it is useless except for nested VMs. Its
'count_objects' callback is also invoked on every pass of the reclaim
path when it tries to shrink slab caches. Allocate and register the
shrinker only when it is actually needed.
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
---
arch/x86/kvm/mmu/mmu.c | 49 ++++++++++++++++++++++++++++++------------
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 928cf84778b0..d43d7548d801 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -69,11 +69,17 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif
+static struct shrinker *mmu_shrinker;
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
+static unsigned long mmu_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+static unsigned long mmu_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
+
static const struct kernel_param_ops nx_huge_pages_ops = {
.set = set_nx_huge_pages,
.get = get_nx_huge_pages,
@@ -5666,6 +5672,28 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
reset_guest_paging_metadata(vcpu, g_context);
}
+static void kvm_mmu_shrinker_init(void)
+{
+ struct shrinker *shrinker = shrinker_alloc(0, "x86-mmu");
+
+ if (!shrinker) {
+ pr_warn_once("could not allocate shrinker\n");
+ return;
+ }
+
+ /* Ensure mmu_shrinker is assigned only once. */
+ if (cmpxchg(&mmu_shrinker, NULL, shrinker)) {
+ shrinker_free(shrinker);
+ return;
+ }
+
+ mmu_shrinker->count_objects = mmu_shrink_count;
+ mmu_shrinker->scan_objects = mmu_shrink_scan;
+ mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
+
+ shrinker_register(mmu_shrinker);
+}
+
void kvm_init_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
@@ -5677,6 +5705,13 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
init_kvm_tdp_mmu(vcpu, cpu_role);
else
init_kvm_softmmu(vcpu, cpu_role);
+
+ /*
+ * Register MMU shrinker only if TDP MMU is disabled or
+ * in nested VM scenarios.
+ */
+ if (unlikely(!mmu_shrinker) && (!tdp_mmu_enabled || mmu_is_nested(vcpu)))
+ kvm_mmu_shrinker_init();
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);
@@ -7092,8 +7127,6 @@ static unsigned long mmu_shrink_count(struct shrinker *shrink,
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
-static struct shrinker *mmu_shrinker;
-
static void mmu_destroy_caches(void)
{
kmem_cache_destroy(pte_list_desc_cache);
@@ -7223,20 +7256,8 @@ int kvm_mmu_vendor_module_init(void)
if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto out;
- mmu_shrinker = shrinker_alloc(0, "x86-mmu");
- if (!mmu_shrinker)
- goto out_shrinker;
-
- mmu_shrinker->count_objects = mmu_shrink_count;
- mmu_shrinker->scan_objects = mmu_shrink_scan;
- mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
-
- shrinker_register(mmu_shrinker);
-
return 0;
-out_shrinker:
- percpu_counter_destroy(&kvm_total_used_mmu_pages);
out:
mmu_destroy_caches();
return ret;
--
2.40.1
next reply other threads:[~2024-08-14 8:23 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-14 8:23 Liang Chen [this message]
2024-08-14 14:41 ` [PATCH] KVM: x86/mmu: Register MMU shrinker only when necessary Sean Christopherson
2024-08-19 21:40 ` Vipin Sharma
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240814082302.50032-1-liangchen.linux@gmail.com \
--to=liangchen.linux@gmail.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox