public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Izik Eidus <izike@qumranet.com>
To: Avi Kivity <avi@qumranet.com>
Cc: kvm-devel <kvm-devel@lists.sourceforge.net>,
	Marcelo Tosatti <marcelo@kvack.org>
Subject: Re: KVM: register the kvm mmu cache with the shrinker.
Date: Tue, 25 Mar 2008 15:03:42 +0200	[thread overview]
Message-ID: <47E8F82E.3000905@qumranet.com> (raw)
In-Reply-To: <47E8F1CA.5000408@qumranet.com>

Avi Kivity wrote:
> Izik Eidus wrote:
>> this is the shrinker patch with all comments applied beside adding
>> aging mechanism
>> it look like the aging mechanism is not really needed and therefor for
>> now it isn't
>> implemented.
>>
>> From 8503a57ae88ba819e4eac6371172772c98b485f0 Mon Sep 17 00:00:00 2001
>> From: Izik Eidus <izike@qumranet.com>
>> Date: Tue, 25 Mar 2008 14:03:27 +0200
>> Subject: [PATCH] KVM: register the kvm mmu cache with the shrinker.
>>
>> Signed-off-by: Izik Eidus <izike@qumranet.com>
>> ---
>>  arch/x86/kvm/mmu.c |   54
>> ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 files changed, 54 insertions(+), 0 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index c67ec62..c42c0db 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -1972,6 +1972,57 @@ void kvm_mmu_zap_all(struct kvm *kvm)
>>      kvm_flush_remote_tlbs(kvm);
>>  }
>> +static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
>> +{
>> +    struct kvm *kvm;
>> +    struct kvm *kvm_freed = NULL;
>> +    int cache_count = 0;
>> +
>> +    spin_lock(&kvm_lock);
>> +    if (list_empty(&vm_list)) {
>> +        spin_unlock(&kvm_lock);
>> +        return 0;
>> +    }
>>   
> 
> Unnecessary, if the list is empty the loop below doesn't execute and we
> return 0 anyway.
> 
>> +    list_for_each_entry(kvm, &vm_list, vm_list) {
>> +        int npages;
>> +
>> +        spin_lock(&kvm->mmu_lock);
>> +        npages = kvm->arch.n_alloc_mmu_pages -
>> +             kvm->arch.n_free_mmu_pages;
>> +        cache_count += npages - KVM_MIN_ALLOC_MMU_PAGES;
>>   
> 
> I think we should allow the shrinker to go below
> KVM_MIN_ALLOC_MMU_PAGES; in particular, if the vm is inactive, we should
> be able to shrink it to nothing.
> 
> When the VM starts executing again, it will reallocate those pages.
> 
>> +        if (!kvm_freed && nr_to_scan > 0 && npages >
>> +            KVM_MIN_ALLOC_MMU_PAGES) {
>>   
> 
> Don't split an expression like that; the tightly binding expression
> should stay on the same line:
> 
> if (!kvm_freed && nr_to_scan > 0 &&
>     npages > KVM_MIN_ALLOC_MMU_PAGES) {
> 
>> +            kvm_mmu_remove_one_alloc_mmu_page(kvm);
>> +            cache_count--;
>> +            kvm_freed = kvm;
>> +        }
>> +        nr_to_scan--;
>> +
>> +        spin_unlock(&kvm->mmu_lock);
>> +    }
>> +    if (kvm_freed) {
>> +        list_del(&kvm_freed->vm_list);
>> +        list_add_tail(&kvm_freed->vm_list, &vm_list);
>> +    }
>>   
> 
> list_move_tail()
> 
>> +    spin_unlock(&kvm_lock);
>> +
>> +    return cache_count;
>> +}
>> +
>> +static struct shrinker mmu_shrinker = {
>> +    .shrink = mmu_shrink,
>> +    .seeks = DEFAULT_SEEKS * 10,
>> +};
>> +
>>  void kvm_mmu_module_exit(void)
>>  {
>>      if (pte_chain_cache)
>> @@ -1980,6 +2031,7 @@ void kvm_mmu_module_exit(void)
>>          kmem_cache_destroy(rmap_desc_cache);
>>      if (mmu_page_header_cache)
>>          kmem_cache_destroy(mmu_page_header_cache);
>>
>>   
> 
> I believe it is necessary to register the shrinker in order to have any
> observable effect.
> 
sorry, I didn't send the whole patch
here is the one with your comments applied.
(btw, I have just seen something weird about the memory, so wait before you apply it)

>From 989c2f8373d8257e9c2b9a8c6ed8d629cd2a9d74 Mon Sep 17 00:00:00 2001
From: Izik Eidus <izike@qumranet.com>
Date: Tue, 25 Mar 2008 14:03:27 +0200
Subject: [PATCH] KVM: register the kvm mmu cache with the shrinker.

Signed-off-by: Izik Eidus <izike@qumranet.com>
---
 arch/x86/kvm/mmu.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c67ec62..b6c1cb2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1972,6 +1972,52 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+	struct kvm_mmu_page *page;
+
+	page = container_of(kvm->arch.active_mmu_pages.prev,
+			    struct kvm_mmu_page, link);
+	kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	struct kvm *kvm;
+	struct kvm *kvm_freed = NULL;
+	int cache_count = 0;
+
+	spin_lock(&kvm_lock);
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		int npages;
+
+		spin_lock(&kvm->mmu_lock);
+		npages = kvm->arch.n_alloc_mmu_pages -
+			 kvm->arch.n_free_mmu_pages;
+		cache_count += npages;
+		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+			kvm_mmu_remove_one_alloc_mmu_page(kvm);
+			cache_count--;
+			kvm_freed = kvm;
+		}
+		nr_to_scan--;
+
+		spin_unlock(&kvm->mmu_lock);
+	}
+	if (kvm_freed)
+		list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+	spin_unlock(&kvm_lock);
+
+	return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+	.shrink = mmu_shrink,
+	.seeks = DEFAULT_SEEKS * 10,
+};
+
 void kvm_mmu_module_exit(void)
 {
 	if (pte_chain_cache)
@@ -1980,6 +2026,7 @@ void kvm_mmu_module_exit(void)
 		kmem_cache_destroy(rmap_desc_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
+	unregister_shrinker(&mmu_shrinker);
 }
 
 int kvm_mmu_module_init(void)
@@ -2001,6 +2048,8 @@ int kvm_mmu_module_init(void)
 	if (!mmu_page_header_cache)
 		goto nomem;
 
+	register_shrinker(&mmu_shrinker);
+
 	return 0;
 
 nomem:
-- 
1.5.3.6


-- 
woof.

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2008.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

  reply	other threads:[~2008-03-25 13:03 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-03-25 12:15 KVM: register the kvm mmu cache with the shrinker Izik Eidus
2008-03-25 12:36 ` Avi Kivity
2008-03-25 13:03   ` Izik Eidus [this message]
2008-03-27 12:45     ` Avi Kivity
2008-03-30 12:17       ` Izik Eidus
2008-03-30 13:30         ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=47E8F82E.3000905@qumranet.com \
    --to=izike@qumranet.com \
    --cc=avi@qumranet.com \
    --cc=kvm-devel@lists.sourceforge.net \
    --cc=marcelo@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox