public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Anthony Liguori <anthony-rdkfGonbjUSkNkDKm+mE6A@public.gmane.org>
To: Anthony Liguori <aliguori-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org,
	Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
Subject: Re: [PATCH] Allocate userspace memory for older userspace (v3)
Date: Thu, 18 Oct 2007 10:18:11 -0500	[thread overview]
Message-ID: <47177933.50707@codemonkey.ws> (raw)
In-Reply-To: <1192719574998-git-send-email-aliguori-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>

Weird... guilt stripped the diffstat.  It should be:

 kvm.h      |    2 -
 kvm_main.c |   83 
++++++++++++++++++++++---------------------------------------
 2 files changed, 30 insertions(+), 55 deletions(-)

Regards,

Anthony Liguori

Anthony Liguori wrote:
> Allocate a userspace buffer for older userspaces.  Also eliminate phys_mem
> buffer.  The memset() in kvmctl really kills initial memory usage, but
> swapping does, even with old userspaces.
>
> Since v1, fixed a bug in slot creation.
>
> Since v2, changed the error checking to use IS_ERR().
>
> Signed-off-by: Anthony Liguori <aliguori-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>
>
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index 53b2d58..eb086e9 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -407,10 +407,8 @@ struct kvm_memory_slot {
>  	gfn_t base_gfn;
>  	unsigned long npages;
>  	unsigned long flags;
> -	struct page **phys_mem;
>  	unsigned long *rmap;
>  	unsigned long *dirty_bitmap;
> -	int user_alloc; /* user allocated memory */
>  	unsigned long userspace_addr;
>  };
>  
> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> index c84bbea..1db607c 100644
> --- a/drivers/kvm/kvm_main.c
> +++ b/drivers/kvm/kvm_main.c
> @@ -42,6 +42,7 @@
>  #include <linux/profile.h>
>  #include <linux/kvm_para.h>
>  #include <linux/pagemap.h>
> +#include <linux/mman.h>
>  
>  #include <asm/processor.h>
>  #include <asm/msr.h>
> @@ -322,36 +323,21 @@ static struct kvm *kvm_create_vm(void)
>  	return kvm;
>  }
>  
> -static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
> -{
> -	int i;
> -
> -	for (i = 0; i < free->npages; ++i)
> -		if (free->phys_mem[i])
> -			__free_page(free->phys_mem[i]);
> -}
> -
>  /*
>   * Free any memory in @free but not in @dont.
>   */
>  static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
>  				  struct kvm_memory_slot *dont)
>  {
> -	if (!dont || free->phys_mem != dont->phys_mem)
> -		if (free->phys_mem) {
> -			if (!free->user_alloc)
> -				kvm_free_kernel_physmem(free);
> -			vfree(free->phys_mem);
> -		}
>  	if (!dont || free->rmap != dont->rmap)
>  		vfree(free->rmap);
>  
>  	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
>  		vfree(free->dirty_bitmap);
>  
> -	free->phys_mem = NULL;
>  	free->npages = 0;
>  	free->dirty_bitmap = NULL;
> +	free->rmap = NULL;
>  }
>  
>  static void kvm_free_physmem(struct kvm *kvm)
> @@ -734,10 +720,6 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
>  			goto out_unlock;
>  	}
>  
> -	/* Deallocate if slot is being removed */
> -	if (!npages)
> -		new.phys_mem = NULL;
> -
>  	/* Free page dirty bitmap if unneeded */
>  	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
>  		new.dirty_bitmap = NULL;
> @@ -745,29 +727,27 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
>  	r = -ENOMEM;
>  
>  	/* Allocate if a slot is being created */
> -	if (npages && !new.phys_mem) {
> -		new.phys_mem = vmalloc(npages * sizeof(struct page *));
> -
> -		if (!new.phys_mem)
> -			goto out_unlock;
> -
> +	if (npages && !new.rmap) {
>  		new.rmap = vmalloc(npages * sizeof(struct page *));
>  
>  		if (!new.rmap)
>  			goto out_unlock;
>  
> -		memset(new.phys_mem, 0, npages * sizeof(struct page *));
>  		memset(new.rmap, 0, npages * sizeof(*new.rmap));
> -		if (user_alloc) {
> -			new.user_alloc = 1;
> +
> +		if (user_alloc)
>  			new.userspace_addr = mem->userspace_addr;
> -		} else {
> -			for (i = 0; i < npages; ++i) {
> -				new.phys_mem[i] = alloc_page(GFP_HIGHUSER
> -							     | __GFP_ZERO);
> -				if (!new.phys_mem[i])
> -					goto out_unlock;
> -			}
> +		else {
> +			down_write(&current->mm->mmap_sem);
> +			new.userspace_addr = do_mmap(NULL, 0,
> +						     npages * PAGE_SIZE,
> +						     PROT_READ | PROT_WRITE,
> +						     MAP_SHARED | MAP_ANONYMOUS,
> +						     0);
> +			up_write(&current->mm->mmap_sem);
> +
> +			if (IS_ERR((void *)new.userspace_addr))
> +				goto out_unlock;
>  		}
>  	}
>  
> @@ -1032,6 +1012,8 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
>  struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
>  {
>  	struct kvm_memory_slot *slot;
> +	struct page *page[1];
> +	int npages;
>  
>  	gfn = unalias_gfn(kvm, gfn);
>  	slot = __gfn_to_memslot(kvm, gfn);
> @@ -1039,24 +1021,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
>  		get_page(bad_page);
>  		return bad_page;
>  	}
> -	if (slot->user_alloc) {
> -		struct page *page[1];
> -		int npages;
> -
> -		down_read(&current->mm->mmap_sem);
> -		npages = get_user_pages(current, current->mm,
> -					slot->userspace_addr
> -					+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
> -					1, 0, page, NULL);
> -		up_read(&current->mm->mmap_sem);
> -		if (npages != 1) {
> -			get_page(bad_page);
> -			return bad_page;
> -		}
> -		return page[0];
> +
> +	down_read(&current->mm->mmap_sem);
> +	npages = get_user_pages(current, current->mm,
> +				slot->userspace_addr
> +				+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
> +				1, 0, page, NULL);
> +	up_read(&current->mm->mmap_sem);
> +	if (npages != 1) {
> +		get_page(bad_page);
> +		return bad_page;
>  	}
> -	get_page(slot->phys_mem[gfn - slot->base_gfn]);
> -	return slot->phys_mem[gfn - slot->base_gfn];
> +
> +	return page[0];
>  }
>  EXPORT_SYMBOL_GPL(gfn_to_page);
>  
>
> -------------------------------------------------------------------------
> This SF.net email is sponsored by: Splunk Inc.
> Still grepping through log files to find problems?  Stop.
> Now Search log events and configuration files using AJAX and a browser.
> Download your FREE copy of Splunk now >> http://get.splunk.com/
> _______________________________________________
> kvm-devel mailing list
> kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
> https://lists.sourceforge.net/lists/listinfo/kvm-devel
>
>   


-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/

  parent reply	other threads:[~2007-10-18 15:18 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-10-18 14:59 [PATCH] Allocate userspace memory for older userspace (v3) Anthony Liguori
     [not found] ` <1192719574998-git-send-email-aliguori-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org>
2007-10-18 15:18   ` Anthony Liguori [this message]
2007-10-18 15:43   ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=47177933.50707@codemonkey.ws \
    --to=anthony-rdkfgonbjusknkdkm+me6a@public.gmane.org \
    --cc=aliguori-r/Jw6+rmf7HQT0dZR+AlfA@public.gmane.org \
    --cc=avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org \
    --cc=kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox