public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: Andres Lagar-Cavilla <andreslc@google.com>,
	Radim Krcmar <rkrcmar@redhat.com>, Rik van Riel <riel@redhat.com>,
	Gleb Natapov <gleb@kernel.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	kvm@vger.kernel.org
Subject: Re: [PATCH] kvm/x86/mmu: Pass gfn and level to rmapp callback.
Date: Tue, 23 Sep 2014 22:28:59 +0200	[thread overview]
Message-ID: <5421D80B.7030700@redhat.com> (raw)
In-Reply-To: <1411500894-30542-1-git-send-email-andreslc@google.com>

Il 23/09/2014 21:34, Andres Lagar-Cavilla ha scritto:
> Callbacks don't have to do extra computation to learn what the caller
> (kvm_handle_hva_range()) knows very well. Useful for
> debugging/tracing/printk/future.
> 
> Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com>
> ---
>  arch/x86/kvm/mmu.c         | 38 ++++++++++++++++++++++----------------
>  include/trace/events/kvm.h | 10 ++++++----
>  2 files changed, 28 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index f33d5e4..cc14eba 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
>  }
>  
>  static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
> -			   struct kvm_memory_slot *slot, unsigned long data)
> +			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
> +			   unsigned long data)
>  {
>  	u64 *sptep;
>  	struct rmap_iterator iter;
> @@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
>  
>  	while ((sptep = rmap_get_first(*rmapp, &iter))) {
>  		BUG_ON(!(*sptep & PT_PRESENT_MASK));
> -		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
> +		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
> +			     sptep, *sptep, gfn, level);
>  
>  		drop_spte(kvm, sptep);
>  		need_tlb_flush = 1;
> @@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
>  }
>  
>  static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
> -			     struct kvm_memory_slot *slot, unsigned long data)
> +			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
> +			     unsigned long data)
>  {
>  	u64 *sptep;
>  	struct rmap_iterator iter;
> @@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
>  
>  	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
>  		BUG_ON(!is_shadow_present_pte(*sptep));
> -		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
> +		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
> +			     sptep, *sptep, gfn, level);
>  
>  		need_flush = 1;
>  
> @@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
>  				int (*handler)(struct kvm *kvm,
>  					       unsigned long *rmapp,
>  					       struct kvm_memory_slot *slot,
> +					       gfn_t gfn,
> +					       int level,
>  					       unsigned long data))
>  {
>  	int j;
> @@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
>  		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
>  			unsigned long idx, idx_end;
>  			unsigned long *rmapp;
> +			gfn_t gfn = gfn_start;
>  
>  			/*
>  			 * {idx(page_j) | page_j intersects with
> @@ -1367,8 +1374,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
>  
>  			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
>  
> -			for (; idx <= idx_end; ++idx)
> -				ret |= handler(kvm, rmapp++, memslot, data);
> +			for (; idx <= idx_end;
> +			       ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
> +				ret |= handler(kvm, rmapp++, memslot,
> +					       gfn, j, data);
>  		}
>  	}
>  
> @@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
>  			  unsigned long data,
>  			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
>  					 struct kvm_memory_slot *slot,
> +					 gfn_t gfn, int level,
>  					 unsigned long data))
>  {
>  	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
> @@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
>  }
>  
>  static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
> -			 struct kvm_memory_slot *slot, unsigned long data)
> +			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
> +			 unsigned long data)
>  {
>  	u64 *sptep;
>  	struct rmap_iterator uninitialized_var(iter);
> @@ -1410,25 +1421,20 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
>  
>  	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
>  	     sptep = rmap_get_next(&iter)) {
> -		struct kvm_mmu_page *sp;
> -		gfn_t gfn;
>  		BUG_ON(!is_shadow_present_pte(*sptep));
> -		/* From spte to gfn. */
> -		sp = page_header(__pa(sptep));
> -		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
> -
>  		if (*sptep & shadow_accessed_mask) {
>  			young = 1;
>  			clear_bit((ffs(shadow_accessed_mask) - 1),
>  				 (unsigned long *)sptep);
>  		}
> -		trace_kvm_age_page(gfn, slot, young);
> +		trace_kvm_age_page(gfn, level, slot, young);
>  	}
>  	return young;
>  }
>  
>  static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
> -			      struct kvm_memory_slot *slot, unsigned long data)
> +			      struct kvm_memory_slot *slot, gfn_t gfn,
> +			      int level, unsigned long data)
>  {
>  	u64 *sptep;
>  	struct rmap_iterator iter;
> @@ -1466,7 +1472,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
>  
>  	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
>  
> -	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
> +	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
>  	kvm_flush_remote_tlbs(vcpu->kvm);
>  }
>  
> diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
> index 0d2de78..6edf1f2 100644
> --- a/include/trace/events/kvm.h
> +++ b/include/trace/events/kvm.h
> @@ -225,24 +225,26 @@ TRACE_EVENT(kvm_fpu,
>  );
>  
>  TRACE_EVENT(kvm_age_page,
> -	TP_PROTO(ulong gfn, struct kvm_memory_slot *slot, int ref),
> -	TP_ARGS(gfn, slot, ref),
> +	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
> +	TP_ARGS(gfn, level, slot, ref),
>  
>  	TP_STRUCT__entry(
>  		__field(	u64,	hva		)
>  		__field(	u64,	gfn		)
> +		__field(	u8,	level		)
>  		__field(	u8,	referenced	)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->gfn		= gfn;
> +		__entry->level		= level;
>  		__entry->hva		= ((gfn - slot->base_gfn) <<
>  					    PAGE_SHIFT) + slot->userspace_addr;
>  		__entry->referenced	= ref;
>  	),
>  
> -	TP_printk("hva %llx gfn %llx %s",
> -		  __entry->hva, __entry->gfn,
> +	TP_printk("hva %llx gfn %llx level %u %s",
> +		  __entry->hva, __entry->gfn, __entry->level,
>  		  __entry->referenced ? "YOUNG" : "OLD")
>  );
>  
> 

Looks good, thanks.

Paolo

      reply	other threads:[~2014-09-23 20:29 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-09-23 19:34 [PATCH] kvm/x86/mmu: Pass gfn and level to rmapp callback Andres Lagar-Cavilla
2014-09-23 20:28 ` Paolo Bonzini [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5421D80B.7030700@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=andreslc@google.com \
    --cc=gleb@kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=riel@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=rostedt@goodmis.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox