linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
To: pbonzini@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type
Date: Sat, 30 May 2015 18:59:23 +0800	[thread overview]
Message-ID: <1432983566-15773-13-git-send-email-guangrong.xiao@linux.intel.com> (raw)
In-Reply-To: <1432983566-15773-1-git-send-email-guangrong.xiao@linux.intel.com>

It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by MTRRs.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 }
 
+/*
+ * Iterator state for mtrr_for_each_mem_type(): walks the fixed MTRRs
+ * first (when they apply) and then the variable MTRRs overlapping the
+ * requested gpa range.
+ */
+struct mtrr_looker {
+	/* input fields. */
+	struct kvm_mtrr *mtrr_state;
+	u64 start;
+	u64 end;
+
+	/* output fields. */
+	/* memory type of the current match; -1 once the walk terminates. */
+	int mem_type;
+	/* true if part of [start, end) is NOT covered by any MTRR. */
+	bool partial_map;
+
+	/* private fields. */
+	union {
+		/* used for fixed MTRRs. */
+		struct {
+			int index;
+			int seg;
+		};
+
+		/* used for var MTRRs. */
+		struct {
+			struct kvm_mtrr_range *range;
+			/* max address has been covered in var MTRRs. */
+			u64 start_max;
+		};
+	};
+
+	/* true while walking fixed MTRRs, false while walking var MTRRs. */
+	bool fixed;
+};
+
+/*
+ * Bind @looker to @mtrr_state and the gpa range [start, end).
+ * Only the input fields are set here; mtrr_lookup_start() must run
+ * before the output fields are meaningful.
+ */
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+	looker->mtrr_state = mtrr_state;
+	looker->start = start;
+	looker->end = end;
+}
+
+/*
+ * Return the address that is @index fixed ranges past the start of
+ * segment @seg, i.e. the exclusive end of range @index - 1.
+ * NOTE(review): callers appear to pass both segment-relative and global
+ * range indexes here (cf. fixed_mtrr_addr_seg_to_range_index) — confirm
+ * which one is intended.
+ */
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+	return mtrr_seg->start + mtrr_seg->range_size * index;
+}
+
+/*
+ * Try to start the walk in the fixed MTRRs.  Returns false — meaning the
+ * caller should fall back to the variable MTRRs — when fixed MTRRs are
+ * disabled or looker->start lies outside every fixed segment.
+ */
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+	int seg, index;
+
+	if (!looker->mtrr_state->fixed_mtrr_enabled)
+		return false;
+
+	seg = fixed_mtrr_addr_to_seg(looker->start);
+	if (seg < 0)
+		return false;
+
+	looker->fixed = true;
+	index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+	looker->index = index;
+	looker->seg = seg;
+	looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+	/* advance start past the range just consumed. */
+	looker->start = fixed_mtrr_range_end_addr(seg, index);
+	return true;
+}
+
+/*
+ * If @range overlaps [looker->start, looker->end), record it as the
+ * current match — remembering the range, extracting its memory type
+ * (low byte of the base MSR) and accounting for any uncovered gap
+ * before it — and return true; otherwise return false.
+ */
+static bool match_var_range(struct mtrr_looker *looker,
+			    struct kvm_mtrr_range *range)
+{
+	u64 start, end;
+
+	var_mtrr_range(range, &start, &end);
+	if (!(start >= looker->end || end <= looker->start)) {
+		looker->range = range;
+		looker->mem_type = range->base & 0xff;
+
+		/*
+		 * This is called while walking kvm_mtrr.head, so @range has
+		 * the minimum base address among the ranges overlapping
+		 * [looker->start_max, looker->end); any gap below its start
+		 * is therefore covered by no MTRR at all.
+		 */
+		looker->partial_map |= looker->start_max < start;
+
+		/* update the max address has been covered. */
+		looker->start_max = max(looker->start_max, end);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Begin walking the variable MTRRs for [looker->start, looker->end),
+ * positioning the looker on the first overlapping range.
+ */
+static void mtrr_lookup_var_start(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	struct kvm_mtrr_range *range;
+
+	looker->fixed = false;
+	looker->partial_map = false;
+	looker->start_max = looker->start;
+	/* -1 terminates mtrr_for_each_mem_type() if nothing matches. */
+	looker->mem_type = -1;
+
+	list_for_each_entry(range, &mtrr_state->head, node)
+		if (match_var_range(looker, range))
+			return;
+
+	/* no variable MTRR overlaps the requested range at all. */
+	looker->partial_map = true;
+}
+
+/*
+ * Advance the walk to the next fixed range, falling through into the
+ * variable MTRRs once the fixed segments are exhausted.
+ */
+static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
+{
+	struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+	u64 end;
+
+	/* the whole request was satisfied inside the fixed MTRRs. */
+	if (looker->start >= looker->end) {
+		looker->mem_type = -1;
+		looker->partial_map = false;
+		return;
+	}
+
+	WARN_ON(!looker->fixed);
+
+	looker->index++;
+	end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+	/* switch to next segment. */
+	if (end >= eseg->end) {
+		looker->seg++;
+		/*
+		 * NOTE(review): index is reset to the segment-relative value
+		 * 0 here, yet it is used below to index the global
+		 * fixed_ranges[] array exactly as the (seemingly global)
+		 * index from mtrr_lookup_fixed_start() is — confirm the two
+		 * indexing schemes agree for segments after the first.
+		 */
+		looker->index = 0;
+
+		/* have looked up for all fixed MTRRs. */
+		if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+			return mtrr_lookup_var_start(looker);
+
+		end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+	}
+
+	looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+	looker->start = end;
+}
+
+/*
+ * Move to the next variable MTRR overlapping the range; mem_type stays
+ * -1 (terminating the walk) when no further range matches.
+ */
+static void mtrr_lookup_var_next(struct mtrr_looker *looker)
+{
+	struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+
+	WARN_ON(looker->fixed);
+
+	looker->mem_type = -1;
+
+	list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
+		if (match_var_range(looker, looker->range))
+			return;
+
+	/* flag any tail of [start, end) left uncovered by variable MTRRs. */
+	looker->partial_map |= looker->start_max < looker->end;
+}
+
+/*
+ * Position the looker on the first matching MTRR.  With MTRRs disabled
+ * the walk yields nothing and the range is reported as a partial map.
+ */
+static void mtrr_lookup_start(struct mtrr_looker *looker)
+{
+	looker->mem_type = -1;
+
+	if (!looker->mtrr_state->mtrr_enabled) {
+		looker->partial_map = true;
+		return;
+	}
+
+	if (!mtrr_lookup_fixed_start(looker))
+		mtrr_lookup_var_start(looker);
+}
+
+/* Step the walk; mem_type becomes -1 once the lookup is exhausted. */
+static void mtrr_lookup_next(struct mtrr_looker *looker)
+{
+	/* must not be called after the walk has already terminated. */
+	WARN_ON(looker->mem_type == -1);
+
+	if (looker->fixed)
+		mtrr_lookup_fixed_next(looker);
+	else
+		mtrr_lookup_var_next(looker);
+}
+
+/*
+ * Walk every memory type MTRRs assign to [_gpa_start_, _gpa_end_):
+ * read (_looker_)->mem_type in the loop body, and check
+ * (_looker_)->partial_map afterwards to see if the range was fully
+ * covered.
+ */
+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+	for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
+	     mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1;	 \
+	     mtrr_lookup_next(_looker_))
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
-- 
2.1.0


  parent reply	other threads:[~2015-05-30 11:02 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-05-30 10:59 [PATCH 00/15] KVM: x86: fully implement vMTRR Xiao Guangrong
2015-05-30 10:58 ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 01/15] KVM: x86: move MTRR related code to a separate file Xiao Guangrong
2015-05-30 10:59 ` [PATCH 02/15] KVM: MTRR: handle MSR_MTRRcap in kvm_mtrr_get_msr Xiao Guangrong
2015-05-30 10:59 ` [PATCH 03/15] KVM: MTRR: remove mtrr_state.have_fixed Xiao Guangrong
2015-05-30 10:59 ` [PATCH 04/15] KVM: MTRR: exactly define the size of variable MTRRs Xiao Guangrong
2015-05-30 10:59 ` [PATCH 05/15] KVM: MTRR: clean up mtrr default type Xiao Guangrong
2015-06-01  9:11   ` Paolo Bonzini
2015-06-03  1:55     ` Xiao Guangrong
2015-06-09  0:35   ` David Matlack
2015-05-30 10:59 ` [PATCH 06/15] KVM: MTRR: do not split 64 bits MSR content Xiao Guangrong
2015-05-30 10:59 ` [PATCH 07/15] KVM: MTRR: improve kvm_mtrr_get_guest_memory_type Xiao Guangrong
2015-06-01  9:16   ` Paolo Bonzini
2015-06-03  2:12     ` Xiao Guangrong
2015-06-03  7:57       ` Paolo Bonzini
2015-05-30 10:59 ` [PATCH 08/15] KVM: MTRR: introduce fixed_mtrr_segment table Xiao Guangrong
2015-06-01  9:25   ` Paolo Bonzini
2015-06-03  2:29     ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 09/15] KVM: MTRR: introduce var_mtrr_range Xiao Guangrong
2015-06-09  0:36   ` David Matlack
2015-06-09  2:38     ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 10/15] KVM: MTRR: sort variable MTRRs Xiao Guangrong
2015-06-01  9:27   ` Paolo Bonzini
2015-06-03  2:31     ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 11/15] KVM: MTRR: introduce fixed_mtrr_addr_* functions Xiao Guangrong
2015-05-30 10:59 ` Xiao Guangrong [this message]
2015-06-01  9:33   ` [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type Paolo Bonzini
2015-06-01 14:26     ` Paolo Bonzini
2015-06-03  2:57       ` Xiao Guangrong
2015-06-03  2:40     ` Xiao Guangrong
2015-06-09  0:36   ` David Matlack
2015-06-09  2:45     ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 13/15] KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type Xiao Guangrong
2015-05-30 10:59 ` [PATCH 14/15] KVM: MTRR: do not map huge page for non-consistent range Xiao Guangrong
2015-06-01  9:36   ` Paolo Bonzini
2015-06-01  9:38     ` Paolo Bonzini
2015-06-03  2:56     ` Xiao Guangrong
2015-06-03  7:55       ` Paolo Bonzini
2015-06-04  8:23         ` Xiao Guangrong
2015-06-04  8:26           ` Xiao Guangrong
2015-06-04  8:34             ` Paolo Bonzini
2015-06-04  8:36           ` Paolo Bonzini
2015-06-05  6:33             ` Xiao Guangrong
2015-05-30 10:59 ` [PATCH 15/15] KVM: VMX: fully implement guest MTRR virtualization Xiao Guangrong

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1432983566-15773-13-git-send-email-guangrong.xiao@linux.intel.com \
    --to=guangrong.xiao@linux.intel.com \
    --cc=gleb@kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mtosatti@redhat.com \
    --cc=pbonzini@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).