public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
To: kvm-ppc@vger.kernel.org
Cc: paulus@ozlabs.org, kvm@vger.kernel.org,
	Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Subject: [PATCH 14/23] KVM: PPC: Book3S HV: Nested: Context switch slb for nested hpt guest
Date: Mon, 26 Aug 2019 16:21:00 +1000	[thread overview]
Message-ID: <20190826062109.7573-15-sjitindarsingh@gmail.com> (raw)
In-Reply-To: <20190826062109.7573-1-sjitindarsingh@gmail.com>

A version 2 of the H_ENTER_NESTED hcall was added with an argument to
specify the slb entries which should be used to run the nested guest.

Add support for this version of the hcall structures to
kvmhv_enter_nested_guest() and context switch the slb when the nested
guest being run is a hpt (hash page table) guest.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s_hv_nested.c | 84 ++++++++++++++++++++++++++++++++++---
 1 file changed, 79 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 82690eafee77..883f8896ed60 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -104,6 +104,28 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
 	hr->ppr = swab64(hr->ppr);
 }
 
+static void byteswap_guest_slb(struct guest_slb *slbp)
+{
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		slbp->slb[i].esid = swab64(slbp->slb[i].esid);
+		slbp->slb[i].vsid = swab64(slbp->slb[i].vsid);
+		slbp->slb[i].orige = swab64(slbp->slb[i].orige);
+		slbp->slb[i].origv = swab64(slbp->slb[i].origv);
+		slbp->slb[i].valid = swab32(slbp->slb[i].valid);
+		slbp->slb[i].Ks = swab32(slbp->slb[i].Ks);
+		slbp->slb[i].Kp = swab32(slbp->slb[i].Kp);
+		slbp->slb[i].nx = swab32(slbp->slb[i].nx);
+		slbp->slb[i].large = swab32(slbp->slb[i].large);
+		slbp->slb[i].tb = swab32(slbp->slb[i].tb);
+		slbp->slb[i].class = swab32(slbp->slb[i].class);
+		/* base_page_size is u8 thus no need to byteswap */
+	}
+	slbp->slb_max = swab64(slbp->slb_max);
+	slbp->slb_nr = swab64(slbp->slb_nr);
+}
+
 static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
 				 struct hv_guest_state *hr)
 {
@@ -238,12 +260,13 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
 
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 {
-	long int err, r;
+	long int err, r, ret = H_SUCCESS;
 	struct kvm_nested_guest *l2;
 	struct pt_regs l2_regs, saved_l1_regs;
 	struct hv_guest_state l2_hv, saved_l1_hv;
+	struct guest_slb *l2_slb = NULL, *saved_l1_slb = NULL;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
-	u64 hv_ptr, regs_ptr;
+	u64 hv_ptr, regs_ptr, slb_ptr = 0UL;
 	u64 hdec_exp;
 	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
 	u64 mask;
@@ -261,7 +284,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 		return H_PARAMETER;
 	if (kvmppc_need_byteswap(vcpu))
 		byteswap_hv_regs(&l2_hv);
-	if (l2_hv.version != 1)
+	/* Do we support the guest version of the argument structures */
+	if ((l2_hv.version > HV_GUEST_STATE_MAX_VERSION) ||
+			(l2_hv.version < HV_GUEST_STATE_MIN_VERSION))
 		return H_P2;
 
 	regs_ptr = kvmppc_get_gpr(vcpu, 5);
@@ -296,6 +321,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 			return H_PARAMETER;
 	} else {
-		return H_PARAMETER;
+		/* must be at least V2 to support hpt guest */
+		if (l2_hv.version < 2)
+			return H_PARAMETER;
+		/* hpt doesn't support gtse or uprt, and requires vpm */
 		if ((l2_hv.lpcr & LPCR_HR) || (l2_hv.lpcr & LPCR_GTSE) ||
 					      (l2_hv.lpcr & LPCR_UPRT) ||
@@ -307,6 +335,26 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
 	saved_l1_regs = vcpu->arch.regs;
 	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
+	/* if running hpt then context switch the slb in the vcpu struct */
+	if (!radix) {
+		slb_ptr = kvmppc_get_gpr(vcpu, 6);
+		l2_slb = kzalloc(sizeof(*l2_slb), GFP_KERNEL);
+		saved_l1_slb = kzalloc(sizeof(*saved_l1_slb), GFP_KERNEL);
+
+		if ((!l2_slb) || (!saved_l1_slb)) {
+			ret = H_HARDWARE;
+			goto out_free;
+		}
+		err = kvm_vcpu_read_guest(vcpu, slb_ptr, l2_slb,
+					  sizeof(struct guest_slb));
+		if (err) {
+			ret = H_PARAMETER;
+			goto out_free;
+		}
+		if (kvmppc_need_byteswap(vcpu))
+			byteswap_guest_slb(l2_slb);
+		kvmhv_save_guest_slb(vcpu, saved_l1_slb);
+	}
 
 	/* convert TB values/offsets to host (L0) values */
 	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
@@ -323,6 +371,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
 	sanitise_hv_regs(vcpu, &l2_hv);
 	restore_hv_regs(vcpu, &l2_hv);
+	if (!radix)
+		kvmhv_restore_guest_slb(vcpu, l2_slb);
 
 	vcpu->arch.ret = RESUME_GUEST;
 	vcpu->arch.trap = 0;
@@ -332,8 +382,11 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 			r = RESUME_HOST;
 			break;
 		}
-		r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
-					  lpcr);
+		if (radix)
+			r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu,
+						  hdec_exp, lpcr);
+		else
+			r = RESUME_HOST; /* XXX TODO hpt entry path */
 	} while (is_kvmppc_resume_guest(r));
 
 	/* save L2 state for return */
@@ -344,6 +397,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	delta_ic = vcpu->arch.ic - l2_hv.ic;
 	delta_vtb = vc->vtb - l2_hv.vtb;
 	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
+	if (!radix)
+		kvmhv_save_guest_slb(vcpu, l2_slb);
 
 	/* restore L1 state */
 	vcpu->arch.nested = NULL;
@@ -354,6 +409,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.shregs.msr |= MSR_TS_S;
 	vc->tb_offset = saved_l1_hv.tb_offset;
 	restore_hv_regs(vcpu, &saved_l1_hv);
+	if (!radix)
+		kvmhv_restore_guest_slb(vcpu, saved_l1_slb);
 	vcpu->arch.purr += delta_purr;
 	vcpu->arch.spurr += delta_spurr;
 	vcpu->arch.ic += delta_ic;
@@ -363,9 +420,21 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 
 	/* copy l2_hv_state and regs back to guest */
 	if (kvmppc_need_byteswap(vcpu)) {
+		if (!radix)
+			byteswap_guest_slb(l2_slb);
 		byteswap_hv_regs(&l2_hv);
 		byteswap_pt_regs(&l2_regs);
 	}
+	if (!radix) {
+		err = kvm_vcpu_write_guest(vcpu, slb_ptr, l2_slb,
+					   sizeof(struct guest_slb));
+		if (err) {
+			ret = H_AUTHORITY;
+			goto out_free;
+		}
+		kfree(l2_slb);
+		kfree(saved_l1_slb);
+	}
 	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
 				   sizeof(struct hv_guest_state));
 	if (err)
@@ -384,6 +453,11 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	}
 
 	return vcpu->arch.trap;
+
+out_free:
+	kfree(l2_slb);
+	kfree(saved_l1_slb);
+	return ret;
 }
 
 long kvmhv_nested_init(void)
-- 
2.13.6


  parent reply	other threads:[~2019-08-26  6:21 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-26  6:20 [PATCH 00/23] KVM: PPC: Book3S HV: Support for nested HPT guests Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 01/23] KVM: PPC: Book3S HV: Use __gfn_to_pfn_memslot in HPT page fault handler Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 02/23] KVM: PPC: Book3S HV: Increment mmu_notifier_seq when modifying radix pte rc bits Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 03/23] KVM: PPC: Book3S HV: Nested: Don't allow hash guests to run nested guests Suraj Jitindar Singh
2019-10-23  4:47   ` Paul Mackerras
2019-08-26  6:20 ` [PATCH 04/23] KVM: PPC: Book3S HV: Handle making H_ENTER_NESTED hcall in a separate function Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 05/23] KVM: PPC: Book3S HV: Enable calling kvmppc_hpte_hv_fault in virtual mode Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 06/23] KVM: PPC: Book3S HV: Allow hpt manipulation hcalls to be called " Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 07/23] KVM: PPC: Book3S HV: Make kvmppc_invalidate_hpte() take lpid not a kvm struct Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 08/23] KVM: PPC: Book3S HV: Nested: Allow pseries hypervisor to run hpt nested guest Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 09/23] KVM: PPC: Book3S HV: Nested: Improve comments and naming of nest rmap functions Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 10/23] KVM: PPC: Book3S HV: Nested: Increase gpa field in nest rmap to 46 bits Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 11/23] KVM: PPC: Book3S HV: Nested: Remove single nest rmap entries Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 12/23] KVM: PPC: Book3S HV: Nested: add kvmhv_remove_all_nested_rmap_lpid() Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 13/23] KVM: PPC: Book3S HV: Nested: Infrastructure for nested hpt guest setup Suraj Jitindar Singh
2019-10-24  3:43   ` Paul Mackerras
2019-08-26  6:21 ` Suraj Jitindar Singh [this message]
2019-10-24  4:48   ` [PATCH 14/23] KVM: PPC: Book3S HV: Nested: Context switch slb for nested hpt guest Paul Mackerras
2019-08-26  6:21 ` [PATCH 15/23] KVM: PPC: Book3S HV: Store lpcr and hdec_exp in the vcpu struct Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 16/23] KVM: PPC: Book3S HV: Nested: Make kvmppc_run_vcpu() entry path nested capable Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 17/23] KVM: PPC: Book3S HV: Nested: Rename kvmhv_xlate_addr_nested_radix Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 18/23] KVM: PPC: Book3S HV: Separate out hashing from kvmppc_hv_find_lock_hpte() Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 19/23] KVM: PPC: Book3S HV: Nested: Implement nested hpt mmu translation Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 20/23] KVM: PPC: Book3S HV: Nested: Handle tlbie hcall for nested hpt guest Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 21/23] KVM: PPC: Book3S HV: Nested: Implement nest rmap invalidations for hpt guests Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 22/23] KVM: PPC: Book3S HV: Nested: Enable nested " Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 23/23] KVM: PPC: Book3S HV: Add nested hpt pte information to debugfs Suraj Jitindar Singh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190826062109.7573-15-sjitindarsingh@gmail.com \
    --to=sjitindarsingh@gmail.com \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=paulus@ozlabs.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox