From: Janosch Frank <frankja@linux.vnet.ibm.com>
To: kvm@vger.kernel.org
Cc: schwidefsky@de.ibm.com, borntraeger@de.ibm.com, david@redhat.com,
	dominik.dingel@gmail.com, linux-s390@vger.kernel.org
Subject: [RFC/PATCH v3 08/16] s390/mm: Make protect_rmap EDAT1 compatible
Date: Fri,  9 Feb 2018 10:34:16 +0100
Message-ID: <1518168864-147803-9-git-send-email-frankja@linux.vnet.ibm.com>
In-Reply-To: <1518168864-147803-1-git-send-email-frankja@linux.vnet.ibm.com>

When shadowing, we must make sure that any changes to the GMAP inside
guest N are directly reflected in our shadow GMAP. This is done by
write-protecting guest N's memory at the locations where it stores the
DAT tables for guest N + 1.

gmap_protect_rmap, which establishes this protection, still lacks EDAT1
(huge page) support, so let's add it.
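
For readers unfamiliar with the split-PMD handling below: when the
parent gmap maps guest memory with EDAT1 huge pages, a 4k DAT-table
page can only be write-protected after the huge PMD has been split,
and the PTE-level protection then has to be mirrored into the host
segment entry. The following standalone sketch only models that bit
transfer (see gmap_pte_transfer_prot() in the diff); the constants are
simplified stand-ins, not the real s390 definitions.

#include <stdio.h>
#include <stdint.h>

/* Stand-in bit values, not the actual s390 page/segment definitions. */
#define PAGE_PROTECT		0x200UL	/* stand-in for _PAGE_PROTECT */
#define PAGE_INVALID		0x400UL	/* stand-in for _PAGE_INVALID */
#define SEGMENT_ENTRY_PROTECT	0x200UL	/* stand-in for _SEGMENT_ENTRY_PROTECT */
#define SEGMENT_ENTRY_INVALID	0x020UL	/* stand-in for _SEGMENT_ENTRY_INVALID */

/* Mirror PTE-level protect/invalid state into the host segment entry. */
static uint64_t transfer_prot(uint64_t guest_pte, uint64_t host_pmd)
{
	if (guest_pte & PAGE_PROTECT)
		host_pmd |= SEGMENT_ENTRY_PROTECT;
	if (guest_pte & PAGE_INVALID)
		host_pmd |= SEGMENT_ENTRY_INVALID;
	return host_pmd;
}

int main(void)
{
	uint64_t gpte = PAGE_PROTECT;	/* a write-protected guest PTE */
	uint64_t hpmd = 0;		/* the host segment table entry */

	hpmd = transfer_prot(gpte, hpmd);
	printf("host pmd after transfer: %#llx\n",
	       (unsigned long long)hpmd);
	return 0;
}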

Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
---
 arch/s390/mm/gmap.c | 97 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 77 insertions(+), 20 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 5699770..66789e2 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1096,6 +1096,18 @@ static void gmap_pmdp_transfer_prot(struct mm_struct *mm, unsigned long addr,
 	*hpmdp = new;
 }
 
+static void gmap_pte_transfer_prot(struct mm_struct *mm, unsigned long addr,
+				   pte_t *gptep, pmd_t *hpmdp)
+{
+	pmd_t mpmd = __pmd(0);
+
+	if (pte_val(*gptep) & _PAGE_PROTECT)
+		pmd_val(mpmd) |= _SEGMENT_ENTRY_PROTECT;
+	if (pte_val(*gptep) & _PAGE_INVALID)
+		pmd_val(mpmd) |= _SEGMENT_ENTRY_INVALID;
+	gmap_pmdp_transfer_prot(mm, addr, &mpmd, hpmdp);
+}
+
 /*
  * gmap_protect_pmd - set pmd notification bits
  * @pmdp: pointer to the pmd to be protected
@@ -1141,7 +1153,8 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
  * guest_table_lock held for shadow gmaps.
  */
 static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
-			    pmd_t *pmdp, int prot, unsigned long bits)
+			    unsigned long vmaddr, pmd_t *pmdp, pmd_t *hpmdp,
+			    int prot, unsigned long bits)
 {
 	int rc;
 	pte_t *ptep;
@@ -1157,6 +1170,8 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
 	gmap_pte_op_end(ptl);
+	if (!rc && gmap_pmd_is_split(pmdp))
+		gmap_pte_transfer_prot(gmap->mm, vmaddr, ptep, hpmdp);
 	return rc;
 }
 
@@ -1178,17 +1193,25 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 			      unsigned long len, int prot, unsigned long bits)
 {
+	spinlock_t *ptl;
 	unsigned long vmaddr;
-	pmd_t *pmdp;
+	pmd_t *pmdp, *hpmdp;
 	int rc;
 
 	while (len) {
 		rc = -EAGAIN;
+		vmaddr = __gmap_translate(gmap, gaddr);
+		hpmdp = (pmd_t *)huge_pte_offset(gmap->mm, vmaddr, HPAGE_SIZE);
+		if (!hpmdp)
+			BUG();
+		/* Do we need tests here? */
+		ptl = pmd_lock(gmap->mm, hpmdp);
+
 		pmdp = gmap_pmd_op_walk(gmap, gaddr);
 		if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
 			if (!pmd_large(*pmdp)) {
-				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
-						      bits);
+				rc = gmap_protect_pte(gmap, gaddr, vmaddr,
+						      pmdp, hpmdp, prot, bits);
 				if (!rc) {
 					len -= PAGE_SIZE;
 					gaddr += PAGE_SIZE;
@@ -1200,6 +1223,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 			}
 			gmap_pmd_op_end(gmap, pmdp);
 		}
+		spin_unlock(ptl);
 		if (rc && rc != -EFAULT) {
 			vmaddr = __gmap_translate(gmap, gaddr);
 			if (IS_ERR_VALUE(vmaddr))
@@ -1268,7 +1292,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val,
 		pmdp = gmap_pmd_op_walk(gmap, gaddr);
 		if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
 			if (!pmd_large(*pmdp)) {
-				ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
+				ptep = gmap_pte_from_pmd(gmap, pmdp, gaddr, &ptl);
 				if (ptep) {
 					pte = *ptep;
 					if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
@@ -1331,6 +1355,28 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 	}
 }
 
+static int gmap_protect_rmap_pte(struct gmap *sg, struct gmap_rmap *rmap,
+				 unsigned long paddr, unsigned long vmaddr,
+				 pmd_t *pmdp, pmd_t *hpmdp, int prot)
+{
+	int rc = 0;
+	pte_t *ptep = NULL;
+	spinlock_t *ptl = NULL;
+
+	ptep = gmap_pte_from_pmd(sg->parent, pmdp, paddr, &ptl);
+	if (unlikely(!ptep))
+		return -ENOMEM;
+
+	spin_lock(&sg->guest_table_lock);
+	rc = gmap_protect_pte(sg->parent, paddr, vmaddr, pmdp, hpmdp,
+			      prot, GMAP_NOTIFY_SHADOW);
+	if (!rc)
+		gmap_insert_rmap(sg, vmaddr, rmap);
+	spin_unlock(&sg->guest_table_lock);
+	gmap_pte_op_end(ptl);
+	return rc;
+}
+
 /**
  * gmap_protect_rmap - modify access rights to memory and create an rmap
  * @sg: pointer to the shadow guest address space structure
@@ -1348,8 +1394,8 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
 	unsigned long vmaddr;
+	pmd_t *pmdp, *hpmdp;
 	spinlock_t *ptl;
-	pte_t *ptep;
 	int rc;
 
 	BUG_ON(!gmap_is_shadow(sg));
@@ -1358,36 +1404,47 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr))
 			return vmaddr;
+		hpmdp = (pmd_t *)huge_pte_offset(parent->mm, vmaddr, HPAGE_SIZE);
+		ptl = pmd_lock(parent->mm, hpmdp);
 		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
-		if (!rmap)
+		if (!rmap) {
+			spin_unlock(ptl);
 			return -ENOMEM;
+		}
 		rmap->raddr = raddr;
 		rc = radix_tree_preload(GFP_KERNEL);
 		if (rc) {
+			spin_unlock(ptl);
 			kfree(rmap);
 			return rc;
 		}
 		rc = -EAGAIN;
-		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
-		if (ptep) {
-			spin_lock(&sg->guest_table_lock);
-			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
-					     GMAP_NOTIFY_SHADOW);
-			if (!rc)
-				gmap_insert_rmap(sg, vmaddr, rmap);
-			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+		pmdp = gmap_pmd_op_walk(parent, paddr);
+		if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
+			if (!pmd_large(*pmdp)) {
+				rc = gmap_protect_rmap_pte(sg, rmap, paddr,
+							   vmaddr, pmdp, hpmdp,
+							   prot);
+				if (!rc) {
+					paddr += PAGE_SIZE;
+					len -= PAGE_SIZE;
+				}
+			} else {
+				rc = gmap_pmd_split(parent, paddr, pmdp);
+				if (!rc)
+					rc = -EFAULT;
+			}
+			gmap_pmd_op_end(parent, pmdp);
 		}
+		spin_unlock(ptl);
 		radix_tree_preload_end();
-		if (rc) {
+		if (rc)
 			kfree(rmap);
+		if (rc == -EAGAIN) {
 			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
 			if (rc)
 				return rc;
-			continue;
 		}
-		paddr += PAGE_SIZE;
-		len -= PAGE_SIZE;
 	}
 	return 0;
 }
-- 
2.7.4

Thread overview: 49+ messages
2018-02-09  9:34 [RFC/PATCH v3 00/16] KVM/s390: Hugetlbfs enablement Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 01/16] s390/mm: make gmap_protect_range more modular Janosch Frank
2018-02-13 14:07   ` David Hildenbrand
2018-02-09  9:34 ` [RFC/PATCH v3 02/16] s390/mm: Abstract gmap notify bit setting Janosch Frank
2018-02-13 14:10   ` David Hildenbrand
2018-02-13 14:31     ` Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 03/16] s390/mm: Introduce gmap_pmdp_xchg Janosch Frank
2018-02-13 14:16   ` David Hildenbrand
2018-02-13 14:39     ` Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 04/16] s390/mm: add gmap PMD invalidation notification Janosch Frank
2018-02-13 14:36   ` David Hildenbrand
2018-02-13 14:54     ` Janosch Frank
2018-02-13 14:59       ` David Hildenbrand
2018-02-13 15:33         ` Janosch Frank
2018-02-14 10:42           ` David Hildenbrand
2018-02-14 11:19             ` Janosch Frank
2018-02-14 14:18               ` David Hildenbrand
2018-02-14 14:55                 ` Janosch Frank
2018-02-14 15:15                   ` David Hildenbrand
2018-02-14 15:24                     ` Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 05/16] s390/mm: Add gmap pmd invalidation and clearing Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 06/16] s390/mm: Add huge page dirty sync support Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 07/16] s390/mm: Make gmap_read_table EDAT1 compatible Janosch Frank
2018-02-09  9:34 ` Janosch Frank [this message]
2018-02-09  9:34 ` [RFC/PATCH v3 09/16] s390/mm: Add shadow segment code Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 10/16] s390/mm: Add VSIE reverse fake case Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 11/16] s390/mm: Enable gmap huge pmd support Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 12/16] s390/mm: clear huge page storage keys on enable_skey Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 13/16] s390/mm: Add huge pmd storage key handling Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 14/16] s390/mm: hugetlb pages within a gmap can not be freed Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 15/16] KVM: s390: Add KVM HPAGE capability Janosch Frank
2018-02-09  9:34 ` [RFC/PATCH v3 16/16] s390/mm: Add gmap lock classes Janosch Frank
2018-02-14 14:30 ` [RFC/PATCH v3 00/16] KVM/s390: Hugetlbfs enablement David Hildenbrand
2018-02-14 15:01   ` Janosch Frank
2018-02-14 15:07     ` David Hildenbrand
2018-02-14 15:33       ` Janosch Frank
2018-02-14 15:48         ` Christian Borntraeger
2018-02-14 15:57           ` David Hildenbrand
2018-02-14 15:56         ` David Hildenbrand
2018-02-15 15:43           ` [PATCH 0/3] Hpage capability rework Janosch Frank
2018-02-15 15:43             ` [PATCH 1/3] KVM: s390: Refactor host cmma and pfmfi interpretation controls Janosch Frank
2018-02-15 16:08               ` David Hildenbrand
2018-02-15 16:42                 ` Janosch Frank
2018-02-16  9:46                   ` David Hildenbrand
2018-02-15 15:43             ` [PATCH 2/3] KVM: s390: Add storage key facility interpretation control Janosch Frank
2018-02-15 16:09               ` David Hildenbrand
2018-02-15 20:27               ` Farhan Ali
2018-02-15 15:43             ` [PATCH 3/3] s390/mm: Enable gmap huge pmd support Janosch Frank
2018-02-15 16:10               ` David Hildenbrand
