From: Janosch Frank <frankja@linux.vnet.ibm.com>
To: kvm@vger.kernel.org
Cc: schwidefsky@de.ibm.com, borntraeger@de.ibm.com, david@redhat.com,
	dominik.dingel@gmail.com, linux-s390@vger.kernel.org
Subject: [RFC/PATCH v2 16/22] s390/mm: Add shadow segment code
Date: Wed, 13 Dec 2017 13:53:27 +0100
Message-ID: <1513169613-13509-17-git-send-email-frankja@linux.vnet.ibm.com>
In-Reply-To: <1513169613-13509-1-git-send-email-frankja@linux.vnet.ibm.com>

The VSIE code does not yet support shadowing when the host backs the
guest with huge (1 MB) pages. Let's add large to large shadowing: if
the parent is mapped with huge pages and the guest's segment table
entry is large as well, shadowing stops at segment level and the
parent pmd is shadowed directly instead of building a shadow page
table.
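
A rough sketch of the resulting fault-path decision, condensed from the
gaccess.c and gmap.c hunks below (illustrative only, not part of the
patch; locking and most error handling are omitted):

	rc = gmap_shadow_sgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake, &lvl);
	if (lvl) {
		/* Parent mapping and guest STE are both large: fetch the
		 * guest's segment table entry and shadow the pmd directly. */
		rc = gmap_read_table(sg->parent, pgt + vaddr.sx * 8,
				     &ste.val, &fc);
		if (!rc)
			rc = gmap_shadow_segment(sg, saddr, __pmd(ste.val));
	} else {
		/* Small and "fake" segment cases: keep shadowing at page
		 * table level via gmap_shadow_page() as before. */
		...
	}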

Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
---
 arch/s390/include/asm/gmap.h |   6 +-
 arch/s390/kvm/gaccess.c      |  35 +++++-
 arch/s390/mm/gmap.c          | 292 +++++++++++++++++++++++++++++++++++++------
 3 files changed, 292 insertions(+), 41 deletions(-)

diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 24c2f86..5247549 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -130,9 +130,11 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 		    int fake);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 		    int fake);
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
-			   unsigned long *pgt, int *dat_protection, int *fake);
+int gmap_shadow_sgt_lookup(struct gmap *sg, unsigned long saddr,
+			   unsigned long *pgt, int *dat_protection,
+			   int *fake);
 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
+int gmap_shadow_segment(struct gmap *sg, unsigned long saddr, pmd_t pmd);
 
 void gmap_register_pte_notifier(struct gmap_notifier *);
 void gmap_unregister_pte_notifier(struct gmap_notifier *);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ebea9ec..045d12e 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -981,7 +981,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 				  unsigned long *pgt, int *dat_protection,
-				  int *fake)
+				  int *fake, int *lvl)
 {
 	struct gmap *parent;
 	union asce asce;
@@ -1136,6 +1136,17 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		if (ste.cs && asce.p)
 			return PGM_TRANSLATION_SPEC;
 		*dat_protection |= ste.fc0.p;
+
+		/* Parent is mapped by huge pages. */
+		if (fc) {
+			/* Guest is also huge, easy case. */
+			if (ste.fc && sg->edat_level >= 1) {
+				*lvl = 1;
+				*pgt = ptr;
+				return 0;
+			}
+		}
+		/* Small to small and small to huge case */
 		if (ste.fc && sg->edat_level >= 1) {
 			*fake = 1;
 			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
@@ -1172,8 +1183,9 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 {
 	union vaddress vaddr;
 	union page_table_entry pte;
+	union segment_table_entry ste;
 	unsigned long pgt;
-	int dat_protection, fake;
+	int dat_protection, fake, lvl, fc;
 	int rc;
 
 	down_read(&sg->mm->mmap_sem);
@@ -1184,12 +1196,26 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	 */
 	ipte_lock(vcpu);
 
-	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
+	rc = gmap_shadow_sgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 	if (rc)
 		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
-					    &fake);
+					    &fake, &lvl);
 
 	vaddr.addr = saddr;
+
+	/* Shadow stopped at segment level, we map pmd to pmd */
+	if (lvl) {
+		if (!rc)
+			rc = gmap_read_table(sg->parent, pgt + vaddr.sx * 8,
+					     &ste.val, &fc);
+		if (!rc && ste.i)
+			rc = PGM_PAGE_TRANSLATION;
+		ste.fc1.p |= dat_protection;
+		if (!rc)
+			rc = gmap_shadow_segment(sg, saddr, __pmd(ste.val));
+		goto out;
+	}
+
 	if (fake) {
 		pte.val = pgt + vaddr.px * PAGE_SIZE;
 		goto shadow_page;
@@ -1204,6 +1230,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	pte.p |= dat_protection;
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
+out:
 	ipte_unlock(vcpu);
 	up_read(&sg->mm->mmap_sem);
 	return rc;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 8833d2a..b3d01d9 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1279,7 +1279,7 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 
 static int gmap_protect_rmap_pmd(struct gmap *sg, struct gmap_rmap *rmap,
 				 unsigned long paddr, unsigned long vmaddr,
-				 pmd_t *pmdp, int prot)
+				 pmd_t *pmdp, pmd_t *hpmdp, int prot)
 {
 	int rc = 0;
 
@@ -1288,8 +1288,8 @@ static int gmap_protect_rmap_pmd(struct gmap *sg, struct gmap_rmap *rmap,
 		return -EAGAIN;
 
 	spin_lock(&sg->guest_table_lock);
-	rc = gmap_protect_large(sg->parent, paddr, pmdp,
-				prot, GMAP_NOTIFY_SHADOW);
+	rc = gmap_protect_pmd(sg->parent, paddr, vmaddr, pmdp, hpmdp,
+			      prot, GMAP_NOTIFY_SHADOW);
 	if (!rc)
 		gmap_insert_rmap(sg, vmaddr & HPAGE_MASK, rmap);
 
@@ -1339,7 +1339,8 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
 	unsigned long vmaddr, dist;
-	pmd_t *pmdp;
+	spinlock_t *ptl;
+	pmd_t *pmdp, *hpmdp;
 	int rc;
 
 	BUG_ON(!gmap_is_shadow(sg));
@@ -1348,12 +1349,18 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr))
 			return vmaddr;
+		hpmdp = (pmd_t *)huge_pte_offset(parent->mm, vmaddr, HPAGE_SIZE);
+		/* Do we need tests here? */
+		ptl = pmd_lock(parent->mm, hpmdp);
 		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
-		if (!rmap)
+		if (!rmap) {
+			spin_unlock(ptl);
 			return -ENOMEM;
+		}
 		rmap->raddr = raddr;
 		rc = radix_tree_preload(GFP_KERNEL);
 		if (rc) {
+			spin_unlock(ptl);
 			kfree(rmap);
 			return rc;
 		}
@@ -1369,7 +1376,8 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 				}
 			} else {
 				rc = gmap_protect_rmap_pmd(sg, rmap, paddr,
-							   vmaddr, pmdp, prot);
+							   vmaddr, pmdp,
+							   hpmdp, prot);
 				if (!rc) {
 					dist = HPAGE_SIZE - (paddr & ~HPAGE_MASK);
 					len = len < dist ? 0 : len - dist;
@@ -1378,6 +1386,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			}
 			gmap_pmd_op_end(parent, pmdp);
 		}
+		spin_unlock(ptl);
 		radix_tree_preload_end();
 		if (rc) {
 			kfree(rmap);
@@ -1391,6 +1400,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 }
 
 #define _SHADOW_RMAP_MASK	0x7
+#define _SHADOW_RMAP_SEGMENT_LP	0x6
 #define _SHADOW_RMAP_REGION1	0x5
 #define _SHADOW_RMAP_REGION2	0x4
 #define _SHADOW_RMAP_REGION3	0x3
@@ -1498,13 +1508,16 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
 		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
 			continue;
-		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
+
+		if (!(sgt[i] & _SEGMENT_ENTRY_LARGE)) {
+			pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
+			__gmap_unshadow_pgt(sg, raddr, pgt);
+			/* Free page table */
+			page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
+			list_del(&page->lru);
+			page_table_free_pgste(page);
+		}
 		sgt[i] = _SEGMENT_ENTRY_EMPTY;
-		__gmap_unshadow_pgt(sg, raddr, pgt);
-		/* Free page table */
-		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
-		list_del(&page->lru);
-		page_table_free_pgste(page);
 	}
 }
 
@@ -2119,32 +2132,62 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
+void gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long *sge,
+			    unsigned long saddr, unsigned long *pgt,
+			    int *dat_protection, int *fake)
+{
+	struct page *page;
+
+	/* Shadow page tables are full pages (pte+pgste) */
+	page = pfn_to_page(*sge >> PAGE_SHIFT);
+	*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+	*dat_protection = !!(*sge & _SEGMENT_ENTRY_PROTECT);
+	*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
+
+int gmap_shadow_sgt_lookup(struct gmap *sg, unsigned long saddr,
 			   unsigned long *pgt, int *dat_protection,
 			   int *fake)
 {
-	unsigned long *table;
+	unsigned long *sge, *r3e = NULL;
 	struct page *page;
-	int rc;
+	int rc = -EAGAIN;
 
 	BUG_ON(!gmap_is_shadow(sg));
 	spin_lock(&sg->guest_table_lock);
-	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
-	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
-		/* Shadow page tables are full pages (pte+pgste) */
-		page = pfn_to_page(*table >> PAGE_SHIFT);
-		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
-		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
-		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
-		rc = 0;
-	} else  {
-		rc = -EAGAIN;
+	if (sg->asce & _ASCE_TYPE_MASK) {
+		/* >2 GB guest */
+		r3e = (unsigned long *) gmap_table_walk(sg, saddr, 2);
+		if (!r3e || (*r3e & _REGION_ENTRY_INVALID))
+			goto out;
+		sge = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN) + ((saddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT);
+	} else {
+		sge = (unsigned long *)(sg->asce & PAGE_MASK) + ((saddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT);
 	}
+	if (*sge & _SEGMENT_ENTRY_INVALID)
+		goto out;
+	rc = 0;
+	if (*sge & _SEGMENT_ENTRY_LARGE) {
+		if (r3e) {
+			page = pfn_to_page(*r3e >> PAGE_SHIFT);
+			*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+			*dat_protection = !!(*r3e & _SEGMENT_ENTRY_PROTECT);
+			*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+		} else {
+			*pgt = sg->orig_asce & PAGE_MASK;
+			*dat_protection = 0;
+			*fake = 0;
+		}
+	} else {
+		gmap_shadow_pgt_lookup(sg, sge, saddr, pgt,
+				       dat_protection, fake);
+	}
+out:
 	spin_unlock(&sg->guest_table_lock);
 	return rc;
-
 }
-EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
+EXPORT_SYMBOL_GPL(gmap_shadow_sgt_lookup);
 
 /**
  * gmap_shadow_pgt - instantiate a shadow page table
@@ -2226,6 +2269,89 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
 
+int gmap_shadow_segment(struct gmap *sg, unsigned long saddr, pmd_t pmd)
+{
+	struct gmap *parent;
+	struct gmap_rmap *rmap;
+	unsigned long vmaddr, paddr;
+	pmd_t spmd, tpmd, *spmdp = NULL, *tpmdp;
+	int prot;
+	int rc;
+
+	BUG_ON(!gmap_is_shadow(sg));
+	parent = sg->parent;
+
+	prot = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) ? PROT_READ : PROT_WRITE;
+	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
+	if (!rmap)
+		return -ENOMEM;
+	rmap->raddr = (saddr & HPAGE_MASK) | _SHADOW_RMAP_SEGMENT_LP;
+
+	while (1) {
+		paddr = pmd_val(pmd) & HPAGE_MASK;
+		vmaddr = __gmap_translate(parent, paddr);
+		if (IS_ERR_VALUE(vmaddr)) {
+			rc = vmaddr;
+			break;
+		}
+		rc = radix_tree_preload(GFP_KERNEL);
+		if (rc)
+			break;
+		rc = -EAGAIN;
+
+		/* Let's look up the parent's mapping */
+		spmdp = gmap_pmd_op_walk(parent, paddr);
+		if (spmdp) {
+			spin_lock(&sg->guest_table_lock);
+			/* Get shadow segment table pointer */
+			tpmdp = (pmd_t *) gmap_table_walk(sg, saddr, 1);
+			if (!tpmdp) {
+				spin_unlock(&sg->guest_table_lock);
+				gmap_pmd_op_end(parent, spmdp);
+				radix_tree_preload_end();
+				break;
+			}
+			/* Shadowing magic happens here. */
+			if (!(pmd_val(*tpmdp) & _SEGMENT_ENTRY_INVALID)) {
+				rc = 0;	/* already shadowed */
+				spin_unlock(&sg->guest_table_lock);
+				gmap_pmd_op_end(parent, spmdp);
+				radix_tree_preload_end();
+				break;
+			}
+			spmd = *spmdp;
+			if (!(pmd_val(spmd) & _SEGMENT_ENTRY_INVALID) &&
+			    !((pmd_val(spmd) & _SEGMENT_ENTRY_PROTECT) &&
+			      !(pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT))) {
+
+				*spmdp = __pmd(pmd_val(spmd)
+					       | _SEGMENT_ENTRY_GMAP_VSIE);
+
+				/* Insert shadow ste */
+				pmd_val(tpmd) = ((pmd_val(spmd) & HPAGE_MASK) |
+						 _SEGMENT_ENTRY_LARGE |
+						 (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT));
+				*tpmdp = tpmd;
+
+				gmap_insert_rmap(sg, vmaddr, rmap);
+				rc = 0;
+			}
+			spin_unlock(&sg->guest_table_lock);
+			gmap_pmd_op_end(parent, spmdp);
+		}
+		radix_tree_preload_end();
+		if (!rc)
+			break;
+		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+		if (rc)
+			break;
+	}
+	if (rc)
+		kfree(rmap);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_segment);
+
 /**
  * gmap_shadow_page - create a shadow page mapping
  * @sg: pointer to the shadow guest address space structure
@@ -2302,6 +2428,78 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 EXPORT_SYMBOL_GPL(gmap_shadow_page);
 
 /**
+ * gmap_unshadow_segment - remove a huge segment from a shadow segment table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_segment(struct gmap *sg, unsigned long raddr)
+{
+	unsigned long *table;
+
+	BUG_ON(!gmap_is_shadow(sg));
+	/* We already have the lock */
+	table = gmap_table_walk(sg, raddr, 1); /* get segment table pointer */
+	if (!table || *table & _SEGMENT_ENTRY_INVALID ||
+	    !(*table & _SEGMENT_ENTRY_LARGE))
+		return;
+	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
+	gmap_pmdp_xchg(sg, (pmd_t *)table, __pmd(_SEGMENT_ENTRY_EMPTY), raddr);
+}
+
+static void gmap_shadow_notify_pmd(struct gmap *sg, unsigned long vmaddr,
+				   unsigned long gaddr)
+{
+	struct gmap_rmap *rmap, *rnext, *head;
+	unsigned long start, end, bits, raddr;
+
+
+	BUG_ON(!gmap_is_shadow(sg));
+
+	spin_lock(&sg->guest_table_lock);
+	if (sg->removed) {
+		spin_unlock(&sg->guest_table_lock);
+		return;
+	}
+	/* Check for top level table */
+	start = sg->orig_asce & _ASCE_ORIGIN;
+	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
+	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
+	    gaddr < ((end & HPAGE_MASK) + HPAGE_SIZE - 1)) {
+		/* The complete shadow table has to go */
+		gmap_unshadow(sg);
+		spin_unlock(&sg->guest_table_lock);
+		list_del(&sg->list);
+		gmap_put(sg);
+		return;
+	}
+	/* Remove the page table tree for one specific entry */
+	head = radix_tree_delete(&sg->host_to_rmap, (vmaddr & HPAGE_MASK) >> PAGE_SHIFT);
+	gmap_for_each_rmap_safe(rmap, rnext, head) {
+		bits = rmap->raddr & _SHADOW_RMAP_MASK;
+		raddr = rmap->raddr ^ bits;
+		switch (bits) {
+		case _SHADOW_RMAP_REGION1:
+			gmap_unshadow_r2t(sg, raddr);
+			break;
+		case _SHADOW_RMAP_REGION2:
+			gmap_unshadow_r3t(sg, raddr);
+			break;
+		case _SHADOW_RMAP_REGION3:
+			gmap_unshadow_sgt(sg, raddr);
+			break;
+		case _SHADOW_RMAP_SEGMENT_LP:
+			gmap_unshadow_segment(sg, raddr);
+			break;
+		}
+		kfree(rmap);
+	}
+	spin_unlock(&sg->guest_table_lock);
+}
+
+
+/**
  * gmap_shadow_notify - handle notifications for shadow gmap
  *
  * Called with sg->parent->shadow_lock.
@@ -2413,13 +2611,28 @@ EXPORT_SYMBOL_GPL(ptep_notify);
 static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long *table;
+	unsigned long vmaddr, bits;
+	struct gmap *sg, *next;
 
 	gaddr &= HPAGE_MASK;
 	table = gmap_table_walk(gmap, gaddr, 1);
-	if (!table || !(*table & _SEGMENT_ENTRY_GMAP_IN))
+	if (!table)
 		return;
-	*table &= ~_SEGMENT_ENTRY_GMAP_IN;
-	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+	bits = *table & (_SEGMENT_ENTRY_GMAP_IN | _SEGMENT_ENTRY_GMAP_VSIE);
+	if (!bits)
+		return;
+	*table ^= bits;
+	vmaddr = __gmap_translate(gmap, gaddr);
+	if (!list_empty(&gmap->children) && (bits & _SEGMENT_ENTRY_GMAP_VSIE)
+	    && (*table & _SEGMENT_ENTRY_PROTECT)) {
+		spin_lock(&gmap->shadow_lock);
+		list_for_each_entry_safe(sg, next,
+					 &gmap->children, list)
+			gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
+		spin_unlock(&gmap->shadow_lock);
+	}
+	if (bits & _SEGMENT_ENTRY_GMAP_IN)
+		gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
 }
 
 /**
@@ -2431,22 +2644,31 @@ static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
  */
 void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *table, gaddr;
-	struct gmap *gmap;
+	unsigned long *table, gaddr, bits;
+	struct gmap *gmap, *sg, *next;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
 		table = radix_tree_lookup(&gmap->host_to_guest,
 					  vmaddr >> PMD_SHIFT);
-		if (!table || !(*table & _SEGMENT_ENTRY_GMAP_IN)) {
+		if (!table) {
 			spin_unlock(&gmap->guest_table_lock);
 			continue;
 		}
+		bits = *table & (_SEGMENT_ENTRY_GMAP_IN | _SEGMENT_ENTRY_GMAP_VSIE);
+		*table ^= bits;
 		gaddr = __gmap_segment_gaddr(table);
-		*table &= ~_SEGMENT_ENTRY_GMAP_IN;
 		spin_unlock(&gmap->guest_table_lock);
-		gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+		if (!list_empty(&gmap->children) && (bits & _SEGMENT_ENTRY_GMAP_VSIE)) {
+			spin_lock(&gmap->shadow_lock);
+			list_for_each_entry_safe(sg, next,
+						 &gmap->children, list)
+				gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
+			spin_unlock(&gmap->shadow_lock);
+		}
+		if (bits & _SEGMENT_ENTRY_GMAP_IN)
+			gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
 	}
 	rcu_read_unlock();
 }
-- 
2.7.4

Thread overview: 67+ messages
2017-12-13 12:53 [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 01/22] s390/mm: make gmap_protect_range more modular Janosch Frank
2018-01-22 11:33   ` David Hildenbrand
2018-01-22 12:31     ` Janosch Frank
2018-01-22 12:50       ` David Hildenbrand
2018-01-22 13:02         ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 02/22] s390/mm: Abstract gmap notify bit setting Janosch Frank
2018-01-22 11:34   ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 03/22] s390/mm: add gmap PMD invalidation notification Janosch Frank
2017-12-21  9:24   ` Janosch Frank
2018-01-22 11:46   ` David Hildenbrand
2018-01-22 13:13     ` Janosch Frank
2018-01-22 13:29       ` David Hildenbrand
2018-01-22 14:04         ` Janosch Frank
2018-01-22 11:56   ` David Hildenbrand
2018-01-22 12:09     ` Janosch Frank
2018-01-22 12:12       ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 04/22] s390/mm: Add gmap pmd invalidation and clearing Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 05/22] s390/mm: hugetlb pages within a gmap can not be freed Janosch Frank
2018-01-24 13:45   ` David Hildenbrand
2018-01-24 13:56     ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 06/22] s390/mm: Introduce gmap_pmdp_xchg Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 07/22] RFC: s390/mm: Transfer guest pmd protection to host Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 08/22] s390/mm: Add huge page dirty sync support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 09/22] s390/mm: clear huge page storage keys on enable_skey Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 10/22] s390/mm: Add huge pmd storage key handling Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 11/22] s390/mm: Remove superfluous parameter Janosch Frank
2017-12-21  9:22   ` Janosch Frank
2018-01-16 12:39     ` Janosch Frank
2018-01-16 13:11   ` David Hildenbrand
2018-01-22 13:14   ` Christian Borntraeger
2018-01-22 13:24     ` Martin Schwidefsky
2017-12-13 12:53 ` [RFC/PATCH v2 12/22] s390/mm: Add gmap_protect_large read protection support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 13/22] s390/mm: Make gmap_read_table EDAT1 compatible Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 14/22] s390/mm: Make protect_rmap " Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 15/22] s390/mm: GMAP read table extensions Janosch Frank
2017-12-13 12:53 ` Janosch Frank [this message]
2017-12-13 12:53 ` [RFC/PATCH v2 17/22] s390/mm: Add VSIE reverse fake case Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 18/22] s390/mm: Remove gmap_pte_op_walk Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 19/22] s390/mm: Split huge pages if granular protection is needed Janosch Frank
2018-01-25  7:16   ` Janosch Frank
2018-01-25 14:39     ` David Hildenbrand
2018-01-25 14:55       ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 20/22] s390/mm: Enable gmap huge pmd support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 21/22] KVM: s390: Add KVM HPAGE capability Janosch Frank
2017-12-20 13:02   ` Cornelia Huck
2017-12-20 13:17     ` Janosch Frank
2017-12-20 13:21       ` Cornelia Huck
2017-12-13 12:53 ` [RFC/PATCH v2 22/22] RFC: s390/mm: Add gmap lock classes Janosch Frank
2017-12-20 12:24   ` Christian Borntraeger
2017-12-20 12:36     ` Janosch Frank
2017-12-20 12:23 ` [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Christian Borntraeger
2017-12-21 12:00   ` David Hildenbrand
2017-12-22  9:08     ` Christian Borntraeger
2018-01-02  0:02       ` Janosch Frank
2018-01-22 11:23 ` David Hildenbrand
2018-01-22 11:56   ` Christian Borntraeger
2018-01-23 21:15 ` David Hildenbrand
2018-01-24  9:01   ` Janosch Frank
2018-01-24  9:14     ` David Hildenbrand
2018-01-25 15:33       ` [PATCH 0/2] Huge page pte protection Janosch Frank
2018-01-25 15:33         ` [PATCH 1/2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-25 16:04           ` David Hildenbrand
2018-01-26 10:31             ` Janosch Frank
2018-01-25 15:33         ` [PATCH 2/2] mm: s390: Rename gmap_pte_op_fixup Janosch Frank
2018-01-26 10:34       ` [PATCH v2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-30 10:19         ` David Hildenbrand
