linux-s390.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: David Hildenbrand <david@redhat.com>
To: Janosch Frank <frankja@linux.vnet.ibm.com>, kvm@vger.kernel.org
Cc: schwidefsky@de.ibm.com, borntraeger@de.ibm.com,
	dominik.dingel@gmail.com, linux-s390@vger.kernel.org
Subject: Re: [PATCH 1/2] mm: s390: Only notify on 4k pages
Date: Thu, 25 Jan 2018 17:04:17 +0100	[thread overview]
Message-ID: <26ef13f7-dfd4-de7d-b448-0084df956d27@redhat.com> (raw)
In-Reply-To: <1516894398-12694-2-git-send-email-frankja@linux.vnet.ibm.com>

On 25.01.2018 16:33, Janosch Frank wrote:
> Let's try this
> 
> Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
> ---
>  arch/s390/include/asm/gmap.h |  5 ++-
>  arch/s390/mm/gmap.c          | 72 ++++++++------------------------------------
>  2 files changed, 14 insertions(+), 63 deletions(-)
> 
> diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
> index 6287aca..4120360 100644
> --- a/arch/s390/include/asm/gmap.h
> +++ b/arch/s390/include/asm/gmap.h
> @@ -13,9 +13,8 @@
>  #define GMAP_NOTIFY_SHADOW	0x2
>  #define GMAP_NOTIFY_MPROT	0x1
>  
> -/* Status bits in huge and non-huge gmap segment entries. */
> -#define _SEGMENT_ENTRY_GMAP_IN		0x0001	/* invalidation notify bit */
> -#define _SEGMENT_ENTRY_GMAP_SPLIT	0x0002  /* split huge pmd */
> +/* Status bit in huge and non-huge gmap segment entries. */
> +#define _SEGMENT_ENTRY_GMAP_SPLIT	0x0001  /* split huge pmd */
>  /* Status bits only for huge segment entries */
>  #define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* user dirty (migration) */
>  #define _SEGMENT_ENTRY_GMAP_VSIE	0x8000	/* vsie bit */
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 10e0690..c47964f 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -998,7 +998,7 @@ static void gmap_pte_transfer_prot(struct mm_struct *mm, unsigned long addr,
>   * and requested access rights are incompatible.
>   */
>  static int gmap_pmdp_force_prot(struct gmap *gmap, unsigned long addr,
> -				pmd_t *pmdp, int prot, unsigned long bits)
> +				pmd_t *pmdp, int prot)
>  {
>  	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
>  	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
> @@ -1018,7 +1018,6 @@ static int gmap_pmdp_force_prot(struct gmap *gmap, unsigned long addr,
>  		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
>  		gmap_pmdp_xchg(gmap, pmdp, new, addr);
>  	}
> -	pmd_val(*pmdp) |=  bits;
>  	return 0;
>  }
>  
> @@ -1136,21 +1135,18 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
>  			    unsigned long vmaddr, pmd_t *pmdp, pmd_t *hpmdp,
>  			    int prot, unsigned long bits)
>  {
> -	unsigned long sbits = 0;
>  	int ret = 0;
>  
> -	sbits |= (bits & GMAP_NOTIFY_MPROT) ? _SEGMENT_ENTRY_GMAP_IN : 0;
> -	sbits |= (bits & GMAP_NOTIFY_SHADOW) ? _SEGMENT_ENTRY_GMAP_VSIE : 0;
> -
> -	if (((prot != PROT_WRITE) && (bits & GMAP_NOTIFY_SHADOW))) {
> +	/* We notify only on the smallest possible frame size, a 4k page. */
> +	if (bits) {
>  		ret = gmap_pmd_split(gmap, gaddr, pmdp);
>  		if (ret)
>  			return ret;
>  		return -EFAULT;
>  	}

See below, I think we should move that to the caller.

Especially, gmap_protect_rmap_pmd() should no longer be needed then. (if
I am not messing things up)

>  
> -	/* Protect gmap pmd */
> -	ret = gmap_pmdp_force_prot(gmap, gaddr, pmdp, prot, sbits);
> +	/* Protect gmap pmd for dirty tracking. */
> +	ret = gmap_pmdp_force_prot(gmap, gaddr, pmdp, prot);
>  	/*
>  	 * Transfer protection back to the host pmd, so userspace has
>  	 * never more access rights than the VM.
> @@ -1167,7 +1163,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
>   * @gaddr: virtual address in the guest address space
>   * @len: size of area
>   * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
> - * @bits: pgste notification bits to set
> + * @bits: notification bits to set
>   *
>   * Returns 0 if successfully protected, -ENOMEM if out of memory and
>   * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
> @@ -1196,11 +1192,6 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>  		pmdp = gmap_pmd_op_walk(gmap, gaddr);
>  		if (pmdp) {
>  			if (!pmd_large(*pmdp)) {
> -				if (gmap_pmd_is_split(pmdp) &&
> -				    (bits & GMAP_NOTIFY_MPROT)) {
> -					pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
> -				}
> -

Actually we can reduce this code here quite a lot by simply checking for

if (pmd_large(*pmdp)) {
	// splitup
	rc = -EAGAIN;
}

No need to call gmap_protect_pmd().

I think it makes sense to move the split handling completely out of
gmap_protect_pmd() and only call it at places where we need it.

So only gmap_test_and_clear_dirty_segment() should end up calling it.

We can then also get rid of the "bits" parameter here, which is nice.

>  				rc = gmap_protect_pte(gmap, gaddr, vmaddr,
>  						      pmdp, hpmdp, prot, bits);
>  				if (!rc) {
> @@ -2562,53 +2553,20 @@ static void gmap_shadow_notify_pmd(struct gmap *sg, unsigned long vmaddr,
>  				   unsigned long gaddr)
>  {
>  	struct gmap_rmap *rmap, *rnext, *head;
> -	unsigned long start, end, bits, raddr;
> +	unsigned long bits, raddr;
>  
>  
>  	BUG_ON(!gmap_is_shadow(sg));
>  
>  	spin_lock_nested(&sg->guest_table_lock, GMAP_LOCK_SHADOW);
> -	if (sg->removed) {
> -		spin_unlock(&sg->guest_table_lock);
> -		return;
> -	}
> -	/* Check for top level table */
> -	start = sg->orig_asce & _ASCE_ORIGIN;
> -	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
> -	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
> -	    gaddr < ((end & HPAGE_MASK) + HPAGE_SIZE - 1)) {
> -		/* The complete shadow table has to go */
> -		gmap_unshadow(sg);
> -		spin_unlock(&sg->guest_table_lock);
> -		list_del(&sg->list);
> -		gmap_put(sg);
> -		return;
> -	}
> -	/* Remove the page table tree from on specific entry */
>  	head = radix_tree_delete(&sg->host_to_rmap, (vmaddr & HPAGE_MASK) >> PAGE_SHIFT);
>  	gmap_for_each_rmap_safe(rmap, rnext, head) {
>  		bits = rmap->raddr & _SHADOW_RMAP_MASK;
>  		raddr = rmap->raddr ^ bits;
> -		switch (bits) {
> -		case _SHADOW_RMAP_REGION1:
> -			gmap_unshadow_r2t(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_REGION2:
> -			gmap_unshadow_r3t(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_REGION3:
> -			gmap_unshadow_sgt(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_SEGMENT_LP:
> +		if (bits ==  _SHADOW_RMAP_SEGMENT_LP)
>  			gmap_unshadow_segment(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_SEGMENT:
> -			gmap_unshadow_pgt(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_PGTABLE:
> -			gmap_unshadow_page(sg, raddr);
> -			break;
> -		}

Now this looks much better. Do we still need the _SHADOW_RMAP_SEGMENT_LP
check in gmap_shadow_notify()? I don't think so.

> +		else
> +			BUG_ON(1);
>  		kfree(rmap);
>  	}
>  	spin_unlock(&sg->guest_table_lock);
> @@ -2777,9 +2735,8 @@ static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
>  	table = gmap_table_walk(gmap, gaddr, 1);
>  	if (!table)
>  		return;
> -	bits = *table & _SEGMENT_ENTRY_GMAP_IN;
>  	if (pmd_large(__pmd(*table)) && (*table & _SEGMENT_ENTRY_GMAP_VSIE))
> -		bits |= _SEGMENT_ENTRY_GMAP_VSIE;
> +		bits = _SEGMENT_ENTRY_GMAP_VSIE;
>  	if (!bits)
>  		return;
>  	*table &= ~bits;
> @@ -2792,8 +2749,6 @@ static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
>  			gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
>  		spin_unlock(&gmap->shadow_lock);
>  	}
> -	if (bits & _SEGMENT_ENTRY_GMAP_IN)
> -		gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
>  }
>  
>  static void pmdp_notify_split(struct mm_struct *mm, unsigned long vmaddr,
> @@ -2841,9 +2796,8 @@ void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
>  			continue;
>  		}
>  
> -		bits = *table & (_SEGMENT_ENTRY_GMAP_IN);
>  		if (pmd_large(__pmd(*table)) && (*table & _SEGMENT_ENTRY_GMAP_VSIE))
> -			bits |= _SEGMENT_ENTRY_GMAP_VSIE;
> +			bits = _SEGMENT_ENTRY_GMAP_VSIE;
>  		*table &= ~bits;
>  		gaddr = __gmap_segment_gaddr(table);
>  		spin_unlock(&gmap->guest_table_lock);
> @@ -2854,8 +2808,6 @@ void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
>  				gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
>  			spin_unlock(&gmap->shadow_lock);
>  		}
> -		if (bits & _SEGMENT_ENTRY_GMAP_IN)
> -			gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
>  	}
>  	rcu_read_unlock();
>  }
> 


-- 

Thanks,

David / dhildenb

  reply	other threads:[~2018-01-25 16:04 UTC|newest]

Thread overview: 67+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-13 12:53 [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 01/22] s390/mm: make gmap_protect_range more modular Janosch Frank
2018-01-22 11:33   ` David Hildenbrand
2018-01-22 12:31     ` Janosch Frank
2018-01-22 12:50       ` David Hildenbrand
2018-01-22 13:02         ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 02/22] s390/mm: Abstract gmap notify bit setting Janosch Frank
2018-01-22 11:34   ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 03/22] s390/mm: add gmap PMD invalidation notification Janosch Frank
2017-12-21  9:24   ` Janosch Frank
2018-01-22 11:46   ` David Hildenbrand
2018-01-22 13:13     ` Janosch Frank
2018-01-22 13:29       ` David Hildenbrand
2018-01-22 14:04         ` Janosch Frank
2018-01-22 11:56   ` David Hildenbrand
2018-01-22 12:09     ` Janosch Frank
2018-01-22 12:12       ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 04/22] s390/mm: Add gmap pmd invalidation and clearing Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 05/22] s390/mm: hugetlb pages within a gmap can not be freed Janosch Frank
2018-01-24 13:45   ` David Hildenbrand
2018-01-24 13:56     ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 06/22] s390/mm: Introduce gmap_pmdp_xchg Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 07/22] RFC: s390/mm: Transfer guest pmd protection to host Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 08/22] s390/mm: Add huge page dirty sync support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 09/22] s390/mm: clear huge page storage keys on enable_skey Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 10/22] s390/mm: Add huge pmd storage key handling Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 11/22] s390/mm: Remove superfluous parameter Janosch Frank
2017-12-21  9:22   ` Janosch Frank
2018-01-16 12:39     ` Janosch Frank
2018-01-16 13:11   ` David Hildenbrand
2018-01-22 13:14   ` Christian Borntraeger
2018-01-22 13:24     ` Martin Schwidefsky
2017-12-13 12:53 ` [RFC/PATCH v2 12/22] s390/mm: Add gmap_protect_large read protection support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 13/22] s390/mm: Make gmap_read_table EDAT1 compatible Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 14/22] s390/mm: Make protect_rmap " Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 15/22] s390/mm: GMAP read table extensions Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 16/22] s390/mm: Add shadow segment code Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 17/22] s390/mm: Add VSIE reverse fake case Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 18/22] s390/mm: Remove gmap_pte_op_walk Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 19/22] s390/mm: Split huge pages if granular protection is needed Janosch Frank
2018-01-25  7:16   ` Janosch Frank
2018-01-25 14:39     ` David Hildenbrand
2018-01-25 14:55       ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 20/22] s390/mm: Enable gmap huge pmd support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 21/22] KVM: s390: Add KVM HPAGE capability Janosch Frank
2017-12-20 13:02   ` Cornelia Huck
2017-12-20 13:17     ` Janosch Frank
2017-12-20 13:21       ` Cornelia Huck
2017-12-13 12:53 ` [RFC/PATCH v2 22/22] RFC: s390/mm: Add gmap lock classes Janosch Frank
2017-12-20 12:24   ` Christian Borntraeger
2017-12-20 12:36     ` Janosch Frank
2017-12-20 12:23 ` [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Christian Borntraeger
2017-12-21 12:00   ` David Hildenbrand
2017-12-22  9:08     ` Christian Borntraeger
2018-01-02  0:02       ` Janosch Frank
2018-01-22 11:23 ` David Hildenbrand
2018-01-22 11:56   ` Christian Borntraeger
2018-01-23 21:15 ` David Hildenbrand
2018-01-24  9:01   ` Janosch Frank
2018-01-24  9:14     ` David Hildenbrand
2018-01-25 15:33       ` [PATCH 0/2] Huge page pte protection Janosch Frank
2018-01-25 15:33         ` [PATCH 1/2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-25 16:04           ` David Hildenbrand [this message]
2018-01-26 10:31             ` Janosch Frank
2018-01-25 15:33         ` [PATCH 2/2] mm: s390: Rename gmap_pte_op_fixup Janosch Frank
2018-01-26 10:34       ` [PATCH v2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-30 10:19         ` David Hildenbrand

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=26ef13f7-dfd4-de7d-b448-0084df956d27@redhat.com \
    --to=david@redhat.com \
    --cc=borntraeger@de.ibm.com \
    --cc=dominik.dingel@gmail.com \
    --cc=frankja@linux.vnet.ibm.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=schwidefsky@de.ibm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).