linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Mike Rapoport <rppt@kernel.org>
To: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>,
	John Hubbard <jhubbard@nvidia.com>,
	linux-mm@kvack.org
Subject: Re: [PATCH 1/8] mm/gup: have internal functions get the mmap_read_lock()
Date: Thu, 19 Jan 2023 13:16:23 +0200	[thread overview]
Message-ID: <Y8kmh1wdHrO2O3ZF@kernel.org> (raw)
In-Reply-To: <1-v1-dd94f8f0d5ad+716-gup_tidy_jgg@nvidia.com>

On Tue, Jan 17, 2023 at 11:58:32AM -0400, Jason Gunthorpe wrote:
> __get_user_pages_locked() and __gup_longterm_locked() both require the
> mmap lock to be held. They have a slightly unusual locked parameter that
> is used to allow these functions to unlock and relock the mmap lock and
> convey that fact to the caller.
> 
> Several places wrapper these functions with a simple mmap_read_lock() just

Nit:             ^wrap

> so they can follow the optimized locked protocol.
> 
> Consolidate this internally to the functions. Allow internal callers to
> set locked = 0 to cause the functions to obtain and release the lock on

I'd s/obtain/acquire/g

> their own.
> 
> Reorganize __gup_longterm_locked() to use the autolocking in
> __get_user_pages_locked().
> 
> Replace all the places obtaining the mmap_read_lock() just to call
> __get_user_pages_locked() with the new mechanism. Replace all the internal
> callers of get_user_pages_unlocked() with direct calls to
> __gup_longterm_locked() using the new mechanism.
> 
> A following patch will add assertions ensuring the external interface
> continues to always pass in locked = 1.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  mm/gup.c | 92 +++++++++++++++++++++++++++++---------------------------
>  1 file changed, 47 insertions(+), 45 deletions(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index f45a3a5be53a48..3a9f764165f50b 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1343,13 +1343,22 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
>  						unsigned int flags)
>  {
>  	long ret, pages_done;
> -	bool lock_dropped;
> +	bool lock_dropped = false;
>  
>  	if (locked) {
>  		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
>  		BUG_ON(vmas);
> -		/* check caller initialized locked */
> -		BUG_ON(*locked != 1);
> +	}
> +
> +	/*
> +	 * The internal caller expects GUP to manage the lock internally and the
> +	 * lock must be released when this returns.
> +	 */
> +	if (locked && !*locked) {
> +		if (mmap_read_lock_killable(mm))
> +			return -EAGAIN;
> +		lock_dropped = true;
> +		*locked = 1;
>  	}
>  
>  	if (flags & FOLL_PIN)
> @@ -1368,7 +1377,6 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
>  		flags |= FOLL_GET;
>  
>  	pages_done = 0;
> -	lock_dropped = false;
>  	for (;;) {
>  		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
>  				       vmas, locked);
> @@ -1659,9 +1667,24 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
>  		unsigned int foll_flags)
>  {
>  	struct vm_area_struct *vma;
> +	bool must_unlock = false;
>  	unsigned long vm_flags;
>  	long i;
>  
> +	if (!nr_pages)
> +		return 0;
> +
> +	/*
> +	 * The internal caller expects GUP to manage the lock internally and the
> +	 * lock must be released when this returns.
> +	 */
> +	if (locked && !*locked) {
> +		if (mmap_read_lock_killable(mm))
> +			return -EAGAIN;
> +		must_unlock = true;
> +		*locked = 1;
> +	}
> +
>  	/* calculate required read or write permissions.
>  	 * If FOLL_FORCE is set, we only require the "MAY" flags.
>  	 */
> @@ -1673,12 +1696,12 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
>  	for (i = 0; i < nr_pages; i++) {
>  		vma = find_vma(mm, start);
>  		if (!vma)
> -			goto finish_or_fault;
> +			break;
>  
>  		/* protect what we can, including chardevs */
>  		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
>  		    !(vm_flags & vma->vm_flags))
> -			goto finish_or_fault;
> +			break;
>  
>  		if (pages) {
>  			pages[i] = virt_to_page((void *)start);
> @@ -1690,9 +1713,11 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
>  		start = (start + PAGE_SIZE) & PAGE_MASK;
>  	}
>  
> -	return i;
> +	if (must_unlock && *locked) {
> +		mmap_read_unlock(mm);
> +		*locked = 0;
> +	}
>  
> -finish_or_fault:
>  	return i ? : -EFAULT;
>  }
>  #endif /* !CONFIG_MMU */
> @@ -1861,17 +1886,13 @@ EXPORT_SYMBOL(fault_in_readable);
>  #ifdef CONFIG_ELF_CORE
>  struct page *get_dump_page(unsigned long addr)
>  {
> -	struct mm_struct *mm = current->mm;
>  	struct page *page;
> -	int locked = 1;
> +	int locked = 0;
>  	int ret;
>  
> -	if (mmap_read_lock_killable(mm))
> -		return NULL;
> -	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
> +	ret = __get_user_pages_locked(current->mm, addr, 1, &page, NULL,
> +				      &locked,
>  				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
> -	if (locked)
> -		mmap_read_unlock(mm);
>  	return (ret == 1) ? page : NULL;
>  }
>  #endif /* CONFIG_ELF_CORE */
> @@ -2047,13 +2068,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
>  				  int *locked,
>  				  unsigned int gup_flags)
>  {
> -	bool must_unlock = false;
>  	unsigned int flags;
>  	long rc, nr_pinned_pages;
>  
> -	if (locked && WARN_ON_ONCE(!*locked))
> -		return -EINVAL;
> -
>  	if (!(gup_flags & FOLL_LONGTERM))
>  		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
>  					       locked, gup_flags);
> @@ -2070,11 +2087,6 @@ static long __gup_longterm_locked(struct mm_struct *mm,
>  		return -EINVAL;
>  	flags = memalloc_pin_save();
>  	do {
> -		if (locked && !*locked) {
> -			mmap_read_lock(mm);
> -			must_unlock = true;
> -			*locked = 1;
> -		}
>  		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
>  							  pages, vmas, locked,
>  							  gup_flags);
> @@ -2085,11 +2097,6 @@ static long __gup_longterm_locked(struct mm_struct *mm,
>  		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
>  	} while (rc == -EAGAIN);
>  	memalloc_pin_restore(flags);
> -
> -	if (locked && *locked && must_unlock) {
> -		mmap_read_unlock(mm);
> -		*locked = 0;
> -	}
>  	return rc ? rc : nr_pinned_pages;
>  }
>  
> @@ -2242,16 +2249,10 @@ EXPORT_SYMBOL(get_user_pages);
>  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
>  			     struct page **pages, unsigned int gup_flags)
>  {
> -	struct mm_struct *mm = current->mm;
> -	int locked = 1;
> -	long ret;
> +	int locked = 0;
>  
> -	mmap_read_lock(mm);
> -	ret = __gup_longterm_locked(mm, start, nr_pages, pages, NULL, &locked,
> -				    gup_flags | FOLL_TOUCH);
> -	if (locked)
> -		mmap_read_unlock(mm);
> -	return ret;
> +	return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL,
> +				     &locked, gup_flags | FOLL_TOUCH);
>  }
>  EXPORT_SYMBOL(get_user_pages_unlocked);
>  
> @@ -2904,6 +2905,7 @@ static int internal_get_user_pages_fast(unsigned long start,
>  {
>  	unsigned long len, end;
>  	unsigned long nr_pinned;
> +	int locked = 0;
>  	int ret;
>  
>  	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
> @@ -2932,8 +2934,9 @@ static int internal_get_user_pages_fast(unsigned long start,
>  	/* Slow path: try to get the remaining pages with get_user_pages */
>  	start += nr_pinned << PAGE_SHIFT;
>  	pages += nr_pinned;
> -	ret = get_user_pages_unlocked(start, nr_pages - nr_pinned, pages,
> -				      gup_flags);
> +	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
> +				    pages, NULL, &locked,
> +				    gup_flags | FOLL_TOUCH);
>  	if (ret < 0) {
>  		/*
>  		 * The caller has to unpin the pages we already pinned so
> @@ -3180,14 +3183,13 @@ EXPORT_SYMBOL(pin_user_pages);
>  long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
>  			     struct page **pages, unsigned int gup_flags)
>  {
> -	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
> -	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
> -		return -EINVAL;
> +	int locked = 0;
>  
>  	if (WARN_ON_ONCE(!pages))
>  		return -EINVAL;
>  
> -	gup_flags |= FOLL_PIN;
> -	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
> +	gup_flags |= FOLL_PIN | FOLL_TOUCH;
> +	return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL,
> +				     &locked, gup_flags);
>  }
>  EXPORT_SYMBOL(pin_user_pages_unlocked);
> -- 
> 2.39.0
> 
> 

-- 
Sincerely yours,
Mike.


  reply	other threads:[~2023-01-19 11:16 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-17 15:58 [PATCH 0/8] Simplify the external interface for GUP Jason Gunthorpe
2023-01-17 15:58 ` [PATCH 1/8] mm/gup: have internal functions get the mmap_read_lock() Jason Gunthorpe
2023-01-19 11:16   ` Mike Rapoport [this message]
2023-01-19 21:19   ` John Hubbard
2023-01-19 22:40     ` John Hubbard
2023-01-20 14:47     ` Jason Gunthorpe
2023-01-17 15:58 ` [PATCH 2/8] mm/gup: don't call __gup_longterm_locked() if FOLL_LONGTERM cannot be set Jason Gunthorpe
2023-01-19 22:24   ` John Hubbard
2023-01-17 15:58 ` [PATCH 3/8] mm/gup: simplify the external interface functions and consolidate invariants Jason Gunthorpe
2023-01-19 11:17   ` Mike Rapoport
2023-01-20  2:51   ` John Hubbard
2023-01-20 14:58     ` Jason Gunthorpe
2023-01-20 18:45       ` John Hubbard
2023-01-17 15:58 ` [PATCH 4/8] mm/gup: add an assertion that the mmap lock is locked Jason Gunthorpe
2023-01-20  3:08   ` John Hubbard
2023-01-20 15:44     ` Jason Gunthorpe
2023-01-17 15:58 ` [PATCH 5/8] mm/gup: add FOLL_UNLOCK Jason Gunthorpe
2023-01-19 11:18   ` Mike Rapoport
2023-01-20 13:45     ` Jason Gunthorpe
2023-01-20 14:58       ` Mike Rapoport
2023-01-20 19:02   ` John Hubbard
2023-01-23 11:37   ` David Hildenbrand
2023-01-23 17:58     ` Jason Gunthorpe
2023-01-23 22:22       ` John Hubbard
2023-01-24 13:08       ` David Hildenbrand
2023-01-17 15:58 ` [PATCH 6/8] mm/gup: make locked never NULL in the internal GUP functions Jason Gunthorpe
2023-01-20 19:19   ` John Hubbard
2023-01-21  0:21     ` Jason Gunthorpe
2023-01-23 11:35   ` David Hildenbrand
2023-01-23 12:44     ` Jason Gunthorpe
2023-01-23 12:59       ` David Hildenbrand
2023-01-23 18:07         ` Jason Gunthorpe
2023-01-17 15:58 ` [PATCH 7/8] mm/gup: remove pin_user_pages_fast_only() Jason Gunthorpe
2023-01-20 19:23   ` John Hubbard
2023-01-23 11:31   ` David Hildenbrand
2023-01-17 15:58 ` [PATCH 8/8] mm/gup: make get_user_pages_fast_only() return the common return value Jason Gunthorpe
2023-01-20 19:27   ` John Hubbard
2023-01-23 11:33   ` David Hildenbrand
2023-01-19 11:18 ` [PATCH 0/8] Simplify the external interface for GUP Mike Rapoport

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Y8kmh1wdHrO2O3ZF@kernel.org \
    --to=rppt@kernel.org \
    --cc=apopple@nvidia.com \
    --cc=jgg@nvidia.com \
    --cc=jhubbard@nvidia.com \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).