AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: John Hubbard <jhubbard@nvidia.com>
To: Jason Gunthorpe <jgg@ziepe.ca>,
	Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: Andrea Arcangeli <aarcange@redhat.com>,
	linux-rdma@vger.kernel.org, amd-gfx@lists.freedesktop.org,
	linux-mm@kvack.org, Jason Gunthorpe <jgg@mellanox.com>,
	dri-devel@lists.freedesktop.org
Subject: Re: [PATCH v2 hmm 06/11] mm/hmm: Hold on to the mmget for the lifetime of the range
Date: Thu, 6 Jun 2019 20:15:00 -0700	[thread overview]
Message-ID: <326c4ed3-5232-4a58-7501-d27f763a9b56@nvidia.com> (raw)
In-Reply-To: <20190606184438.31646-7-jgg@ziepe.ca>

On 6/6/19 11:44 AM, Jason Gunthorpe wrote:
> From: Jason Gunthorpe <jgg@mellanox.com>
> 
> Range functions like hmm_range_snapshot() and hmm_range_fault() call
> find_vma, which requires holding the mmget() and the mmap_sem for the mm.
> 
> Make this simpler for the callers by holding the mmget() inside the range
> for the lifetime of the range. Other functions that accept a range should
> only be called if the range is registered.
> 
> This has the side effect of directly preventing hmm_release() from
> happening while a range is registered. That means range->dead cannot be
> true during the lifetime of the range, so remove dead and
> hmm_mirror_mm_is_alive() entirely.
> 
> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
> ---
> v2:
>  - Use Jerome's idea of just holding the mmget() for the range lifetime,
>    rework the patch to use that as a simplification to remove dead in
>    one step
> ---
>  include/linux/hmm.h | 26 --------------------------
>  mm/hmm.c            | 28 ++++++++++------------------
>  2 files changed, 10 insertions(+), 44 deletions(-)
> 
> diff --git a/include/linux/hmm.h b/include/linux/hmm.h
> index 2ab35b40992b24..0e20566802967a 100644
> --- a/include/linux/hmm.h
> +++ b/include/linux/hmm.h
> @@ -91,7 +91,6 @@
>   * @mirrors_sem: read/write semaphore protecting the mirrors list
>   * @wq: wait queue for user waiting on a range invalidation
>   * @notifiers: count of active mmu notifiers
> - * @dead: is the mm dead ?
>   */
>  struct hmm {
>  	struct mm_struct	*mm;
> @@ -104,7 +103,6 @@ struct hmm {
>  	wait_queue_head_t	wq;
>  	struct rcu_head		rcu;
>  	long			notifiers;
> -	bool			dead;
>  };
>  
>  /*
> @@ -469,30 +467,6 @@ struct hmm_mirror {
>  int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
>  void hmm_mirror_unregister(struct hmm_mirror *mirror);
>  
> -/*
> - * hmm_mirror_mm_is_alive() - test if mm is still alive
> - * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
> - * Return: false if the mm is dead, true otherwise
> - *
> - * This is an optimization, it will not always accurately return false if the
> - * mm is dead; i.e., there can be false negatives (process is being killed but
> - * HMM is not yet informed of that). It is only intended to be used to optimize
> - * out cases where the driver is about to do something time consuming and it
> - * would be better to skip it if the mm is dead.
> - */
> -static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
> -{
> -	struct mm_struct *mm;
> -
> -	if (!mirror || !mirror->hmm)
> -		return false;
> -	mm = READ_ONCE(mirror->hmm->mm);
> -	if (mirror->hmm->dead || !mm)
> -		return false;
> -
> -	return true;
> -}
> -
>  /*
>   * Please see Documentation/vm/hmm.rst for how to use the range API.
>   */
> diff --git a/mm/hmm.c b/mm/hmm.c
> index dc30edad9a8a02..f67ba32983d9f1 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -80,7 +80,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
>  	mutex_init(&hmm->lock);
>  	kref_init(&hmm->kref);
>  	hmm->notifiers = 0;
> -	hmm->dead = false;
>  	hmm->mm = mm;
>  
>  	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
> @@ -124,20 +123,17 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
>  {
>  	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
>  	struct hmm_mirror *mirror;
> -	struct hmm_range *range;
>  
>  	/* hmm is in progress to free */
>  	if (!kref_get_unless_zero(&hmm->kref))
>  		return;
>  
> -	/* Report this HMM as dying. */
> -	hmm->dead = true;
> -
> -	/* Wake-up everyone waiting on any range. */
>  	mutex_lock(&hmm->lock);
> -	list_for_each_entry(range, &hmm->ranges, list)
> -		range->valid = false;
> -	wake_up_all(&hmm->wq);
> +	/*
> +	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
> +	 * prevented as long as a range exists.
> +	 */
> +	WARN_ON(!list_empty(&hmm->ranges));
>  	mutex_unlock(&hmm->lock);
>  
>  	down_write(&hmm->mirrors_sem);
> @@ -909,8 +905,8 @@ int hmm_range_register(struct hmm_range *range,
>  	range->start = start;
>  	range->end = end;
>  
> -	/* Check if hmm_mm_destroy() was call. */
> -	if (hmm->mm == NULL || hmm->dead)
> +	/* Prevent hmm_release() from running while the range is valid */
> +	if (!mmget_not_zero(hmm->mm))
>  		return -EFAULT;
>  
>  	range->hmm = hmm;
> @@ -955,6 +951,7 @@ void hmm_range_unregister(struct hmm_range *range)
>  
>  	/* Drop reference taken by hmm_range_register() */
>  	range->valid = false;
> +	mmput(hmm->mm);
>  	hmm_put(hmm);
>  	range->hmm = NULL;
>  }
> @@ -982,10 +979,7 @@ long hmm_range_snapshot(struct hmm_range *range)
>  	struct vm_area_struct *vma;
>  	struct mm_walk mm_walk;
>  
> -	/* Check if hmm_mm_destroy() was call. */
> -	if (hmm->mm == NULL || hmm->dead)
> -		return -EFAULT;
> -
> +	lockdep_assert_held(&hmm->mm->mmap_sem);
>  	do {
>  		/* If range is no longer valid force retry. */
>  		if (!range->valid)
> @@ -1080,9 +1074,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
>  	struct mm_walk mm_walk;
>  	int ret;
>  
> -	/* Check if hmm_mm_destroy() was call. */
> -	if (hmm->mm == NULL || hmm->dead)
> -		return -EFAULT;
> +	lockdep_assert_held(&hmm->mm->mmap_sem);
>  
>  	do {
>  		/* If range is no longer valid force retry. */
> 

Nice cleanup.

    Reviewed-by: John Hubbard <jhubbard@nvidia.com>


thanks,
-- 
John Hubbard
NVIDIA
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

  reply	other threads:[~2019-06-07  3:15 UTC|newest]

Thread overview: 79+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-06 18:44 [PATCH v2 hmm 00/11] Various revisions from a locking/code review Jason Gunthorpe
     [not found] ` <20190606184438.31646-1-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-06 18:44   ` [PATCH v2 hmm 01/11] mm/hmm: fix use after free with struct hmm in the mmu notifiers Jason Gunthorpe
     [not found]     ` <20190606184438.31646-2-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:29       ` John Hubbard
     [not found]         ` <9c72d18d-2924-cb90-ea44-7cd4b10b5bc2-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:34           ` Jason Gunthorpe
     [not found]             ` <20190607123432.GB14802-uk2M96/98Pc@public.gmane.org>
2019-06-07 13:42               ` Jason Gunthorpe
2019-06-08  1:13             ` John Hubbard
2019-06-08  1:37             ` John Hubbard
2019-06-07 18:12       ` Ralph Campbell
2019-06-08  8:49       ` Christoph Hellwig
2019-06-08 11:33         ` Jason Gunthorpe
2019-06-06 18:44   ` [PATCH v2 hmm 02/11] mm/hmm: Use hmm_mirror not mm as an argument for hmm_range_register Jason Gunthorpe
     [not found]     ` <20190606184438.31646-3-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:36       ` John Hubbard
2019-06-07 18:24       ` Ralph Campbell
2019-06-07 22:39         ` Ralph Campbell
     [not found]           ` <e460ddf5-9ed3-7f3b-98ce-526c12fdb8b1-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-10 13:09             ` Jason Gunthorpe
2019-06-08  8:54       ` Christoph Hellwig
     [not found]         ` <20190608085425.GB32185-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2019-06-11 19:44           ` Jason Gunthorpe
     [not found]             ` <20190611194431.GC29375-uk2M96/98Pc@public.gmane.org>
2019-06-12  7:12               ` Christoph Hellwig
     [not found]                 ` <20190612071234.GA20306-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2019-06-12 11:41                   ` Jason Gunthorpe
     [not found]                     ` <20190612114125.GA3876-uk2M96/98Pc@public.gmane.org>
2019-06-12 12:11                       ` Christoph Hellwig
2019-06-07 22:33     ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 03/11] mm/hmm: Hold a mmgrab from hmm to mm Jason Gunthorpe
     [not found]     ` <20190606184438.31646-4-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:44       ` John Hubbard
     [not found]         ` <48fcaa19-6ac3-59d0-cd51-455abeca7cdb-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:36           ` Jason Gunthorpe
2019-06-07 18:41       ` Ralph Campbell
     [not found]         ` <605172dc-5c66-123f-61a3-8e6880678aef-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 18:51           ` Jason Gunthorpe
2019-06-07 22:38     ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 04/11] mm/hmm: Simplify hmm_get_or_create and make it reliable Jason Gunthorpe
2019-06-07  2:54     ` John Hubbard
     [not found]     ` <20190606184438.31646-5-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 18:52       ` Ralph Campbell
2019-06-07 22:44     ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 05/11] mm/hmm: Remove duplicate condition test before wait_event_timeout Jason Gunthorpe
2019-06-07  3:06     ` John Hubbard
     [not found]       ` <86962e22-88b1-c1bf-d704-d5a5053fa100-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:47         ` Jason Gunthorpe
2019-06-07 13:31         ` [PATCH v3 " Jason Gunthorpe
2019-06-07 22:55           ` Ira Weiny
2019-06-08  1:32           ` John Hubbard
     [not found]     ` <20190606184438.31646-6-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 19:01       ` [PATCH v2 " Ralph Campbell
     [not found]         ` <6833be96-12a3-1a1c-1514-c148ba2dd87b-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 19:13           ` Jason Gunthorpe
     [not found]             ` <20190607191302.GR14802-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:21               ` Ralph Campbell
2019-06-07 20:44                 ` Jason Gunthorpe
2019-06-07 22:13                   ` Ralph Campbell
2019-06-08  1:47                     ` Jason Gunthorpe
2019-06-06 18:44   ` [PATCH v2 hmm 06/11] mm/hmm: Hold on to the mmget for the lifetime of the range Jason Gunthorpe
2019-06-07  3:15     ` John Hubbard [this message]
     [not found]     ` <20190606184438.31646-7-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:29       ` Ralph Campbell
2019-06-06 18:44   ` [PATCH v2 hmm 07/11] mm/hmm: Use lockdep instead of comments Jason Gunthorpe
2019-06-07  3:19     ` John Hubbard
     [not found]     ` <20190606184438.31646-8-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:31       ` Ralph Campbell
2019-06-07 22:16     ` Souptick Joarder
2019-06-06 18:44   ` [PATCH v2 hmm 08/11] mm/hmm: Remove racy protection against double-unregistration Jason Gunthorpe
2019-06-07  3:29     ` John Hubbard
     [not found]       ` <88400de9-e1ae-509b-718f-c6b0f726b14c-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 13:57         ` Jason Gunthorpe
     [not found]     ` <20190606184438.31646-9-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:33       ` Ralph Campbell
2019-06-06 18:44   ` [PATCH v2 hmm 09/11] mm/hmm: Poison hmm_range during unregister Jason Gunthorpe
2019-06-07  3:37     ` John Hubbard
     [not found]       ` <c00da0f2-b4b8-813b-0441-a50d4de9d8be-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 14:03         ` Jason Gunthorpe
2019-06-07 20:46     ` Ralph Campbell
2019-06-07 20:49       ` Jason Gunthorpe
2019-06-07 23:01     ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 10/11] mm/hmm: Do not use list*_rcu() for hmm->ranges Jason Gunthorpe
2019-06-07  3:40     ` John Hubbard
2019-06-07 20:49     ` Ralph Campbell
2019-06-07 22:11     ` Souptick Joarder
2019-06-07 23:02     ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 11/11] mm/hmm: Remove confusing comment and logic from hmm_release Jason Gunthorpe
2019-06-07  3:47     ` John Hubbard
     [not found]       ` <3edc47bd-e8f6-0e65-5844-d16901890637-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:58         ` Jason Gunthorpe
2019-06-07 21:37     ` Ralph Campbell
2019-06-08  2:12       ` Jason Gunthorpe
     [not found]       ` <61ea869d-43d2-d1e5-dc00-cf5e3e139169-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-10 16:02         ` Jason Gunthorpe
2019-06-10 22:03           ` Ralph Campbell
2019-06-07 16:05   ` [PATCH v2 12/11] mm/hmm: Fix error flows in hmm_invalidate_range_start Jason Gunthorpe
2019-06-07 23:52     ` Ralph Campbell
2019-06-08  1:35       ` Jason Gunthorpe
2019-06-11 19:48   ` [PATCH v2 hmm 00/11] Various revisions from a locking/code review Jason Gunthorpe
2019-06-12 17:54     ` Kuehling, Felix
     [not found]       ` <5d3b0ae2-3662-cab2-5e6c-82912f32356a-5C7GfCeVMHo@public.gmane.org>
2019-06-12 21:49         ` Yang, Philip
2019-06-13 17:50           ` Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=326c4ed3-5232-4a58-7501-d27f763a9b56@nvidia.com \
    --to=jhubbard@nvidia.com \
    --cc=Felix.Kuehling@amd.com \
    --cc=aarcange@redhat.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=jgg@mellanox.com \
    --cc=jgg@ziepe.ca \
    --cc=jglisse@redhat.com \
    --cc=linux-mm@kvack.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=rcampbell@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox