From: Jeff Layton <jlayton@poochiereds.net>
To: Benjamin Coddington <bcodding@redhat.com>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	bfields@fieldses.org
Cc: linux-fsdevel@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: Re: [PATCH 2/3] fs/locks: Set fl_nspid at file_lock allocation
Date: Sat, 27 May 2017 06:00:59 -0400
Message-ID: <1495879259.2881.4.camel@poochiereds.net>
In-Reply-To: <b4a55cb32bc843bae76fc10cf7ff9378a2a29822.1495829587.git.bcodding@redhat.com>

On Fri, 2017-05-26 at 16:14 -0400, Benjamin Coddington wrote:
> Since commit c69899a17ca4 "NFSv4: Update of VFS byte range lock must be
> atomic with the stateid update", NFSv4 has been inserting locks in rpciod
> worker context.  The result is that the file_lock's fl_nspid is the
> kworker's pid instead of the original userspace pid.  We can fix that up by
> setting fl_nspid in locks_alloc_lock, and transferring it to the file_lock
> that's eventually recorded.
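
Just to spell out the lifetime rule the above relies on, here is the
struct pid refcounting pattern in isolation (untested sketch, headers
elided, not taken from the patch):

	static void example_pid_ref_pattern(void)
	{
		/* take a reference on the caller's thread-group pid so the
		 * struct pid stays valid even after the task exits */
		struct pid *nspid = get_pid(task_tgid(current));

		/* report the pid as seen from the caller's pid namespace */
		pr_info("pid in this namespace: %d\n", pid_vnr(nspid));

		/* drop the reference when done with it */
		put_pid(nspid);
	}

Capturing the pid at locks_alloc_lock() time means it is always the
allocating task's pid, no matter which context later inserts the lock.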
> 
> Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
> ---
>  fs/locks.c | 29 ++++++++++++++++++++---------
>  1 file changed, 20 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/locks.c b/fs/locks.c
> index 54aeacf8dc46..270ae50247db 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -249,7 +249,9 @@ locks_dump_ctx_list(struct list_head *list, char *list_type)
>  	struct file_lock *fl;
>  
>  	list_for_each_entry(fl, list, fl_list) {
> -		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
> +		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u fl_nspid=%u\n",
> +			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type,
> +			fl->fl_pid, pid_vnr(fl->fl_nspid));
>  	}
>  }
>  
> @@ -294,8 +296,10 @@ struct file_lock *locks_alloc_lock(void)
>  {
>  	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
>  
> -	if (fl)
> +	if (fl) {
>  		locks_init_lock_heads(fl);
> +		fl->fl_nspid = get_pid(task_tgid(current));
> +	}
>  
>  	return fl;
>  }
> @@ -328,6 +332,8 @@ void locks_free_lock(struct file_lock *fl)
>  	BUG_ON(!hlist_unhashed(&fl->fl_link));
>  
>  	locks_release_private(fl);
> +	if (fl->fl_nspid)
> +		put_pid(fl->fl_nspid);
>  	kmem_cache_free(filelock_cache, fl);
>  }
>  EXPORT_SYMBOL(locks_free_lock);
> @@ -357,8 +363,15 @@ EXPORT_SYMBOL(locks_init_lock);
>   */
>  void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
>  {
> +	struct pid *replace_pid = new->fl_nspid;
> +
>  	new->fl_owner = fl->fl_owner;
>  	new->fl_pid = fl->fl_pid;
> +	if (fl->fl_nspid) {
> +		new->fl_nspid = get_pid(fl->fl_nspid);
> +		if (replace_pid)
> +			put_pid(replace_pid);
> +	}
>  	new->fl_file = NULL;
>  	new->fl_flags = fl->fl_flags;
>  	new->fl_type = fl->fl_type;
> @@ -733,7 +746,6 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
>  static void
>  locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
>  {
> -	fl->fl_nspid = get_pid(task_tgid(current));
>  	list_add_tail(&fl->fl_list, before);
>  	locks_insert_global_locks(fl);
>  }
> @@ -743,10 +755,6 @@ locks_unlink_lock_ctx(struct file_lock *fl)
>  {
>  	locks_delete_global_locks(fl);
>  	list_del_init(&fl->fl_list);
> -	if (fl->fl_nspid) {
> -		put_pid(fl->fl_nspid);
> -		fl->fl_nspid = NULL;
> -	}
>  	locks_wake_up_blocks(fl);
>  }
>  
> @@ -823,8 +831,6 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
>  	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
>  		if (posix_locks_conflict(fl, cfl)) {
>  			locks_copy_conflock(fl, cfl);
> -			if (cfl->fl_nspid)
> -				fl->fl_pid = pid_vnr(cfl->fl_nspid);
>  			goto out;
>  		}
>  	}
> @@ -2492,6 +2498,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
>  	lock.fl_end = OFFSET_MAX;
>  	lock.fl_owner = owner;
>  	lock.fl_pid = current->tgid;
> +	lock.fl_nspid = get_pid(task_tgid(current));
>  	lock.fl_file = filp;
>  	lock.fl_ops = NULL;
>  	lock.fl_lmops = NULL;
> @@ -2500,6 +2507,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
>  
>  	if (lock.fl_ops && lock.fl_ops->fl_release_private)
>  		lock.fl_ops->fl_release_private(&lock);
> +	put_pid(lock.fl_nspid);
>  	trace_locks_remove_posix(inode, &lock, error);
>  }
>  
> @@ -2522,6 +2530,8 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
>  	if (list_empty(&flctx->flc_flock))
>  		return;
>  
> +	fl.fl_nspid = get_pid(task_tgid(current));
> +
>  	if (filp->f_op->flock && is_remote_lock(filp))
>  		filp->f_op->flock(filp, F_SETLKW, &fl);
>  	else
> @@ -2529,6 +2539,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
>  
>  	if (fl.fl_ops && fl.fl_ops->fl_release_private)
>  		fl.fl_ops->fl_release_private(&fl);
> +	put_pid(fl.fl_nspid);

Maybe we should make a new helper function that does fl_release_private
and put_pid? That can be done in a later cleanup patch though. This
looks fine in the meantime.
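
Something along these lines, perhaps (untested sketch, and the name is
just a placeholder):

	static void locks_release_private_and_pid(struct file_lock *fl)
	{
		/* let the fs release its per-lock private data first */
		if (fl->fl_ops && fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		/* then drop the reference held in fl_nspid, if any */
		if (fl->fl_nspid)
			put_pid(fl->fl_nspid);
	}

That would let locks_remove_posix() and locks_remove_flock() share the
tail end of their cleanup.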

>  }
>  
>  /* The i_flctx must be valid when calling into here */

-- 
Jeff Layton <jlayton@poochiereds.net>

Thread overview: 9+ messages
2017-05-26 20:14 [PATCH 0/3] Fixups for l_pid Benjamin Coddington
2017-05-26 20:14 ` [PATCH 1/3] fs/locks: Alloc file_lock where practical Benjamin Coddington
2017-05-27  9:56   ` Jeff Layton
2017-05-28  6:35   ` Christoph Hellwig
2017-05-26 20:14 ` [PATCH 2/3] fs/locks: Set fl_nspid at file_lock allocation Benjamin Coddington
2017-05-27 10:00   ` Jeff Layton [this message]
2017-05-26 20:14 ` [PATCH 3/3] fs/locks: Use fs-specific l_pid for remote locks Benjamin Coddington
2017-05-26 20:26 ` [PATCH 0/3] Fixups for l_pid Benjamin Coddington
2017-05-27 10:11 ` Jeff Layton
