From: Henrique Carvalho <henrique.carvalho@suse.com>
To: nspmangalore@gmail.com
Cc: smfrench@gmail.com, bharathsm.hsk@gmail.com, ematsumiya@suse.de,
pc@manguebit.com, paul@darkrain42.org, ronniesahlberg@gmail.com,
linux-cifs@vger.kernel.org,
Shyam Prasad N <sprasad@microsoft.com>
Subject: Re: [PATCH 1/5] cifs: protect cfid accesses with fid_lock
Date: Fri, 2 May 2025 09:37:34 -0300 [thread overview]
Message-ID: <aBS8jg4bcmh6EdwT@precision> (raw)
In-Reply-To: <20250502051517.10449-1-sprasad@microsoft.com>
Hi Shyam,
On Fri, May 02, 2025 at 05:13:40AM +0000, nspmangalore@gmail.com wrote:
> From: Shyam Prasad N <sprasad@microsoft.com>
>
> There are several accesses to cfid structure today without
> locking fid_lock. This can lead to race conditions that are
> hard to debug.
>
> With this change, I'm trying to make sure that accesses to cfid
> struct members happen with fid_lock held.
>
> Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
> ---
> fs/smb/client/cached_dir.c | 87 ++++++++++++++++++++++----------------
> 1 file changed, 50 insertions(+), 37 deletions(-)
>
> diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
> index fe738623cf1b..f074675fa6be 100644
> --- a/fs/smb/client/cached_dir.c
> +++ b/fs/smb/client/cached_dir.c
> @@ -31,6 +31,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
>
> spin_lock(&cfids->cfid_list_lock);
> list_for_each_entry(cfid, &cfids->entries, entry) {
> + spin_lock(&cfid->fid_lock);
> if (!strcmp(cfid->path, path)) {
> /*
> * If it doesn't have a lease it is either not yet
> @@ -38,13 +39,16 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
> * being deleted due to a lease break.
> */
> if (!cfid->time || !cfid->has_lease) {
> + spin_unlock(&cfid->fid_lock);
> spin_unlock(&cfids->cfid_list_lock);
> return NULL;
> }
> kref_get(&cfid->refcount);
> + spin_unlock(&cfid->fid_lock);
> spin_unlock(&cfids->cfid_list_lock);
> return cfid;
> }
> + spin_unlock(&cfid->fid_lock);
> }
> if (lookup_only) {
> spin_unlock(&cfids->cfid_list_lock);
> @@ -192,19 +196,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> kfree(utf16_path);
> return -ENOENT;
> }
> +
> /*
> * Return cached fid if it is valid (has a lease and has a time).
> * Otherwise, it is either a new entry or laundromat worker removed it
> * from @cfids->entries. Caller will put last reference if the latter.
> */
> - spin_lock(&cfids->cfid_list_lock);
> + spin_lock(&cfid->fid_lock);
> if (cfid->has_lease && cfid->time) {
> - spin_unlock(&cfids->cfid_list_lock);
> + spin_unlock(&cfid->fid_lock);
> *ret_cfid = cfid;
> kfree(utf16_path);
> return 0;
> }
> - spin_unlock(&cfids->cfid_list_lock);
> + spin_unlock(&cfid->fid_lock);
>
> /*
> * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
> @@ -219,17 +224,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> goto out;
> }
>
> - if (!npath[0]) {
> - dentry = dget(cifs_sb->root);
> - } else {
> - dentry = path_to_dentry(cifs_sb, npath);
> - if (IS_ERR(dentry)) {
> - rc = -ENOENT;
> - goto out;
> - }
> - }
> - cfid->dentry = dentry;
> - cfid->tcon = tcon;
>
> /*
> * We do not hold the lock for the open because in case
> @@ -301,9 +295,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> }
> goto oshr_free;
> }
> - cfid->is_open = true;
> -
> - spin_lock(&cfids->cfid_list_lock);
>
> o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
> oparms.fid->persistent_fid = o_rsp->PersistentFileId;
> @@ -314,7 +305,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
>
>
> if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
> - spin_unlock(&cfids->cfid_list_lock);
> rc = -EINVAL;
> goto oshr_free;
> }
> @@ -323,21 +313,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> &oparms.fid->epoch,
> oparms.fid->lease_key,
> &oplock, NULL, NULL);
> - if (rc) {
> - spin_unlock(&cfids->cfid_list_lock);
> + if (rc)
> goto oshr_free;
> - }
>
> rc = -EINVAL;
> - if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
> - spin_unlock(&cfids->cfid_list_lock);
> + if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
> goto oshr_free;
> - }
> qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
> - if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
> - spin_unlock(&cfids->cfid_list_lock);
> + if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
> goto oshr_free;
> - }
> if (!smb2_validate_and_copy_iov(
> le16_to_cpu(qi_rsp->OutputBufferOffset),
> sizeof(struct smb2_file_all_info),
> @@ -345,10 +329,26 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> (char *)&cfid->file_all_info))
> cfid->file_all_info_is_valid = true;
>
> - cfid->time = jiffies;
> - spin_unlock(&cfids->cfid_list_lock);
> /* At this point the directory handle is fully cached */
> rc = 0;
> + spin_lock(&cfid->fid_lock);
> + if (!cfid->dentry) {
> + if (!npath[0]) {
> + dentry = dget(cifs_sb->root);
> + } else {
> + dentry = path_to_dentry(cifs_sb, npath);
> + if (IS_ERR(dentry)) {
> + spin_unlock(&cfid->fid_lock);
> + rc = -ENOENT;
> + goto out;
> + }
> + }
> + cfid->dentry = dentry;
> + }
> + cfid->tcon = tcon;
> + cfid->is_open = true;
> + cfid->time = jiffies;
> + spin_unlock(&cfid->fid_lock);
>
> oshr_free:
> SMB2_open_free(&rqst[0]);
> @@ -363,6 +363,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> cfid->on_list = false;
> cfids->num_entries--;
> }
> + spin_lock(&cfid->fid_lock);
> if (cfid->has_lease) {
> /*
> * We are guaranteed to have two references at this
> @@ -372,6 +373,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
> cfid->has_lease = false;
> kref_put(&cfid->refcount, smb2_close_cached_fid);
> }
> + spin_unlock(&cfid->fid_lock);
> spin_unlock(&cfids->cfid_list_lock);
>
> kref_put(&cfid->refcount, smb2_close_cached_fid);
> @@ -400,13 +402,16 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
>
> spin_lock(&cfids->cfid_list_lock);
> list_for_each_entry(cfid, &cfids->entries, entry) {
> + spin_lock(&cfid->fid_lock);
> if (dentry && cfid->dentry == dentry) {
> cifs_dbg(FYI, "found a cached file handle by dentry\n");
> kref_get(&cfid->refcount);
> + spin_unlock(&cfid->fid_lock);
> *ret_cfid = cfid;
> spin_unlock(&cfids->cfid_list_lock);
> return 0;
> }
> + spin_unlock(&cfid->fid_lock);
> }
> spin_unlock(&cfids->cfid_list_lock);
> return -ENOENT;
> @@ -427,8 +432,11 @@ smb2_close_cached_fid(struct kref *ref)
> }
> spin_unlock(&cfid->cfids->cfid_list_lock);
>
> - dput(cfid->dentry);
> - cfid->dentry = NULL;
> + /* no locking necessary as we're the last user of this cfid */
> + if (cfid->dentry) {
> + dput(cfid->dentry);
> + cfid->dentry = NULL;
> + }
>
> if (cfid->is_open) {
> rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
> @@ -451,12 +459,13 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
> cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
> return;
> }
> - spin_lock(&cfid->cfids->cfid_list_lock);
> + spin_lock(&cfid->fid_lock);
> if (cfid->has_lease) {
> + /* mark as invalid */
> cfid->has_lease = false;
> kref_put(&cfid->refcount, smb2_close_cached_fid);
> }
> - spin_unlock(&cfid->cfids->cfid_list_lock);
> + spin_unlock(&cfid->fid_lock);
> close_cached_dir(cfid);
> }
>
> @@ -538,6 +547,7 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
> cfids->num_entries--;
> cfid->is_open = false;
> cfid->on_list = false;
> + spin_lock(&cfid->fid_lock);
> if (cfid->has_lease) {
> /*
> * The lease was never cancelled from the server,
> @@ -546,6 +556,7 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
> cfid->has_lease = false;
> } else
> kref_get(&cfid->refcount);
> + spin_unlock(&cfid->fid_lock);
> }
> /*
> * Queue dropping of the dentries once locks have been dropped
> @@ -600,6 +611,7 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
>
> spin_lock(&cfids->cfid_list_lock);
> list_for_each_entry(cfid, &cfids->entries, entry) {
> + spin_lock(&cfid->fid_lock);
> if (cfid->has_lease &&
> !memcmp(lease_key,
> cfid->fid.lease_key,
> @@ -612,6 +624,7 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
> */
> list_del(&cfid->entry);
> cfid->on_list = false;
> + spin_unlock(&cfid->fid_lock);
> cfids->num_entries--;
>
> ++tcon->tc_count;
> @@ -621,6 +634,7 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
> spin_unlock(&cfids->cfid_list_lock);
> return true;
> }
> + spin_unlock(&cfid->fid_lock);
> }
> spin_unlock(&cfids->cfid_list_lock);
> return false;
> @@ -656,9 +670,6 @@ static void free_cached_dir(struct cached_fid *cfid)
> WARN_ON(work_pending(&cfid->close_work));
> WARN_ON(work_pending(&cfid->put_work));
>
> - dput(cfid->dentry);
> - cfid->dentry = NULL;
> -
> /*
> * Delete all cached dirent names
> */
> @@ -703,6 +714,7 @@ static void cfids_laundromat_worker(struct work_struct *work)
>
> spin_lock(&cfids->cfid_list_lock);
> list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
> + spin_lock(&cfid->fid_lock);
> if (cfid->time &&
> time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
> cfid->on_list = false;
> @@ -717,6 +729,7 @@ static void cfids_laundromat_worker(struct work_struct *work)
> } else
> kref_get(&cfid->refcount);
> }
> + spin_unlock(&cfid->fid_lock);
> }
> spin_unlock(&cfids->cfid_list_lock);
>
> @@ -726,7 +739,6 @@ static void cfids_laundromat_worker(struct work_struct *work)
> spin_lock(&cfid->fid_lock);
> dentry = cfid->dentry;
> cfid->dentry = NULL;
> - spin_unlock(&cfid->fid_lock);
>
> dput(dentry);
> if (cfid->is_open) {
> @@ -742,6 +754,7 @@ static void cfids_laundromat_worker(struct work_struct *work)
> * was one) or the extra one acquired.
> */
> kref_put(&cfid->refcount, smb2_close_cached_fid);
> + spin_unlock(&cfid->fid_lock);
> }
> queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
> dir_cache_timeout * HZ);
You are calling dput() here with the lock held — both on the
path_to_dentry() path and inside smb2_close_cached_fid(). Is this correct?
Also, the lock ordering here is lock(fid_lock) -> lock(cifs_tcp_ses_lock) ->
unlock(cifs_tcp_ses_lock) -> unlock(fid_lock); won't this blow up in
another path?
Thanks,
Henrique
next prev parent reply other threads:[~2025-05-02 12:39 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-02 5:13 [PATCH 1/5] cifs: protect cfid accesses with fid_lock nspmangalore
2025-05-02 5:13 ` [PATCH 2/5] cifs: do not return an invalidated cfid nspmangalore
2025-05-02 5:13 ` [PATCH 3/5] cifs: serialize initialization and cleanup of cfid nspmangalore
2025-05-02 15:10 ` Henrique Carvalho
2025-05-02 15:12 ` Henrique Carvalho
2025-05-02 5:13 ` [PATCH 4/5] cifs: update the lock ordering comments with new mutex nspmangalore
2025-05-02 5:13 ` [PATCH 5/5] cifs: add new field to track the last access time of cfid nspmangalore
2025-05-02 12:37 ` Henrique Carvalho [this message]
2025-05-02 12:40 ` [PATCH 1/5] cifs: protect cfid accesses with fid_lock Henrique Carvalho
2025-05-03 2:54 ` Shyam Prasad N
2025-05-05 0:25 ` Henrique Carvalho
2025-05-05 0:48 ` Steve French
2025-05-10 14:03 ` Shyam Prasad N
2025-05-10 14:04 ` Shyam Prasad N
[not found] ` <CAH2r5mv+CmYtEZ8oGcQQYzwmh0HYgBpaFwLSR3NqtUWxNwTL=Q@mail.gmail.com>
2025-05-02 15:35 ` Enzo Matsumiya
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aBS8jg4bcmh6EdwT@precision \
--to=henrique.carvalho@suse.com \
--cc=bharathsm.hsk@gmail.com \
--cc=ematsumiya@suse.de \
--cc=linux-cifs@vger.kernel.org \
--cc=nspmangalore@gmail.com \
--cc=paul@darkrain42.org \
--cc=pc@manguebit.com \
--cc=ronniesahlberg@gmail.com \
--cc=smfrench@gmail.com \
--cc=sprasad@microsoft.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox