From: Baokun Li <libaokun@huaweicloud.com>
To: Jingbo Xu <jefflexu@linux.alibaba.com>,
netfs@lists.linux.dev, dhowells@redhat.com, jlayton@kernel.org
Cc: hsiangkao@linux.alibaba.com, zhujia.zj@bytedance.com,
linux-erofs@lists.ozlabs.org, linux-fsdevel@vger.kernel.org,
linux-kernel@vger.kernel.org, yangerkun@huawei.com,
houtao1@huawei.com, yukuai3@huawei.com, wozizhi@huawei.com,
Baokun Li <libaokun1@huawei.com>,
libaokun@huaweicloud.com
Subject: Re: [PATCH v2 09/12] cachefiles: defer exposing anon_fd until after copy_to_user() succeeds
Date: Mon, 20 May 2024 19:36:47 +0800 [thread overview]
Message-ID: <a9e39b5f-4397-056e-7f6c-b1a1847429dd@huaweicloud.com> (raw)
In-Reply-To: <db7ae78c-857b-45ba-94dc-63c02757e0b2@linux.alibaba.com>
On 2024/5/20 17:39, Jingbo Xu wrote:
>
> On 5/15/24 4:45 PM, libaokun@huaweicloud.com wrote:
>> From: Baokun Li <libaokun1@huawei.com>
>>
>> After installing the anonymous fd, we can now see it in userland and close
>> it. However, at this point we may not have gotten the reference count of
>> the cache, but we will put it when closing the fd, so this may cause a
>> cache UAF.
>>
>> So grab the cache reference count before fd_install(). In addition, by
>> kernel convention, fd is taken over by the user land after fd_install(),
>> and the kernel should not call close_fd() after that, i.e., it should call
>> fd_install() after everything is ready, thus fd_install() is called after
>> copy_to_user() succeeds.
>>
>> Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
>> Suggested-by: Hou Tao <houtao1@huawei.com>
>> Signed-off-by: Baokun Li <libaokun1@huawei.com>
>> ---
>> fs/cachefiles/ondemand.c | 53 +++++++++++++++++++++++++---------------
>> 1 file changed, 33 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
>> index d2d4e27fca6f..3a36613e00a7 100644
>> --- a/fs/cachefiles/ondemand.c
>> +++ b/fs/cachefiles/ondemand.c
>> @@ -4,6 +4,11 @@
>> #include <linux/uio.h>
>> #include "internal.h"
>>
>> +struct anon_file {
>> + struct file *file;
>> + int fd;
>> +};
>> +
>> static inline void cachefiles_req_put(struct cachefiles_req *req)
>> {
>> if (refcount_dec_and_test(&req->ref))
>> @@ -263,14 +268,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
>> return 0;
>> }
>>
>
>> -static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
>> +static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
>> + struct anon_file *anon_file)
>
> How about:
>
> int cachefiles_ondemand_get_fd(struct cachefiles_req *req, int *fd,
> struct file *file) ?
>
> It isn't worth introducing a new structure as it is used only for
> parameter passing.
>
It's just a different code style preference, and internally we think
it makes the code look clearer when encapsulated this way.
>> {
>> struct cachefiles_object *object;
>> struct cachefiles_cache *cache;
>> struct cachefiles_open *load;
>> - struct file *file;
>> u32 object_id;
>> - int ret, fd;
>> + int ret;
>>
>> object = cachefiles_grab_object(req->object,
>> cachefiles_obj_get_ondemand_fd);
>> @@ -282,16 +287,16 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
>> if (ret < 0)
>> goto err;
>>
>> - fd = get_unused_fd_flags(O_WRONLY);
>> - if (fd < 0) {
>> - ret = fd;
>> + anon_file->fd = get_unused_fd_flags(O_WRONLY);
>> + if (anon_file->fd < 0) {
>> + ret = anon_file->fd;
>> goto err_free_id;
>> }
>>
>> - file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
>> - object, O_WRONLY);
>> - if (IS_ERR(file)) {
>> - ret = PTR_ERR(file);
>> + anon_file->file = anon_inode_getfile("[cachefiles]",
>> + &cachefiles_ondemand_fd_fops, object, O_WRONLY);
>> + if (IS_ERR(anon_file->file)) {
>> + ret = PTR_ERR(anon_file->file);
>> goto err_put_fd;
>> }
>>
>> @@ -299,16 +304,15 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
>> if (object->ondemand->ondemand_id > 0) {
>> spin_unlock(&object->ondemand->lock);
>> /* Pair with check in cachefiles_ondemand_fd_release(). */
>> - file->private_data = NULL;
>> + anon_file->file->private_data = NULL;
>> ret = -EEXIST;
>> goto err_put_file;
>> }
>>
>> - file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
>> - fd_install(fd, file);
>> + anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
>>
>> load = (void *)req->msg.data;
>> - load->fd = fd;
>> + load->fd = anon_file->fd;
>> object->ondemand->ondemand_id = object_id;
>> spin_unlock(&object->ondemand->lock);
>>
>> @@ -317,9 +321,11 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
>> return 0;
>>
>> err_put_file:
>> - fput(file);
>> + fput(anon_file->file);
>> + anon_file->file = NULL;
> When cachefiles_ondemand_get_fd() returns failure, anon_file->file is
> not used, and thus I don't think it is worth resetting anon_file->file
> to NULL. Or we could assign fd and struct file at the very end when all
> succeed.
Nulling pointers that are no longer in use is a safer coding convention,
which goes some way towards avoiding double-free or use-after-free.
Moreover, it's in the error branch, so it doesn't cost anything.
>> err_put_fd:
>> - put_unused_fd(fd);
>> + put_unused_fd(anon_file->fd);
>> + anon_file->fd = ret;
> Ditto.
>
>> err_free_id:
>> xa_erase(&cache->ondemand_ids, object_id);
>> err:
>> @@ -376,6 +382,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>> struct cachefiles_msg *msg;
>> size_t n;
>> int ret = 0;
>> + struct anon_file anon_file;
>> XA_STATE(xas, &cache->reqs, cache->req_id_next);
>>
>> xa_lock(&cache->reqs);
>> @@ -409,7 +416,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>> xa_unlock(&cache->reqs);
>>
>> if (msg->opcode == CACHEFILES_OP_OPEN) {
>> - ret = cachefiles_ondemand_get_fd(req);
>> + ret = cachefiles_ondemand_get_fd(req, &anon_file);
>> if (ret)
>> goto out;
>> }
>> @@ -417,10 +424,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>> msg->msg_id = xas.xa_index;
>> msg->object_id = req->object->ondemand->ondemand_id;
>>
>> - if (copy_to_user(_buffer, msg, n) != 0) {
>> + if (copy_to_user(_buffer, msg, n) != 0)
>> ret = -EFAULT;
>> - if (msg->opcode == CACHEFILES_OP_OPEN)
>> - close_fd(((struct cachefiles_open *)msg->data)->fd);
>> +
>> + if (msg->opcode == CACHEFILES_OP_OPEN) {
>> + if (ret < 0) {
>> + fput(anon_file.file);
>> + put_unused_fd(anon_file.fd);
>> + goto out;
>> + }
>> + fd_install(anon_file.fd, anon_file.file);
>> }
>> out:
>> cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
--
With Best Regards,
Baokun Li
next prev parent reply other threads:[~2024-05-20 11:36 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-15 8:45 [PATCH v2 00/12] cachefiles: some bugfixes and cleanups for ondemand requests libaokun
2024-05-15 8:45 ` [PATCH v2 01/12] cachefiles: remove request from xarry during flush requests libaokun
2024-05-20 2:20 ` Gao Xiang
2024-05-20 4:11 ` Baokun Li
2024-05-20 7:09 ` Jingbo Xu
2024-05-15 8:45 ` [PATCH v2 02/12] cachefiles: remove err_put_fd tag in cachefiles_ondemand_daemon_read() libaokun
2024-05-20 2:23 ` Gao Xiang
2024-05-20 4:15 ` Baokun Li
2024-05-15 8:45 ` [PATCH v2 03/12] cachefiles: fix slab-use-after-free in cachefiles_ondemand_get_fd() libaokun
2024-05-20 7:24 ` Jingbo Xu
2024-05-20 8:38 ` Baokun Li
2024-05-20 8:45 ` Gao Xiang
2024-05-20 9:10 ` Jingbo Xu
2024-05-20 9:19 ` Baokun Li
2024-05-20 12:22 ` Baokun Li
2024-05-20 8:06 ` Jingbo Xu
2024-05-20 9:10 ` Baokun Li
2024-05-15 8:45 ` [PATCH v2 04/12] cachefiles: fix slab-use-after-free in cachefiles_ondemand_daemon_read() libaokun
2024-05-20 7:36 ` Jingbo Xu
2024-05-20 8:56 ` Baokun Li
2024-05-15 8:45 ` [PATCH v2 05/12] cachefiles: add output string to cachefiles_obj_[get|put]_ondemand_fd libaokun
2024-05-20 7:40 ` Jingbo Xu
2024-05-20 9:02 ` Baokun Li
2024-05-15 8:45 ` [PATCH v2 06/12] cachefiles: add consistency check for copen/cread libaokun
2024-05-15 8:45 ` [PATCH v2 07/12] cachefiles: add spin_lock for cachefiles_ondemand_info libaokun
2024-05-15 8:45 ` [PATCH v2 08/12] cachefiles: never get a new anonymous fd if ondemand_id is valid libaokun
2024-05-20 8:43 ` Jingbo Xu
2024-05-20 9:07 ` Baokun Li
2024-05-20 9:24 ` Jingbo Xu
2024-05-20 11:14 ` Baokun Li
2024-05-20 11:24 ` Gao Xiang
2024-05-15 8:45 ` [PATCH v2 09/12] cachefiles: defer exposing anon_fd until after copy_to_user() succeeds libaokun
2024-05-20 9:39 ` Jingbo Xu
2024-05-20 11:36 ` Baokun Li [this message]
2024-05-15 8:45 ` [PATCH v2 10/12] cachefiles: Set object to close if ondemand_id < 0 in copen libaokun
2024-05-15 8:46 ` [PATCH v2 11/12] cachefiles: flush all requests after setting CACHEFILES_DEAD libaokun
2024-05-15 8:46 ` [PATCH v2 12/12] cachefiles: make on-demand read killable libaokun
2024-05-19 10:56 ` [PATCH v2 00/12] cachefiles: some bugfixes and cleanups for ondemand requests Jeff Layton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a9e39b5f-4397-056e-7f6c-b1a1847429dd@huaweicloud.com \
--to=libaokun@huaweicloud.com \
--cc=dhowells@redhat.com \
--cc=houtao1@huawei.com \
--cc=hsiangkao@linux.alibaba.com \
--cc=jefflexu@linux.alibaba.com \
--cc=jlayton@kernel.org \
--cc=libaokun1@huawei.com \
--cc=linux-erofs@lists.ozlabs.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=netfs@lists.linux.dev \
--cc=wozizhi@huawei.com \
--cc=yangerkun@huawei.com \
--cc=yukuai3@huawei.com \
--cc=zhujia.zj@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).