From: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
To: Alex Markuze <amarkuze@redhat.com>,
"slava@dubeyko.com" <slava@dubeyko.com>,
David Howells <dhowells@redhat.com>
Cc: "linux-block@vger.kernel.org" <linux-block@vger.kernel.org>,
"idryomov@gmail.com" <idryomov@gmail.com>,
"jlayton@kernel.org" <jlayton@kernel.org>,
"linux-fsdevel@vger.kernel.org" <linux-fsdevel@vger.kernel.org>,
"ceph-devel@vger.kernel.org" <ceph-devel@vger.kernel.org>,
"dongsheng.yang@easystack.cn" <dongsheng.yang@easystack.cn>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: Re: [RFC PATCH 22/35] libceph, rbd: Convert ceph_osdc_notify() reply to ceph_databuf
Date: Wed, 19 Mar 2025 00:08:52 +0000
Message-ID: <b1108a2b01c693430abb4566b1bd644a5985ecf6.camel@ibm.com>
In-Reply-To: <20250313233341.1675324-23-dhowells@redhat.com>
On Thu, 2025-03-13 at 23:33 +0000, David Howells wrote:
> Convert the reply buffer of ceph_osdc_notify() to ceph_databuf rather than
> an array of pages.
>
> Signed-off-by: David Howells <dhowells@redhat.com>
> cc: Viacheslav Dubeyko <slava@dubeyko.com>
> cc: Alex Markuze <amarkuze@redhat.com>
> cc: Ilya Dryomov <idryomov@gmail.com>
> cc: ceph-devel@vger.kernel.org
> cc: linux-fsdevel@vger.kernel.org
> ---
> drivers/block/rbd.c | 36 +++++++++++++++++----------
> include/linux/ceph/databuf.h | 16 ++++++++++++
> include/linux/ceph/osd_client.h | 7 ++----
> net/ceph/osd_client.c | 44 +++++++++++----------------------
> 4 files changed, 55 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
> index eea12c7ab2a0..a2674077edea 100644
> --- a/drivers/block/rbd.c
> +++ b/drivers/block/rbd.c
> @@ -3585,8 +3585,7 @@ static void rbd_unlock(struct rbd_device *rbd_dev)
>
> static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
> enum rbd_notify_op notify_op,
> - struct page ***preply_pages,
> - size_t *preply_len)
> + struct ceph_databuf *reply)
> {
> struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
> struct rbd_client_id cid = rbd_get_cid(rbd_dev);
> @@ -3604,13 +3603,13 @@ static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
>
> return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
> &rbd_dev->header_oloc, buf, buf_size,
> - RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
> + RBD_NOTIFY_TIMEOUT, reply);
> }
>
> static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
> enum rbd_notify_op notify_op)
> {
> - __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
> + __rbd_notify_op_lock(rbd_dev, notify_op, NULL);
> }
>
> static void rbd_notify_acquired_lock(struct work_struct *work)
> @@ -3631,23 +3630,29 @@ static void rbd_notify_released_lock(struct work_struct *work)
>
> static int rbd_request_lock(struct rbd_device *rbd_dev)
> {
> - struct page **reply_pages;
> - size_t reply_len;
> + struct ceph_databuf *reply;
> bool lock_owner_responded = false;
> int ret;
>
> dout("%s rbd_dev %p\n", __func__, rbd_dev);
>
> - ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
> - &reply_pages, &reply_len);
> + /* The actual reply pages will be allocated in the read path and then
> + * pasted in in handle_watch_notify().
> + */
> + reply = ceph_databuf_reply_alloc(0, 0, GFP_KERNEL);
> + if (!reply)
> + return -ENOMEM;
> +
> + ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK, reply);
> if (ret && ret != -ETIMEDOUT) {
> rbd_warn(rbd_dev, "failed to request lock: %d", ret);
> goto out;
> }
>
> - if (reply_len > 0 && reply_len <= PAGE_SIZE) {
> - void *p = page_address(reply_pages[0]);
> - void *const end = p + reply_len;
> + if (ceph_databuf_len(reply) > 0 && ceph_databuf_len(reply) <= PAGE_SIZE) {
> + void *s = kmap_ceph_databuf_page(reply, 0);
Maybe call it 'start' instead of 's'?
> + void *p = s;
> + void *const end = p + ceph_databuf_len(reply);
> u32 n;
>
> ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
> @@ -3659,10 +3664,12 @@ static int rbd_request_lock(struct rbd_device *rbd_dev)
> p += 8 + 8; /* skip gid and cookie */
>
> ceph_decode_32_safe(&p, end, len, e_inval);
> - if (!len)
> + if (!len) {
> continue;
> + }
>
> if (lock_owner_responded) {
> + kunmap_local(s);
> rbd_warn(rbd_dev,
> "duplicate lock owners detected");
> ret = -EIO;
> @@ -3673,6 +3680,7 @@ static int rbd_request_lock(struct rbd_device *rbd_dev)
> ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
> &struct_v, &len);
> if (ret) {
> + kunmap_local(s);
Is it possible to have kunmap_local() in only one place and use a goto to
jump there?
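Something like this, perhaps (just a rough sketch against this hunk; the
label name is made up):

	if (ret) {
		rbd_warn(rbd_dev,
			 "failed to decode ResponseMessage: %d",
			 ret);
		goto out_unmap;
	}
	...
	/* All exits from the mapped section converge here. */
out_unmap:
	kunmap_local(s);

That way the three kunmap_local() call sites collapse into one.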
> rbd_warn(rbd_dev,
> "failed to decode ResponseMessage: %d",
> ret);
> @@ -3681,6 +3689,8 @@ static int rbd_request_lock(struct rbd_device *rbd_dev)
>
> ret = ceph_decode_32(&p);
> }
> +
> + kunmap_local(s);
> }
>
> if (!lock_owner_responded) {
> @@ -3689,7 +3699,7 @@ static int rbd_request_lock(struct rbd_device *rbd_dev)
> }
>
> out:
> - ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
> + ceph_databuf_release(reply);
> return ret;
>
> e_inval:
> diff --git a/include/linux/ceph/databuf.h b/include/linux/ceph/databuf.h
> index 54b76d0c91a0..25154b3d08fa 100644
> --- a/include/linux/ceph/databuf.h
> +++ b/include/linux/ceph/databuf.h
> @@ -150,4 +150,20 @@ static inline bool ceph_databuf_is_all_zero(struct ceph_databuf *dbuf, size_t co
> ceph_databuf_scan_for_nonzero) == count;
> }
>
> +static inline void ceph_databuf_transfer(struct ceph_databuf *to,
> + struct ceph_databuf *from)
> +{
> + BUG_ON(to->nr_bvec || to->bvec);
> + to->bvec = from->bvec;
> + to->nr_bvec = from->nr_bvec;
> + to->max_bvec = from->max_bvec;
> + to->limit = from->limit;
> + to->iter = from->iter;
> +
> + from->bvec = NULL;
> + from->nr_bvec = from->max_bvec = 0;
> + from->limit = 0;
> + iov_iter_discard(&from->iter, ITER_DEST, 0);
> +}
> +
> #endif /* __FS_CEPH_DATABUF_H */
> diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
> index 5a1ee66ca216..7eff589711cc 100644
> --- a/include/linux/ceph/osd_client.h
> +++ b/include/linux/ceph/osd_client.h
> @@ -333,9 +333,7 @@ struct ceph_osd_linger_request {
>
> struct ceph_databuf *request_pl;
> struct ceph_databuf *notify_id_buf;
> -
> - struct page ***preply_pages;
Really!!! We had a pointer to a pointer to a pointer... :) Damn, I've never
seen anything like this before.
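(Just to spell it out, it was an out-parameter for a page vector, so the
caller side looked like:

	struct page **reply_pages;
	size_t reply_len;

	/* The callee fills in *preply_pages and *preply_len. */
	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);

so passing a single databuf instead is a nice simplification.)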
> - size_t *preply_len;
> + struct ceph_databuf *reply;
> };
>
> struct ceph_watch_item {
> @@ -589,8 +587,7 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
> void *payload,
> u32 payload_len,
> u32 timeout,
> - struct page ***preply_pages,
> - size_t *preply_len);
> + struct ceph_databuf *reply);
> int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
> struct ceph_object_id *oid,
> struct ceph_object_locator *oloc,
> diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
> index 1a0cb2cdcc52..92aaa5ed9145 100644
> --- a/net/ceph/osd_client.c
> +++ b/net/ceph/osd_client.c
> @@ -4523,17 +4523,11 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
> dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
> lreq->notify_id, notify_id);
> } else if (!completion_done(&lreq->notify_finish_wait)) {
> - struct ceph_msg_data *data =
> - msg->num_data_items ? &msg->data[0] : NULL;
> -
> - if (data) {
> - if (lreq->preply_pages) {
> - WARN_ON(data->type !=
> - CEPH_MSG_DATA_PAGES);
> - *lreq->preply_pages = data->pages;
> - *lreq->preply_len = data->length;
> - data->own_pages = false;
> - }
> + if (msg->num_data_items && lreq->reply) {
> + struct ceph_msg_data *data = &msg->data[0];
This low-level access worries me slightly. I don't see any real problem here,
but maybe we should hide this access behind some iterator-like function? That
might not be feasible within the scope of this patchset, though.
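E.g. a hypothetical accessor along these lines (the name is made up, not
something already in the tree):

	/* Return the first data item of a message, or NULL if none. */
	static inline struct ceph_msg_data *
	ceph_msg_data_first(struct ceph_msg *msg)
	{
		return msg->num_data_items ? &msg->data[0] : NULL;
	}

Then handle_watch_notify() wouldn't need to poke at msg->data[] directly.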
Thanks,
Slava.
> +
> + WARN_ON(data->type != CEPH_MSG_DATA_DATABUF);
> + ceph_databuf_transfer(lreq->reply, data->dbuf);
> }
> lreq->notify_finish_error = return_code;
> complete_all(&lreq->notify_finish_wait);
> @@ -4823,10 +4817,7 @@ EXPORT_SYMBOL(ceph_osdc_notify_ack);
> /*
> * @timeout: in seconds
> *
> - * @preply_{pages,len} are initialized both on success and error.
> - * The caller is responsible for:
> - *
> - * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
> + * @reply should be an empty ceph_databuf.
> */
> int ceph_osdc_notify(struct ceph_osd_client *osdc,
> struct ceph_object_id *oid,
> @@ -4834,8 +4825,7 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
> void *payload,
> u32 payload_len,
> u32 timeout,
> - struct page ***preply_pages,
> - size_t *preply_len)
> + struct ceph_databuf *reply)
> {
> struct ceph_osd_linger_request *lreq;
> void *p;
> @@ -4845,10 +4835,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
> return -EIO;
>
> WARN_ON(!timeout);
> - if (preply_pages) {
> - *preply_pages = NULL;
> - *preply_len = 0;
> - }
>
> lreq = linger_alloc(osdc);
> if (!lreq)
> @@ -4875,8 +4861,7 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
> goto out_put_lreq;
> }
>
> - lreq->preply_pages = preply_pages;
> - lreq->preply_len = preply_len;
> + lreq->reply = reply;
>
> ceph_oid_copy(&lreq->t.base_oid, oid);
> ceph_oloc_copy(&lreq->t.base_oloc, oloc);
> @@ -5383,7 +5368,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
> return m;
> }
>
> -static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
> +static struct ceph_msg *alloc_msg_with_data_buffer(struct ceph_msg_header *hdr)
> {
> struct ceph_msg *m;
> int type = le16_to_cpu(hdr->type);
> @@ -5395,16 +5380,15 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
> return NULL;
>
> if (data_len) {
> - struct page **pages;
> + struct ceph_databuf *dbuf;
>
> - pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
> - GFP_NOIO);
> - if (IS_ERR(pages)) {
> + dbuf = ceph_databuf_reply_alloc(0, data_len, GFP_NOIO);
> + if (!dbuf) {
> ceph_msg_put(m);
> return NULL;
> }
>
> - ceph_msg_data_add_pages(m, pages, data_len, 0, true);
> + ceph_msg_data_add_databuf(m, dbuf);
> }
>
> return m;
> @@ -5422,7 +5406,7 @@ static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
> case CEPH_MSG_OSD_MAP:
> case CEPH_MSG_OSD_BACKOFF:
> case CEPH_MSG_WATCH_NOTIFY:
> - return alloc_msg_with_page_vector(hdr);
> + return alloc_msg_with_data_buffer(hdr);
> case CEPH_MSG_OSD_OPREPLY:
> return get_reply(con, hdr, skip);
> default:
>
>