From: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
To: Alex Markuze <amarkuze@redhat.com>,
"slava@dubeyko.com" <slava@dubeyko.com>,
David Howells <dhowells@redhat.com>
Cc: "linux-block@vger.kernel.org" <linux-block@vger.kernel.org>,
"idryomov@gmail.com" <idryomov@gmail.com>,
"jlayton@kernel.org" <jlayton@kernel.org>,
"linux-fsdevel@vger.kernel.org" <linux-fsdevel@vger.kernel.org>,
"ceph-devel@vger.kernel.org" <ceph-devel@vger.kernel.org>,
"dongsheng.yang@easystack.cn" <dongsheng.yang@easystack.cn>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: Re: [RFC PATCH 02/35] libceph: Rename alignment to offset
Date: Fri, 14 Mar 2025 19:04:10 +0000
Message-ID: <f6be2823c1bfd37cb7629feb40a1a579bb9378d6.camel@ibm.com>
In-Reply-To: <20250313233341.1675324-3-dhowells@redhat.com>
On Thu, 2025-03-13 at 23:32 +0000, David Howells wrote:
> Rename 'alignment' to 'offset' in a number of places where it seems to be
> talking about the offset into the first page of a sequence of pages.
>
Yeah, 'offset' sounds clearer than 'alignment'.
> Signed-off-by: David Howells <dhowells@redhat.com>
> cc: Viacheslav Dubeyko <slava@dubeyko.com>
> cc: Alex Markuze <amarkuze@redhat.com>
> cc: Ilya Dryomov <idryomov@gmail.com>
> cc: ceph-devel@vger.kernel.org
> cc: linux-fsdevel@vger.kernel.org
> ---
> fs/ceph/addr.c | 4 ++--
> include/linux/ceph/messenger.h | 4 ++--
> include/linux/ceph/osd_client.h | 10 +++++-----
> net/ceph/messenger.c | 10 +++++-----
> net/ceph/osd_client.c | 24 ++++++++++++------------
> 5 files changed, 26 insertions(+), 26 deletions(-)
>
> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index 20b6bd8cd004..482a9f41a685 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -254,7 +254,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
>
> if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
> ceph_put_page_vector(osd_data->pages,
> - calc_pages_for(osd_data->alignment,
> + calc_pages_for(osd_data->offset,
> osd_data->length), false);
> }
> if (err > 0) {
> @@ -918,7 +918,7 @@ static void writepages_finish(struct ceph_osd_request *req)
> osd_data = osd_req_op_extent_osd_data(req, i);
> BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
> len += osd_data->length;
> - num_pages = calc_pages_for((u64)osd_data->alignment,
> + num_pages = calc_pages_for((u64)osd_data->offset,
> (u64)osd_data->length);
> total_pages += num_pages;
> for (j = 0; j < num_pages; j++) {
> diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
> index 1717cc57cdac..db2aba32b8a0 100644
> --- a/include/linux/ceph/messenger.h
> +++ b/include/linux/ceph/messenger.h
> @@ -221,7 +221,7 @@ struct ceph_msg_data {
> struct {
> struct page **pages;
Do we still operate on pages here? It looks like this needs to be reworked somehow.
> size_t length; /* total # bytes */
> - unsigned int alignment; /* first page */
> + unsigned int offset; /* first page */
Maybe we need to change the comment here to say "first folio"?
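Just to illustrate what I mean (hypothetical wording, of course):

	unsigned int offset;	/* offset into the first page/folio */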
> bool own_pages;
We are mentioning pages everywhere. :)
> };
> struct ceph_pagelist *pagelist;
> @@ -602,7 +602,7 @@ extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
> unsigned long interval);
>
> void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
> - size_t length, size_t alignment, bool own_pages);
> + size_t length, size_t offset, bool own_pages);
> extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
> struct ceph_pagelist *pagelist);
> #ifdef CONFIG_BLOCK
> diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
> index d55b30057a45..8fc84f389aad 100644
> --- a/include/linux/ceph/osd_client.h
> +++ b/include/linux/ceph/osd_client.h
> @@ -118,7 +118,7 @@ struct ceph_osd_data {
> struct {
> struct page **pages;
Yeah, pages, pages, pages... :)
> u64 length;
> - u32 alignment;
> + u32 offset;
> bool pages_from_pool;
> bool own_pages;
> };
> @@ -469,7 +469,7 @@ struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
> extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
> unsigned int which,
> struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool,
> + u32 offset, bool pages_from_pool,
> bool own_pages);
>
> extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
> @@ -488,7 +488,7 @@ extern struct ceph_osd_data *osd_req_op_extent_osd_data(
> extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
> unsigned int which,
> struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool,
> + u32 offset, bool pages_from_pool,
> bool own_pages);
> extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
> unsigned int which,
> @@ -515,7 +515,7 @@ extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
> extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
> unsigned int which,
> struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool,
> + u32 offset, bool pages_from_pool,
> bool own_pages);
> void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
> unsigned int which,
> @@ -524,7 +524,7 @@ void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
> extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
> unsigned int which,
> struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool,
> + u32 offset, bool pages_from_pool,
> bool own_pages);
> int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
> const char *class, const char *method);
> diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
> index d1b5705dc0c6..1df4291cc80b 100644
> --- a/net/ceph/messenger.c
> +++ b/net/ceph/messenger.c
> @@ -840,8 +840,8 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
> BUG_ON(!data->length);
>
> cursor->resid = min(length, data->length);
> - page_count = calc_pages_for(data->alignment, (u64)data->length);
> - cursor->page_offset = data->alignment & ~PAGE_MASK;
> + page_count = calc_pages_for(data->offset, (u64)data->length);
> + cursor->page_offset = data->offset & ~PAGE_MASK;
We still have a lot of work ahead converting this to folios.
> cursor->page_index = 0;
> BUG_ON(page_count > (int)USHRT_MAX);
> cursor->page_count = (unsigned short)page_count;
> @@ -1873,7 +1873,7 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
> static void ceph_msg_data_destroy(struct ceph_msg_data *data)
> {
> if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
> - int num_pages = calc_pages_for(data->alignment, data->length);
> + int num_pages = calc_pages_for(data->offset, data->length);
> ceph_release_page_vector(data->pages, num_pages);
> } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
> ceph_pagelist_release(data->pagelist);
> @@ -1881,7 +1881,7 @@ static void ceph_msg_data_destroy(struct ceph_msg_data *data)
> }
>
> void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
> - size_t length, size_t alignment, bool own_pages)
> + size_t length, size_t offset, bool own_pages)
I think the order "size_t offset, size_t length" would look more logical here. But
it's not critical at all.
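Just as an illustration, I mean something like this (the same prototype from
this patch, only with the arguments swapped):

void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
			     size_t offset, size_t length, bool own_pages);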
> {
> struct ceph_msg_data *data;
>
> @@ -1892,7 +1892,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
> data->type = CEPH_MSG_DATA_PAGES;
> data->pages = pages;
> data->length = length;
> - data->alignment = alignment & ~PAGE_MASK;
> + data->offset = offset & ~PAGE_MASK;
> data->own_pages = own_pages;
>
> msg->data_length += length;
> diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
> index b24afec24138..e359e70ad47e 100644
> --- a/net/ceph/osd_client.c
> +++ b/net/ceph/osd_client.c
> @@ -130,13 +130,13 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
> * Consumes @pages if @own_pages is true.
> */
> static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
> - struct page **pages, u64 length, u32 alignment,
> + struct page **pages, u64 length, u32 offset,
And here too...
> bool pages_from_pool, bool own_pages)
> {
> osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
> osd_data->pages = pages;
> osd_data->length = length;
> - osd_data->alignment = alignment;
> + osd_data->offset = offset;
> osd_data->pages_from_pool = pages_from_pool;
> osd_data->own_pages = own_pages;
> }
> @@ -196,26 +196,26 @@ EXPORT_SYMBOL(osd_req_op_extent_osd_data);
>
> void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
> unsigned int which, struct page **pages,
> - u64 length, u32 alignment,
> + u64 length, u32 offset,
Interesting... The length is 64 bits, but the offset is only 32 bits. I assume the
length is in bytes while the offset is just a byte offset within the first page,
but the difference in types still looks slightly strange.
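For what it's worth, if I remember the helper correctly, calc_pages_for()
already takes both values as u64, roughly:

static inline int calc_pages_for(u64 off, u64 len)
{
	return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (off >> PAGE_SHIFT);
}

so callers end up widening the u32 offset back to u64 anyway.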
> bool pages_from_pool, bool own_pages)
> {
> struct ceph_osd_data *osd_data;
>
> osd_data = osd_req_op_raw_data_in(osd_req, which);
> - ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> + ceph_osd_data_pages_init(osd_data, pages, length, offset,
> pages_from_pool, own_pages);
> }
> EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
>
> void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
> unsigned int which, struct page **pages,
> - u64 length, u32 alignment,
> + u64 length, u32 offset,
The same strange thing here...
> bool pages_from_pool, bool own_pages)
> {
> struct ceph_osd_data *osd_data;
>
> osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
> - ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> + ceph_osd_data_pages_init(osd_data, pages, length, offset,
> pages_from_pool, own_pages);
> }
> EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
> @@ -312,12 +312,12 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
>
> void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
> unsigned int which, struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool, bool own_pages)
> + u32 offset, bool pages_from_pool, bool own_pages)
> {
> struct ceph_osd_data *osd_data;
>
> osd_data = osd_req_op_data(osd_req, which, cls, request_data);
> - ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> + ceph_osd_data_pages_init(osd_data, pages, length, offset,
> pages_from_pool, own_pages);
> osd_req->r_ops[which].cls.indata_len += length;
> osd_req->r_ops[which].indata_len += length;
> @@ -344,12 +344,12 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
>
> void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
> unsigned int which, struct page **pages, u64 length,
> - u32 alignment, bool pages_from_pool, bool own_pages)
> + u32 offset, bool pages_from_pool, bool own_pages)
> {
> struct ceph_osd_data *osd_data;
>
> osd_data = osd_req_op_data(osd_req, which, cls, response_data);
> - ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> + ceph_osd_data_pages_init(osd_data, pages, length, offset,
> pages_from_pool, own_pages);
> }
> EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
> @@ -382,7 +382,7 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
> if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
> int num_pages;
>
> - num_pages = calc_pages_for((u64)osd_data->alignment,
> + num_pages = calc_pages_for((u64)osd_data->offset,
> (u64)osd_data->length);
As far as I can see, length is already u64, but offset is u32. Why not make both
fields u64? Then we wouldn't need the (u64) casts on osd_data->length/offset
here.
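Just as a sketch of what I mean (not a request for this series):

	struct {
		struct page **pages;
		u64 length;
		u64 offset;
		bool pages_from_pool;
		bool own_pages;
	};

Then the (u64) casts here and in writepages_finish() could simply go away.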
> ceph_release_page_vector(osd_data->pages, num_pages);
> } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
> @@ -969,7 +969,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
> BUG_ON(length > (u64) SIZE_MAX);
> if (length)
> ceph_msg_data_add_pages(msg, osd_data->pages,
> - length, osd_data->alignment, false);
> + length, osd_data->offset, false);
> } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
> BUG_ON(!length);
> ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
>
>
Thanks,
Slava.