From: David Howells <dhowells@redhat.com>
To: Ilya Dryomov <idryomov@gmail.com>, Xiubo Li <xiubli@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
Jeff Layton <jlayton@kernel.org>,
Dongsheng Yang <dongsheng.yang@easystack.cn>,
ceph-devel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 17/18] ceph: Remove CEPH_MSG_DATA_PAGES and its helpers
Date: Fri, 4 Aug 2023 14:13:26 +0100 [thread overview]
Message-ID: <20230804131327.2574082-18-dhowells@redhat.com> (raw)
In-Reply-To: <20230804131327.2574082-1-dhowells@redhat.com>
---
include/linux/ceph/messenger.h | 26 ++-------
net/ceph/messenger.c | 98 +---------------------------------
net/ceph/osd_client.c | 2 -
3 files changed, 5 insertions(+), 121 deletions(-)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index a2489e266bff..f48657eef648 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -118,23 +118,14 @@ struct ceph_messenger {
enum ceph_msg_data_type {
CEPH_MSG_DATA_NONE, /* message contains no data payload */
CEPH_MSG_DATA_DATABUF, /* data source/destination is a data buffer */
- CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */
CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
struct ceph_msg_data {
enum ceph_msg_data_type type;
- struct iov_iter iter;
bool release_dbuf;
- union {
- struct ceph_databuf *dbuf;
- struct {
- struct page **pages;
- size_t length; /* total # bytes */
- unsigned int offset; /* first page */
- bool own_pages;
- };
- };
+ struct iov_iter iter;
+ struct ceph_databuf *dbuf;
};
struct ceph_msg_data_cursor {
@@ -144,17 +135,8 @@ struct ceph_msg_data_cursor {
size_t resid; /* bytes not yet consumed */
int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
- union {
- struct { /* pages */
- unsigned int page_offset; /* offset in page */
- unsigned short page_index; /* index in array */
- unsigned short page_count; /* pages in array */
- };
- struct {
- struct iov_iter iov_iter;
- unsigned int lastlen;
- };
- };
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
};
/*
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5b28c27858b2..acbdd086cd7a 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -710,70 +710,6 @@ void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
}
}
-/*
- * For a page array, a piece comes from the first page in the array
- * that has not already been fully consumed.
- */
-static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
- size_t length)
-{
- struct ceph_msg_data *data = cursor->data;
- int page_count;
-
- BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
-
- BUG_ON(!data->pages);
- BUG_ON(!data->length);
-
- cursor->resid = min(length, data->length);
- page_count = calc_pages_for(data->offset, (u64)data->length);
- cursor->page_offset = data->offset & ~PAGE_MASK;
- cursor->page_index = 0;
- BUG_ON(page_count > (int)USHRT_MAX);
- cursor->page_count = (unsigned short)page_count;
- BUG_ON(length > SIZE_MAX - cursor->page_offset);
-}
-
-static struct page *
-ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
- size_t *page_offset, size_t *length)
-{
- struct ceph_msg_data *data = cursor->data;
-
- BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
-
- BUG_ON(cursor->page_index >= cursor->page_count);
- BUG_ON(cursor->page_offset >= PAGE_SIZE);
-
- *page_offset = cursor->page_offset;
- *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
- return data->pages[cursor->page_index];
-}
-
-static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
- size_t bytes)
-{
- BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
-
- BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
-
- /* Advance the cursor page offset */
-
- cursor->resid -= bytes;
- cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
- if (!bytes || cursor->page_offset)
- return false; /* more bytes to process in the current page */
-
- if (!cursor->resid)
- return false; /* no more data */
-
- /* Move on to the next page; offset is already at 0 */
-
- BUG_ON(cursor->page_index >= cursor->page_count);
- cursor->page_index++;
- return true;
-}
-
static void ceph_msg_data_iter_cursor_init(struct ceph_msg_data_cursor *cursor,
size_t length)
{
@@ -844,9 +780,6 @@ static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
size_t length = cursor->total_resid;
switch (cursor->data->type) {
- case CEPH_MSG_DATA_PAGES:
- ceph_msg_data_pages_cursor_init(cursor, length);
- break;
case CEPH_MSG_DATA_ITER:
ceph_msg_data_iter_cursor_init(cursor, length);
break;
@@ -883,9 +816,6 @@ struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
struct page *page;
switch (cursor->data->type) {
- case CEPH_MSG_DATA_PAGES:
- page = ceph_msg_data_pages_next(cursor, page_offset, length);
- break;
case CEPH_MSG_DATA_ITER:
page = ceph_msg_data_iter_next(cursor, page_offset, length);
break;
@@ -913,9 +843,6 @@ void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
BUG_ON(bytes > cursor->resid);
switch (cursor->data->type) {
- case CEPH_MSG_DATA_PAGES:
- new_piece = ceph_msg_data_pages_advance(cursor, bytes);
- break;
case CEPH_MSG_DATA_ITER:
new_piece = ceph_msg_data_iter_advance(cursor, bytes);
break;
@@ -1644,12 +1571,8 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
- if (data->release_dbuf) {
+ if (data->release_dbuf)
ceph_databuf_release(data->dbuf);
- } else if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
- int num_pages = calc_pages_for(data->offset, data->length);
- ceph_release_page_vector(data->pages, num_pages);
- }
}
void ceph_msg_data_add_databuf(struct ceph_msg *msg, struct ceph_databuf *dbuf)
@@ -1670,25 +1593,6 @@ void ceph_msg_data_add_databuf(struct ceph_msg *msg, struct ceph_databuf *dbuf)
}
EXPORT_SYMBOL(ceph_msg_data_add_databuf);
-void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
- size_t length, size_t offset, bool own_pages)
-{
- struct ceph_msg_data *data;
-
- BUG_ON(!pages);
- BUG_ON(!length);
-
- data = ceph_msg_data_add(msg);
- data->type = CEPH_MSG_DATA_PAGES;
- data->pages = pages;
- data->length = length;
- data->offset = offset & ~PAGE_MASK;
- data->own_pages = own_pages;
-
- msg->data_length += length;
-}
-EXPORT_SYMBOL(ceph_msg_data_add_pages);
-
void ceph_msg_data_add_iter(struct ceph_msg *msg,
struct iov_iter *iter)
{
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 70f81a0b62c0..6fb78ae14f03 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -829,8 +829,6 @@ EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
struct ceph_osd_data *osd_data)
{
- u64 length = ceph_osd_data_length(osd_data);
-
if (osd_data->type == CEPH_OSD_DATA_TYPE_ITER) {
ceph_msg_data_add_iter(msg, &osd_data->iter);
} else {
next prev parent reply other threads:[~2023-08-04 13:17 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-04 13:13 [RFC PATCH 00/18] ceph, rbd: Collapse all the I/O types down to something iov_iter-based David Howells
2023-08-04 13:13 ` [RFC PATCH 01/18] iov_iter: Add function to see if buffer is all zeros David Howells
2023-08-04 13:13 ` [RFC PATCH 02/18] ceph: Rename alignment to offset David Howells
2023-08-04 13:13 ` [RFC PATCH 03/18] ceph: Add a new data container type, ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 04/18] ceph: Convert ceph_mds_request::r_pagelist to a databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 05/18] rbd: Use ceph_databuf for rbd_obj_read_sync() David Howells
2023-08-04 13:13 ` [RFC PATCH 06/18] ceph: Change ceph_osdc_call()'s reply to a ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 07/18] ceph: Unexport osd_req_op_cls_request_data_pages() David Howells
2023-08-04 13:13 ` [RFC PATCH 08/18] ceph: Remove osd_req_op_cls_response_data_pages() David Howells
2023-08-04 13:13 ` [RFC PATCH 09/18] ceph: Convert notify_id_pages to a ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 10/18] rbd: Switch from using bvec_iter to iov_iter David Howells
2023-08-04 13:13 ` [RFC PATCH 11/18] ceph: Remove bvec and bio data container types David Howells
2023-08-04 13:13 ` [RFC PATCH 12/18] ceph: Convert some page arrays to ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 13/18] ceph: Convert users of ceph_pagelist " David Howells
2023-08-04 13:13 ` [RFC PATCH 14/18] ceph: Remove ceph_pagelist David Howells
2023-08-04 13:13 ` [RFC PATCH 15/18] ceph: Convert ceph_osdc_notify() reply to ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 16/18] ceph: Remove CEPH_OSD_DATA_TYPE_PAGES and its attendant helpers David Howells
2023-08-04 13:13 ` David Howells [this message]
2023-08-04 13:13 ` [RFC PATCH 18/18] ceph: Don't use data_pages David Howells
2023-08-28 1:32 ` Xiubo Li
2023-08-28 1:30 ` [RFC PATCH 00/18] ceph, rbd: Collapse all the I/O types down to something iov_iter-based Xiubo Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230804131327.2574082-18-dhowells@redhat.com \
--to=dhowells@redhat.com \
--cc=ceph-devel@vger.kernel.org \
--cc=dongsheng.yang@easystack.cn \
--cc=idryomov@gmail.com \
--cc=jlayton@kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=xiubli@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).