From: David Howells <dhowells@redhat.com>
To: Ilya Dryomov <idryomov@gmail.com>, Xiubo Li <xiubli@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
Jeff Layton <jlayton@kernel.org>,
Dongsheng Yang <dongsheng.yang@easystack.cn>,
ceph-devel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 12/18] ceph: Convert some page arrays to ceph_databuf
Date: Fri, 4 Aug 2023 14:13:21 +0100
Message-ID: <20230804131327.2574082-13-dhowells@redhat.com>
In-Reply-To: <20230804131327.2574082-1-dhowells@redhat.com>
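Convert several of the remaining page-array users in rbd and the OSD
client to ceph_databuf- or iov_iter-based data containers: the STAT
reply buffer in rbd_osd_setup_stat(), the notify reply in
linger_commit_cb(), the reply buffer in ceph_osdc_list_watchers(), the
class-op request data in ceph_osdc_call() and the encoded parameter
page in osd_req_op_copy_from_init().  This removes more uses of
ceph_alloc_page_vector() and page_address() in favour of
ceph_databuf_alloc() and scoped local kmaps, and adds
osd_req_op_raw_data_in_databuf() for attaching a databuf as raw
response data.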
---
drivers/block/rbd.c | 12 +++---
include/linux/ceph/osd_client.h | 3 ++
net/ceph/osd_client.c | 74 +++++++++++++++++++++------------
3 files changed, 55 insertions(+), 34 deletions(-)
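The general shape of the reply-buffer conversion is sketched below for
a single-page reply.  This is an illustrative sketch only: the
signatures of ceph_databuf_alloc(), ceph_osd_databuf_init() and
kmap_ceph_databuf_page() are assumed from the way they are called in
the hunks that follow rather than quoted from the headers.

	struct ceph_databuf *dbuf;
	void *p;

	/* Allocate one page of reply space.  Unlike
	 * ceph_alloc_page_vector(), this returns NULL on failure
	 * rather than an ERR_PTR, so the caller returns -ENOMEM
	 * itself. */
	dbuf = ceph_databuf_alloc(1, PAGE_SIZE, GFP_NOIO);
	if (!dbuf)
		return -ENOMEM;

	/* Attach the databuf to the op as the reply buffer. */
	ceph_osd_databuf_init(osd_req_op_data(req, 0, list_watchers,
					      response_data), dbuf);

	/* After completion, map page 0 to decode the reply.  The
	 * mapping is kmap_local-based and must be undone with
	 * kunmap_local() in the same context. */
	p = kmap_ceph_databuf_page(dbuf, 0);
	/* ... decode between p and p + req->r_ops[0].outdata_len ... */
	kunmap_local(p);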
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1756973b696f..950b63eb41de 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2108,7 +2108,7 @@ static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
- struct page **pages;
+ struct ceph_databuf *dbuf;
/*
* The response data for a STAT call consists of:
@@ -2118,14 +2118,12 @@ static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
* le32 tv_nsec;
* } mtime;
*/
- pages = ceph_alloc_page_vector(1, GFP_NOIO);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ dbuf = ceph_databuf_alloc(1, 8 + sizeof(struct ceph_timespec), GFP_NOIO);
+ if (!dbuf)
+ return -ENOMEM;
osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
- osd_req_op_raw_data_in_pages(osd_req, which, pages,
- 8 + sizeof(struct ceph_timespec),
- 0, false, true);
+ osd_req_op_raw_data_in_databuf(osd_req, which, dbuf);
return 0;
}
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index fd91c5d92600..fec78550d5ce 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -483,6 +483,9 @@ extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 offset, bool pages_from_pool,
bool own_pages);
+void osd_req_op_raw_data_in_databuf(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_databuf *databuf);
extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
unsigned int which,
struct ceph_pagelist *pagelist);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6bbd9fe780c3..c83ae9bb335e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -181,6 +181,17 @@ osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
+void osd_req_op_raw_data_in_databuf(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_databuf *dbuf)
+{
+ struct ceph_osd_data *osd_data;
+
+ osd_data = osd_req_op_raw_data_in(osd_req, which);
+ ceph_osd_databuf_init(osd_data, dbuf);
+}
+EXPORT_SYMBOL(osd_req_op_raw_data_in_databuf);
+
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 offset,
@@ -280,17 +291,16 @@ void osd_req_op_cls_request_data_pagelist(
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
-static void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
- unsigned int which, struct page **pages, u64 length,
- u32 offset, bool pages_from_pool, bool own_pages)
+static void osd_req_op_cls_request_data_iter(
+ struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
- ceph_osd_data_pages_init(osd_data, pages, length, offset,
- pages_from_pool, own_pages);
- osd_req->r_ops[which].cls.indata_len += length;
- osd_req->r_ops[which].indata_len += length;
+ ceph_osd_iter_init(osd_data, iter);
+ osd_req->r_ops[which].cls.indata_len += iter->count;
+ osd_req->r_ops[which].indata_len += iter->count;
}
void osd_req_op_cls_response_databuf(struct ceph_osd_request *osd_req,
@@ -3017,10 +3027,12 @@ static void linger_commit_cb(struct ceph_osd_request *req)
if (!lreq->is_watch) {
struct ceph_osd_data *osd_data =
osd_req_op_data(req, 0, notify, response_data);
- void *p = page_address(osd_data->pages[0]);
+ void *p;
WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
- osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
+ osd_data->type != CEPH_OSD_DATA_TYPE_DATABUF);
+
+ p = kmap_ceph_databuf_page(osd_data->dbuf, 0);
/* make note of the notify_id */
if (req->r_ops[0].outdata_len >= sizeof(u64)) {
@@ -3030,6 +3042,8 @@ static void linger_commit_cb(struct ceph_osd_request *req)
} else {
dout("lreq %p no notify_id\n", lreq);
}
+
+ kunmap_local(p);
}
out:
@@ -5032,7 +5046,7 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
u32 *num_watchers)
{
struct ceph_osd_request *req;
- struct page **pages;
+ struct ceph_databuf *dbuf;
int ret;
req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
@@ -5043,16 +5057,16 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
- pages = ceph_alloc_page_vector(1, GFP_NOIO);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ dbuf = ceph_databuf_alloc(1, PAGE_SIZE, GFP_NOIO);
+ if (!dbuf) {
+ ret = -ENOMEM;
goto out_put_req;
}
osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
- ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
- response_data),
- pages, PAGE_SIZE, 0, false, true);
+ ceph_osd_databuf_init(osd_req_op_data(req, 0, list_watchers,
+ response_data),
+ dbuf);
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
@@ -5061,10 +5075,11 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
- void *p = page_address(pages[0]);
+ void *p = kmap_ceph_databuf_page(dbuf, 0);
void *const end = p + req->r_ops[0].outdata_len;
ret = decode_watchers(&p, end, watchers, num_watchers);
+ kunmap_local(p);
}
out_put_req:
@@ -5111,6 +5126,8 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct ceph_databuf *response)
{
struct ceph_osd_request *req;
+ struct iov_iter iter;
+ struct bio_vec bv;
int ret;
if (req_len > PAGE_SIZE)
@@ -5128,9 +5145,11 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
if (ret)
goto out_put_req;
- if (req_page)
- osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
- 0, false, false);
+ if (req_page) {
+ bvec_set_page(&bv, req_page, req_len, 0);
+ iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, req_len);
+ osd_req_op_cls_request_data_iter(req, 0, &iter);
+ }
if (response)
osd_req_op_cls_response_databuf(req, 0, response);
@@ -5285,12 +5304,12 @@ int osd_req_op_copy_from_init(struct ceph_osd_request *req,
u8 copy_from_flags)
{
struct ceph_osd_req_op *op;
- struct page **pages;
+ struct ceph_databuf *dbuf;
void *p, *end;
- pages = ceph_alloc_page_vector(1, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ dbuf = ceph_databuf_alloc(1, PAGE_SIZE, GFP_KERNEL);
+ if (!dbuf)
+ return -ENOMEM;
op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
dst_fadvise_flags);
@@ -5299,16 +5318,17 @@ int osd_req_op_copy_from_init(struct ceph_osd_request *req,
op->copy_from.flags = copy_from_flags;
op->copy_from.src_fadvise_flags = src_fadvise_flags;
- p = page_address(pages[0]);
+ p = kmap_ceph_databuf_page(dbuf, 0);
end = p + PAGE_SIZE;
ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
encode_oloc(&p, end, src_oloc);
ceph_encode_32(&p, truncate_seq);
ceph_encode_64(&p, truncate_size);
op->indata_len = PAGE_SIZE - (end - p);
+ dbuf->length = op->indata_len;
+ kunmap_local(p);
- ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
- op->indata_len, 0, false, true);
+ ceph_osd_databuf_init(&op->copy_from.osd_data, dbuf);
return 0;
}
EXPORT_SYMBOL(osd_req_op_copy_from_init);
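For class-op request data, the page array is likewise replaced by a
single-entry bvec iterator wrapped around the caller's page, as in
ceph_osdc_call() above.  A minimal sketch, assuming only the calls
visible in this patch; note that bv and iter live on the caller's
stack, which is safe here because ceph_osdc_call() waits for the
request synchronously:

	struct bio_vec bv;
	struct iov_iter iter;

	/* Describe the page: (bvec, page, length, offset). */
	bvec_set_page(&bv, req_page, req_len, 0);

	/* ITER_SOURCE marks the iterator as data to be sent. */
	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, req_len);
	osd_req_op_cls_request_data_iter(req, 0, &iter);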