From: David Howells <dhowells@redhat.com>
To: Ilya Dryomov <idryomov@gmail.com>, Xiubo Li <xiubli@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
	Jeff Layton <jlayton@kernel.org>,
	Dongsheng Yang <dongsheng.yang@easystack.cn>,
	ceph-devel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 05/18] rbd: Use ceph_databuf for rbd_obj_read_sync()
Date: Fri,  4 Aug 2023 14:13:14 +0100
Message-ID: <20230804131327.2574082-6-dhowells@redhat.com>
In-Reply-To: <20230804131327.2574082-1-dhowells@redhat.com>

Convert rbd_obj_read_sync() to take a ceph_databuf to convey the data
instead of a raw kernel buffer.  The databuf is attached directly to the
OSD read request, so the temporary page vector and the
ceph_copy_from_page_vector() copy-out on completion are no longer needed.

Adjust the caller, rbd_dev_v1_header_info(), to allocate the on-disk
header as a ceph_databuf, map the first backing page to parse it, and
grow the buffer with ceph_databuf_reserve() when the snapshot array and
name table turn out to need more space.
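
To illustrate the new calling convention, here is a minimal caller-side
sketch (not part of this patch; rbd_dev, oid, oloc and len are assumed to
be in scope, and the ceph_databuf helpers introduced earlier in this
series are assumed to behave as they are used below):

	struct ceph_databuf *dbuf;
	void *p;
	int ret;

	/* Allocate a databuf with one page-backed fragment large enough
	 * for the data to be read. */
	dbuf = ceph_databuf_alloc(1, len, GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	/* The databuf is handed straight to the OSD read request; no
	 * temporary page vector or copy-out is needed. */
	ret = rbd_obj_read_sync(rbd_dev, oid, oloc, dbuf, len);
	if (ret < 0)
		goto out;

	/* Map the first backing page to examine the returned data. */
	p = kmap_ceph_databuf_page(dbuf, 0);
	/* ... parse the data at p ... */
	kunmap_local(p);

out:
	ceph_databuf_release(dbuf);

If the buffer later turns out to be too small, it can be grown with
ceph_databuf_reserve() and the read retried, which is what the v1 header
reader below does.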

Signed-off-by: David Howells <dhowells@redhat.com>
---
 drivers/block/rbd.c | 45 ++++++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 25 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 08d0908d0583..2a161b03dd7a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4762,13 +4762,10 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 			     struct ceph_object_id *oid,
 			     struct ceph_object_locator *oloc,
-			     void *buf, int buf_len)
-
+			     struct ceph_databuf *dbuf, int len)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	struct ceph_osd_request *req;
-	struct page **pages;
-	int num_pages = calc_pages_for(0, buf_len);
 	int ret;
 
 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
@@ -4779,15 +4776,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	ceph_oloc_copy(&req->r_base_oloc, oloc);
 	req->r_flags = CEPH_OSD_FLAG_READ;
 
-	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
-	if (IS_ERR(pages)) {
-		ret = PTR_ERR(pages);
-		goto out_req;
-	}
-
-	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
-	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
-					 true);
+	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, len, 0, 0);
+	osd_req_op_extent_osd_databuf(req, 0, dbuf);
 
 	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
 	if (ret)
@@ -4795,9 +4785,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 
 	ceph_osdc_start_request(osdc, req);
 	ret = ceph_osdc_wait_request(osdc, req);
-	if (ret >= 0)
-		ceph_copy_from_page_vector(pages, buf, 0, ret);
-
 out_req:
 	ceph_osdc_put_request(req);
 	return ret;
@@ -4810,12 +4797,18 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
  */
 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 {
-	struct rbd_image_header_ondisk *ondisk = NULL;
+	struct rbd_image_header_ondisk *ondisk;
+	struct ceph_databuf *dbuf = NULL;
 	u32 snap_count = 0;
 	u64 names_size = 0;
 	u32 want_count;
 	int ret;
 
+	dbuf = ceph_databuf_alloc(1, sizeof(*ondisk), GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+	ondisk = kmap_ceph_databuf_page(dbuf, 0);
+
 	/*
 	 * The complete header will include an array of its 64-bit
 	 * snapshot ids, followed by the names of those snapshots as
@@ -4826,17 +4819,18 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 	do {
 		size_t size;
 
-		kfree(ondisk);
-
 		size = sizeof (*ondisk);
 		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
 		size += names_size;
-		ondisk = kmalloc(size, GFP_KERNEL);
-		if (!ondisk)
-			return -ENOMEM;
+
+		ret = -ENOMEM;
+		if (size > dbuf->limit &&
+		    ceph_databuf_reserve(dbuf, size - dbuf->limit,
+					 GFP_KERNEL) < 0)
+			goto out;
 
 		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
-					&rbd_dev->header_oloc, ondisk, size);
+					&rbd_dev->header_oloc, dbuf, size);
 		if (ret < 0)
 			goto out;
 		if ((size_t)ret < size) {
@@ -4845,6 +4839,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 				size, ret);
 			goto out;
 		}
+
 		if (!rbd_dev_ondisk_valid(ondisk)) {
 			ret = -ENXIO;
 			rbd_warn(rbd_dev, "invalid header");
@@ -4858,8 +4853,8 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 
 	ret = rbd_header_from_disk(rbd_dev, ondisk);
 out:
-	kfree(ondisk);
-
+	kunmap_local(ondisk);
+	ceph_databuf_release(dbuf);
 	return ret;
 }
 

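
For reference, rbd_obj_read_sync() should look roughly like this once the
hunks above are applied.  The unchanged lines between the two hunks (the
allocation failure check and the ceph_oid_copy() call) are reconstructed
from the surrounding code rather than taken from the diff, so treat them
as approximate:

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     struct ceph_databuf *dbuf, int len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Reconstructed context: copy the object id and locator into the
	 * request, as before this patch. */
	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	/* Point the single read op directly at the caller's databuf. */
	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, len, 0, 0);
	osd_req_op_extent_osd_databuf(req, 0, dbuf);

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	ceph_osdc_start_request(osdc, req);
	ret = ceph_osdc_wait_request(osdc, req);
out_req:
	ceph_osdc_put_request(req);
	return ret;
}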

Thread overview (21+ messages):
2023-08-04 13:13 [RFC PATCH 00/18] ceph, rbd: Collapse all the I/O types down to something iov_iter-based David Howells
2023-08-04 13:13 ` [RFC PATCH 01/18] iov_iter: Add function to see if buffer is all zeros David Howells
2023-08-04 13:13 ` [RFC PATCH 02/18] ceph: Rename alignment to offset David Howells
2023-08-04 13:13 ` [RFC PATCH 03/18] ceph: Add a new data container type, ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 04/18] ceph: Convert ceph_mds_request::r_pagelist to a databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 05/18] rbd: Use ceph_databuf for rbd_obj_read_sync() David Howells [this message]
2023-08-04 13:13 ` [RFC PATCH 06/18] ceph: Change ceph_osdc_call()'s reply to a ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 07/18] ceph: Unexport osd_req_op_cls_request_data_pages() David Howells
2023-08-04 13:13 ` [RFC PATCH 08/18] ceph: Remove osd_req_op_cls_response_data_pages() David Howells
2023-08-04 13:13 ` [RFC PATCH 09/18] ceph: Convert notify_id_pages to a ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 10/18] rbd: Switch from using bvec_iter to iov_iter David Howells
2023-08-04 13:13 ` [RFC PATCH 11/18] ceph: Remove bvec and bio data container types David Howells
2023-08-04 13:13 ` [RFC PATCH 12/18] ceph: Convert some page arrays to ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 13/18] ceph: Convert users of ceph_pagelist " David Howells
2023-08-04 13:13 ` [RFC PATCH 14/18] ceph: Remove ceph_pagelist David Howells
2023-08-04 13:13 ` [RFC PATCH 15/18] ceph: Convert ceph_osdc_notify() reply to ceph_databuf David Howells
2023-08-04 13:13 ` [RFC PATCH 16/18] ceph: Remove CEPH_OS_DATA_TYPE_PAGES and its attendant helpers David Howells
2023-08-04 13:13 ` [RFC PATCH 17/18] ceph: Remove CEPH_MSG_DATA_PAGES and its helpers David Howells
2023-08-04 13:13 ` [RFC PATCH 18/18] ceph: Don't use data_pages David Howells
2023-08-28  1:32   ` Xiubo Li
2023-08-28  1:30 ` [RFC PATCH 00/18] ceph, rbd: Collapse all the I/O types down to something iov_iter-based Xiubo Li
