From: Ming Lei <tom.leiming@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, linux-block@vger.kernel.org
Cc: bpf@vger.kernel.org, Alexei Starovoitov <ast@kernel.org>,
	Martin KaFai Lau <martin.lau@linux.dev>,
	Yonghong Song <yonghong.song@linux.dev>,
	Ming Lei <tom.leiming@gmail.com>
Subject: [RFC PATCH 19/22] ublk: bpf: wire bpf aio with ublk io handling
Date: Tue, 7 Jan 2025 20:04:10 +0800
Message-ID: <20250107120417.1237392-20-tom.leiming@gmail.com>
In-Reply-To: <20250107120417.1237392-1-tom.leiming@gmail.com>

Add ublk_bpf_aio_prep_io_buf() and call it before running the ublk bpf
prog, wiring everything together.
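
The io buffer is prepared once per request: a request backed by a
single bio reuses the bio's bvec table in place (__bvec_iter_bvec()
resolves to the bvec the iterator currently points at, and any partial
progress within it is recorded in buf.bvec_off), while a multi-bio
request gets a flattened copy allocated with kvmalloc_array(). The
prep runs in the I/O path, hence GFP_NOIO; the copy is marked with
UBLK_BPF_BVEC_ALLOCATED and freed from ublk_bpf_io_dec_ref() once the
last io reference is dropped.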
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
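[ Note for reviewers, not part of the commit message: a bpf aio
  backend might consume the prepared buffer much the way
  drivers/block/loop.c consumes its per-request bvec array. A minimal
  sketch, assuming a hypothetical helper name and caller-supplied
  'dir'/'bytes' arguments (neither is part of this series):

	/* Illustrative only: wrap the prepared buffer in an iov_iter. */
	static void ublk_bpf_io_iter(const struct ublk_bpf_io *io,
				     struct iov_iter *iter,
				     unsigned int dir, size_t bytes)
	{
		iov_iter_bvec(iter, dir, io->buf.bvec, io->buf.nr_bvec, bytes);
		/* buf.bvec_off is where the request's data starts inside
		 * the first bvec; loop applies its offset the same way.
		 */
		iter->iov_offset = io->buf.bvec_off;
	}

  Here 'dir' would be ITER_SOURCE for a write and ITER_DEST for a
  read, and 'bytes' would come from blk_rq_bytes(). ]
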
 drivers/block/ublk/bpf.h     | 13 +++++++++
 drivers/block/ublk/bpf_ops.c | 51 +++++++++++++++++++++++++++++++++++-
 drivers/block/ublk/main.c    |  5 ----
 drivers/block/ublk/ublk.h    |  6 +++++
 4 files changed, 69 insertions(+), 6 deletions(-)

diff --git a/drivers/block/ublk/bpf.h b/drivers/block/ublk/bpf.h
index 0ab25743ae7d..a3d238bc707d 100644
--- a/drivers/block/ublk/bpf.h
+++ b/drivers/block/ublk/bpf.h
@@ -99,6 +99,9 @@ static inline void ublk_bpf_io_dec_ref(struct ublk_bpf_io *io)
 			ubq->bpf_ops->release_io_cmd(io);
 	}
 
+	if (test_bit(UBLK_BPF_BVEC_ALLOCATED, &io->flags))
+		kvfree(io->buf.bvec);
+
 	if (test_bit(UBLK_BPF_IO_COMPLETED, &io->flags)) {
 		smp_rmb();
 		__clear_bit(UBLK_BPF_IO_PREP, &io->flags);
@@ -158,6 +161,11 @@ static inline queue_io_cmd_t ublk_get_bpf_any_io_cb(struct ublk_queue *ubq)
 	return ublk_get_bpf_io_cb_daemon(ubq);
 }
 
+static inline bool ublk_support_bpf_aio(const struct ublk_queue *ubq)
+{
+	return ublk_support_bpf(ubq) && ubq->bpf_aio_ops;
+}
+
 int ublk_bpf_init(void);
 int ublk_bpf_struct_ops_init(void);
 int ublk_bpf_prog_attach(struct bpf_prog_consumer *consumer);
@@ -190,6 +198,11 @@ static inline queue_io_cmd_t ublk_get_bpf_any_io_cb(struct ublk_queue *ubq)
 	return NULL;
 }
 
+static inline bool ublk_support_bpf_aio(const struct ublk_queue *ubq)
+{
+	return false;
+}
+
 static inline int ublk_bpf_init(void)
 {
 	return 0;
diff --git a/drivers/block/ublk/bpf_ops.c b/drivers/block/ublk/bpf_ops.c
index 05d8d415b30d..7085eab5e99b 100644
--- a/drivers/block/ublk/bpf_ops.c
+++ b/drivers/block/ublk/bpf_ops.c
@@ -155,6 +155,49 @@ void ublk_bpf_prog_detach(struct bpf_prog_consumer *consumer)
 	mutex_unlock(&ublk_bpf_ops_lock);
 }
 
+static int ublk_bpf_aio_prep_io_buf(const struct request *req)
+{
+	struct ublk_rq_data *data = blk_mq_rq_to_pdu((struct request *)req);
+	struct ublk_bpf_io *io = &data->bpf_data;
+	struct req_iterator rq_iter;
+	struct bio_vec *bvec;
+	struct bio_vec bv;
+	unsigned offset;
+
+	io->buf.bvec = NULL;
+	io->buf.nr_bvec = 0;
+
+	if (!ublk_rq_has_data(req))
+		return 0;
+
+	rq_for_each_bvec(bv, req, rq_iter)
+		io->buf.nr_bvec++;
+
+	if (!io->buf.nr_bvec)
+		return 0;
+
+	if (req->bio != req->biotail) {
+		int idx = 0;
+
+		bvec = kvmalloc_array(io->buf.nr_bvec, sizeof(struct bio_vec),
+				      GFP_NOIO);
+		if (!bvec)
+			return -ENOMEM;
+
+		offset = 0;
+		rq_for_each_bvec(bv, req, rq_iter)
+			bvec[idx++] = bv;
+		__set_bit(UBLK_BPF_BVEC_ALLOCATED, &io->flags);
+	} else {
+		struct bio *bio = req->bio;
+
+		offset = bio->bi_iter.bi_bvec_done;
+		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+	}
+	io->buf.bvec = bvec;
+	io->buf.bvec_off = offset;
+	return 0;
+}
 
 static void ublk_bpf_prep_io(struct ublk_bpf_io *io,
 			     const struct ublksrv_io_desc *iod)
@@ -180,8 +223,14 @@ bool ublk_run_bpf_handler(struct ublk_queue *ubq, struct request *req,
 	bool res = true;
 	int err;
 
-	if (!test_bit(UBLK_BPF_IO_PREP, &bpf_io->flags))
+	if (!test_bit(UBLK_BPF_IO_PREP, &bpf_io->flags)) {
 		ublk_bpf_prep_io(bpf_io, iod);
+		if (ublk_support_bpf_aio(ubq)) {
+			err = ublk_bpf_aio_prep_io_buf(req);
+			if (err)
+				goto fail;
+		}
+	}
 
 	do {
 		enum ublk_bpf_disposition rc;
diff --git a/drivers/block/ublk/main.c b/drivers/block/ublk/main.c
index 3c2ed9bf924d..1974ebd33ce0 100644
--- a/drivers/block/ublk/main.c
+++ b/drivers/block/ublk/main.c
@@ -512,11 +512,6 @@ void ublk_put_device(struct ublk_device *ub)
 	put_device(&ub->cdev_dev);
 }
 
-static inline bool ublk_rq_has_data(const struct request *rq)
-{
-	return bio_has_data(rq->bio);
-}
-
 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
 {
 	return ublk_get_queue(ub, q_id)->io_cmd_buf;
diff --git a/drivers/block/ublk/ublk.h b/drivers/block/ublk/ublk.h
index 4bd04512c894..00b09589d95c 100644
--- a/drivers/block/ublk/ublk.h
+++ b/drivers/block/ublk/ublk.h
@@ -41,6 +41,7 @@
 enum {
 	UBLK_BPF_IO_PREP = 0,
 	UBLK_BPF_IO_COMPLETED = 1,
+	UBLK_BPF_BVEC_ALLOCATED = 2,
 };
 
 struct ublk_bpf_io {
@@ -215,6 +216,11 @@ static inline bool ublk_dev_support_bpf_aio(const struct ublk_device *ub)
 	return ub->params.bpf.flags & UBLK_BPF_HAS_AIO_OPS_ID;
 }
 
+static inline bool ublk_rq_has_data(const struct request *rq)
+{
+	return bio_has_data(rq->bio);
+}
+
 struct ublk_device *ublk_get_device(struct ublk_device *ub);
 struct ublk_device *ublk_get_device_from_id(int idx);
 void ublk_put_device(struct ublk_device *ub);
--
2.47.0