From: Ming Lei <tom.leiming@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, linux-block@vger.kernel.org
Cc: bpf@vger.kernel.org, Alexei Starovoitov <ast@kernel.org>,
	Martin KaFai Lau <martin.lau@linux.dev>,
	Yonghong Song <yonghong.song@linux.dev>,
	Ming Lei <tom.leiming@gmail.com>
Subject: [RFC PATCH 17/22] ublk: bpf: attach bpf aio prog to ublk device
Date: Tue,  7 Jan 2025 20:04:08 +0800
Message-ID: <20250107120417.1237392-18-tom.leiming@gmail.com>
In-Reply-To: <20250107120417.1237392-1-tom.leiming@gmail.com>

Attach the bpf aio program to the ublk device before adding the ublk
disk, and detach it after the disk is removed. When the bpf aio prog is
unregistered, all attached devices are detached from it automatically.

The ublk device needs to provide the bpf aio struct_ops ID for attaching
the specific prog, and each ublk device may attach to only a single bpf
aio prog.
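
For reference, here is a minimal sketch of how a ublk server could ask
for bpf aio attachment through the new parameter. The field names and
flag values come from the UAPI change below; UBLK_PARAM_TYPE_BPF, the ID
variables and the surrounding setup are assumptions based on earlier
patches of this series:

	/* sketch only: fill the bpf params before UBLK_CMD_SET_PARAMS */
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BPF,	/* assumed param type flag */
		.bpf = {
			.flags = UBLK_BPF_HAS_OPS_ID | UBLK_BPF_HAS_AIO_OPS_ID,
			.ops_id = ublk_ops_id,		/* ublk bpf prog struct_ops ID */
			.aio_ops_id = aio_ops_id,	/* bpf aio prog struct_ops ID */
		},
	};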

This way the attached bpf aio prog can be used to submit bpf aio
requests for handling ublk IO.

Since the bpf aio prog is attached to the ublk device, the ublk bpf prog
has to provide one kfunc to assign a 'struct bpf_aio_complete_ops *' to
the 'struct bpf_aio' instance.
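
To illustrate the kfunc requirement, here is a rough sketch of the prog
side. ublk_bpf_aio_assign_ops() is a hypothetical name standing in for
the kfunc added later in this series, and 'io', 'op', 'backing_fd',
'pos' and 'bytes' are placeholders; only the bpf_aio_alloc_sleepable()
and bpf_aio_submit() signatures are taken from bpf_aio.h below:

	/* sketch only: assign the attached ops before submitting the aio */
	struct bpf_aio *aio = bpf_aio_alloc_sleepable(op, 0);

	if (!aio)
		return -ENOMEM;

	/* hypothetical kfunc: points aio->ops at the ops attached to this device */
	ublk_bpf_aio_assign_ops(io, aio);

	return bpf_aio_submit(aio, backing_fd, pos, bytes, 0);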

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 drivers/block/ublk/bpf.c         | 81 +++++++++++++++++++++++++++++++-
 drivers/block/ublk/bpf_aio.c     |  4 ++
 drivers/block/ublk/bpf_aio.h     |  4 ++
 drivers/block/ublk/bpf_aio_ops.c | 22 +++++++++
 drivers/block/ublk/ublk.h        | 10 ++++
 include/uapi/linux/ublk_cmd.h    |  4 +-
 6 files changed, 123 insertions(+), 2 deletions(-)

diff --git a/drivers/block/ublk/bpf.c b/drivers/block/ublk/bpf.c
index d5880d61abe5..921bbbcf4d9e 100644
--- a/drivers/block/ublk/bpf.c
+++ b/drivers/block/ublk/bpf.c
@@ -19,6 +19,79 @@ static int ublk_set_bpf_ops(struct ublk_device *ub,
 	return 0;
 }
 
+static int ublk_set_bpf_aio_op(struct ublk_device *ub,
+		struct bpf_aio_complete_ops *ops)
+{
+	int i;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		if (ops && ublk_get_queue(ub, i)->bpf_aio_ops) {
+			ublk_set_bpf_aio_op(ub, NULL);
+			return -EBUSY;
+		}
+		ublk_get_queue(ub, i)->bpf_aio_ops = ops;
+	}
+	return 0;
+}
+
+static int ublk_bpf_aio_prog_attach_cb(struct bpf_prog_consumer *consumer,
+				       struct bpf_prog_provider *provider)
+{
+	struct ublk_device *ub = container_of(consumer, struct ublk_device,
+					      aio_prog);
+	struct bpf_aio_complete_ops *ops = container_of(provider,
+			struct bpf_aio_complete_ops, provider);
+	int ret = -ENODEV;
+
+	if (ublk_get_device(ub)) {
+		ret = ublk_set_bpf_aio_op(ub, ops);
+		if (ret)
+			ublk_put_device(ub);
+	}
+
+	return ret;
+}
+
+static void ublk_bpf_aio_prog_detach_cb(struct bpf_prog_consumer *consumer,
+					bool unreg)
+{
+	struct ublk_device *ub = container_of(consumer, struct ublk_device,
+					      aio_prog);
+
+	if (unreg) {
+		blk_mq_freeze_queue(ub->ub_disk->queue);
+		ublk_set_bpf_aio_op(ub, NULL);
+		blk_mq_unfreeze_queue(ub->ub_disk->queue);
+	} else {
+		ublk_set_bpf_aio_op(ub, NULL);
+	}
+	ublk_put_device(ub);
+}
+
+static const struct bpf_prog_consumer_ops ublk_aio_prog_consumer_ops = {
+	.attach_fn	= ublk_bpf_aio_prog_attach_cb,
+	.detach_fn	= ublk_bpf_aio_prog_detach_cb,
+};
+
+static int ublk_bpf_aio_attach(struct ublk_device *ub)
+{
+	if (!ublk_dev_support_bpf_aio(ub))
+		return 0;
+
+	ub->aio_prog.prog_id = ub->params.bpf.aio_ops_id;
+	ub->aio_prog.ops = &ublk_aio_prog_consumer_ops;
+
+	return bpf_aio_prog_attach(&ub->aio_prog);
+}
+
+static void ublk_bpf_aio_detach(struct ublk_device *ub)
+{
+	if (!ublk_dev_support_bpf_aio(ub))
+		return;
+	bpf_aio_prog_detach(&ub->aio_prog);
+}
+
+
 static int ublk_bpf_prog_attach_cb(struct bpf_prog_consumer *consumer,
 				   struct bpf_prog_provider *provider)
 {
@@ -76,19 +149,25 @@ static const struct bpf_prog_consumer_ops ublk_prog_consumer_ops = {
 
 int ublk_bpf_attach(struct ublk_device *ub)
 {
+	int ret;
+
 	if (!ublk_dev_support_bpf(ub))
 		return 0;
 
 	ub->prog.prog_id = ub->params.bpf.ops_id;
 	ub->prog.ops = &ublk_prog_consumer_ops;
 
-	return ublk_bpf_prog_attach(&ub->prog);
+	ret = ublk_bpf_prog_attach(&ub->prog);
+	if (ret)
+		return ret;
+	return ublk_bpf_aio_attach(ub);
 }
 
 void ublk_bpf_detach(struct ublk_device *ub)
 {
 	if (!ublk_dev_support_bpf(ub))
 		return;
+	ublk_bpf_aio_detach(ub);
 	ublk_bpf_prog_detach(&ub->prog);
 }
 
diff --git a/drivers/block/ublk/bpf_aio.c b/drivers/block/ublk/bpf_aio.c
index 6e93f28f389b..da050be4b710 100644
--- a/drivers/block/ublk/bpf_aio.c
+++ b/drivers/block/ublk/bpf_aio.c
@@ -213,6 +213,10 @@ __bpf_kfunc int bpf_aio_submit(struct bpf_aio *aio, int fd, loff_t pos,
 {
 	struct file *file;
 
+	/*
+	 * ->ops has to be assigned by a kfunc of the consumer subsystem,
+	 * because the bpf prog's lifetime is aligned with that subsystem
+	 */
 	if (!aio->ops)
 		return -EINVAL;
 
diff --git a/drivers/block/ublk/bpf_aio.h b/drivers/block/ublk/bpf_aio.h
index 07fcd43fd2ac..d144c5e20dcb 100644
--- a/drivers/block/ublk/bpf_aio.h
+++ b/drivers/block/ublk/bpf_aio.h
@@ -75,4 +75,8 @@ struct bpf_aio *bpf_aio_alloc_sleepable(unsigned int op, enum bpf_aio_flag aio_f
 void bpf_aio_release(struct bpf_aio *aio);
 int bpf_aio_submit(struct bpf_aio *aio, int fd, loff_t pos, unsigned bytes,
 		unsigned io_flags);
+
+int bpf_aio_prog_attach(struct bpf_prog_consumer *consumer);
+void bpf_aio_prog_detach(struct bpf_prog_consumer *consumer);
+
 #endif
diff --git a/drivers/block/ublk/bpf_aio_ops.c b/drivers/block/ublk/bpf_aio_ops.c
index 12757f634dbd..04ad45fd24e6 100644
--- a/drivers/block/ublk/bpf_aio_ops.c
+++ b/drivers/block/ublk/bpf_aio_ops.c
@@ -120,6 +120,28 @@ static void bpf_aio_unreg(void *kdata, struct bpf_link *link)
 	kfree(curr);
 }
 
+int bpf_aio_prog_attach(struct bpf_prog_consumer *consumer)
+{
+	unsigned id = consumer->prog_id;
+	struct bpf_aio_complete_ops *ops;
+	int ret = -EINVAL;
+
+	mutex_lock(&bpf_aio_ops_lock);
+	ops = xa_load(&bpf_aio_all_ops, id);
+	if (ops && ops->id == id)
+		ret = bpf_prog_consumer_attach(consumer, &ops->provider);
+	mutex_unlock(&bpf_aio_ops_lock);
+
+	return ret;
+}
+
+void bpf_aio_prog_detach(struct bpf_prog_consumer *consumer)
+{
+	mutex_lock(&bpf_aio_ops_lock);
+	bpf_prog_consumer_detach(consumer, false);
+	mutex_unlock(&bpf_aio_ops_lock);
+}
+
 static void bpf_aio_cb(struct bpf_aio *io, long ret)
 {
 }
diff --git a/drivers/block/ublk/ublk.h b/drivers/block/ublk/ublk.h
index 8343e70bd723..2c33f6a94bf2 100644
--- a/drivers/block/ublk/ublk.h
+++ b/drivers/block/ublk/ublk.h
@@ -126,6 +126,7 @@ struct ublk_queue {
 
 #ifdef CONFIG_UBLK_BPF
 	struct ublk_bpf_ops     *bpf_ops;
+	struct bpf_aio_complete_ops     *bpf_aio_ops;
 #endif
 
 	unsigned short force_abort:1;
@@ -159,6 +160,7 @@ struct ublk_device {
 
 #ifdef CONFIG_UBLK_BPF
 	struct bpf_prog_consumer prog;
+	struct bpf_prog_consumer aio_prog;
 #endif
 	struct mutex		mutex;
 
@@ -203,6 +205,14 @@ static inline bool ublk_dev_support_bpf(const struct ublk_device *ub)
 	return ub->dev_info.flags & UBLK_F_BPF;
 }
 
+static inline bool ublk_dev_support_bpf_aio(const struct ublk_device *ub)
+{
+	if (!ublk_dev_support_bpf(ub))
+		return false;
+
+	return ub->params.bpf.flags & UBLK_BPF_HAS_AIO_OPS_ID;
+}
+
 struct ublk_device *ublk_get_device(struct ublk_device *ub);
 struct ublk_device *ublk_get_device_from_id(int idx);
 void ublk_put_device(struct ublk_device *ub);
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 27cf14e65cbc..ed6df4d61e89 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -406,9 +406,11 @@ struct ublk_param_zoned {
 
 struct ublk_param_bpf {
 #define UBLK_BPF_HAS_OPS_ID            (1 << 0)
+#define UBLK_BPF_HAS_AIO_OPS_ID        (1 << 1)
 	__u8	flags;
 	__u8	ops_id;
-	__u8	reserved[6];
+	__u16	aio_ops_id;
+	__u8	reserved[4];
 };
 
 struct ublk_params {
-- 
2.47.0



Thread overview: 28+ messages
2025-01-07 12:03 [RFC PATCH 00/22] ublk: support bpf Ming Lei
2025-01-07 12:03 ` [RFC PATCH 01/22] ublk: remove two unused fields from 'struct ublk_queue' Ming Lei
2025-01-07 12:03 ` [RFC PATCH 02/22] ublk: convert several bool type fields into bitfield of `ublk_queue` Ming Lei
2025-01-07 12:03 ` [RFC PATCH 03/22] ublk: add helper of ublk_need_map_io() Ming Lei
2025-01-07 12:03 ` [RFC PATCH 04/22] ublk: move ublk into one standalone directory Ming Lei
2025-01-07 12:03 ` [RFC PATCH 05/22] ublk: move private definitions into private header Ming Lei
2025-01-07 12:03 ` [RFC PATCH 06/22] ublk: move several helpers to " Ming Lei
2025-01-07 12:03 ` [RFC PATCH 07/22] ublk: bpf: add bpf prog attach helpers Ming Lei
2025-01-07 12:03 ` [RFC PATCH 08/22] ublk: bpf: add bpf struct_ops Ming Lei
2025-01-10  1:43   ` Alexei Starovoitov
2025-01-13  4:08     ` Ming Lei
2025-01-13 21:30       ` Alexei Starovoitov
2025-01-15 11:58         ` Ming Lei
2025-01-15 20:11           ` Amery Hung
2025-01-07 12:04 ` [RFC PATCH 09/22] ublk: bpf: attach bpf prog to ublk device Ming Lei
2025-01-07 12:04 ` [RFC PATCH 10/22] ublk: bpf: add kfunc for ublk bpf prog Ming Lei
2025-01-07 12:04 ` [RFC PATCH 11/22] ublk: bpf: enable ublk-bpf Ming Lei
2025-01-07 12:04 ` [RFC PATCH 12/22] selftests: ublk: add tests for the ublk-bpf initial implementation Ming Lei
2025-01-07 12:04 ` [RFC PATCH 13/22] selftests: ublk: add tests for covering io split Ming Lei
2025-01-07 12:04 ` [RFC PATCH 14/22] selftests: ublk: add tests for covering redirecting to userspace Ming Lei
2025-01-07 12:04 ` [RFC PATCH 15/22] ublk: bpf: add bpf aio kfunc Ming Lei
2025-01-07 12:04 ` [RFC PATCH 16/22] ublk: bpf: add bpf aio struct_ops Ming Lei
2025-01-07 12:04 ` Ming Lei [this message]
2025-01-07 12:04 ` [RFC PATCH 18/22] ublk: bpf: add several ublk bpf aio kfuncs Ming Lei
2025-01-07 12:04 ` [RFC PATCH 19/22] ublk: bpf: wire bpf aio with ublk io handling Ming Lei
2025-01-07 12:04 ` [RFC PATCH 20/22] selftests: add tests for ublk bpf aio Ming Lei
2025-01-07 12:04 ` [RFC PATCH 21/22] selftests: add tests for covering both bpf aio and split Ming Lei
2025-01-07 12:04 ` [RFC PATCH 22/22] ublk: document ublk-bpf & bpf-aio Ming Lei
