From: Ming Lei <tom.leiming@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, linux-block@vger.kernel.org
Cc: bpf@vger.kernel.org, Alexei Starovoitov <ast@kernel.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
Yonghong Song <yonghong.song@linux.dev>,
Ming Lei <tom.leiming@gmail.com>
Subject: [RFC PATCH 21/22] selftests: add tests for covering both bpf aio and split
Date: Tue, 7 Jan 2025 20:04:12 +0800 [thread overview]
Message-ID: <20250107120417.1237392-22-tom.leiming@gmail.com> (raw)
In-Reply-To: <20250107120417.1237392-1-tom.leiming@gmail.com>
Add ublk-stripe selftests to cover both the bpf aio and io split features.
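
The stripe target splits every ublk io command at chunk boundaries and
submits each chunk-sized split as one bpf aio against the backing file
that owns the chunk, with the backing files selected in round-robin
order. A per-io reference count stored in the io_map hash map ties the
splits together, so the io command is completed only after all of its
bpf aios have finished. The per-device stripe setting (chunk_shift plus
the backing file descriptors) is passed to the prog through the pinned
stripe_map hash map, keyed by ublk device id.

As an illustration, with the default chunk_shift of 12 and the two
backing files used by the tests, a 16KB write at offset 0 is split into
four 4KB bpf aios that are submitted to the two files alternately.

test_stripe_01.sh runs fio write-and-verify over the striped ublk disk,
and test_stripe_02.sh covers mkfs & mount over it.
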
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
tools/testing/selftests/ublk/Makefile | 3 +
.../selftests/ublk/progs/ublk_stripe.c | 319 ++++++++++++++++++
.../testing/selftests/ublk/test_stripe_01.sh | 35 ++
.../testing/selftests/ublk/test_stripe_02.sh | 26 ++
tools/testing/selftests/ublk/ublk_bpf.c | 88 ++++-
5 files changed, 468 insertions(+), 3 deletions(-)
create mode 100644 tools/testing/selftests/ublk/progs/ublk_stripe.c
create mode 100755 tools/testing/selftests/ublk/test_stripe_01.sh
create mode 100755 tools/testing/selftests/ublk/test_stripe_02.sh
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index 2540ae7a75a3..7c30c5728694 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -27,6 +27,9 @@ TEST_PROGS += test_null_04.sh
TEST_PROGS += test_loop_01.sh
TEST_PROGS += test_loop_02.sh
+TEST_PROGS += test_stripe_01.sh
+TEST_PROGS += test_stripe_02.sh
+
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS_EXTENDED = ublk_bpf
diff --git a/tools/testing/selftests/ublk/progs/ublk_stripe.c b/tools/testing/selftests/ublk/progs/ublk_stripe.c
new file mode 100644
index 000000000000..98a59239047c
--- /dev/null
+++ b/tools/testing/selftests/ublk/progs/ublk_stripe.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <linux/const.h>
+#include <linux/errno.h>
+#include <linux/falloc.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+//#define DEBUG
+#include "ublk_bpf.h"
+
+/* libbpf v1.4.5 is required for struct_ops to work */
+
+struct ublk_stripe {
+#define MAX_BACKFILES 4
+ unsigned char chunk_shift;
+ unsigned char nr_backfiles;
+ int fds[MAX_BACKFILES];
+};
+
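+/* per-device stripe setting, filled from userspace (ublk_bpf.c) when the device is created */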
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, unsigned int); /* dev id */
+ __type(value, struct ublk_stripe); /* stripe setting */
+} stripe_map SEC(".maps");
+
+/* todo: make this the writable payload of ublk_bpf_io */
+struct ublk_io_payload {
+ unsigned int ref;
+ int res;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10240);
+ __type(key, unsigned long long); /* dev_id + q_id + tag */
+ __type(value, struct ublk_io_payload); /* io payload */
+} io_map SEC(".maps");
+
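+/*
+ * Drop one reference of the parent ublk io command; the final drop
+ * completes the command, with the total byte count for READ/WRITE on
+ * success or with the first recorded error otherwise.
+ */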
+static inline void dec_stripe_io_ref(const struct ublk_bpf_io *io, struct ublk_io_payload *pv, int ret)
+{
+ if (!pv)
+ return;
+
+ if (pv->res >= 0)
+ pv->res = ret;
+
+ if (!__sync_sub_and_fetch(&pv->ref, 1)) {
+ unsigned rw = (io->iod->op_flags & 0xff);
+
+ if (pv->res >= 0 && (rw <= 1))
+ pv->res = io->iod->nr_sectors << 9;
+ ublk_bpf_complete_io(io, pv->res);
+ }
+}
+
+static inline void ublk_stripe_comp_and_release_aio(struct bpf_aio *aio, int ret)
+{
+ struct ublk_bpf_io *io = ublk_bpf_acquire_io_from_aio(aio);
+ struct ublk_io_payload *pv = NULL;
+	unsigned long long io_key;
+
+ if (!io)
+ return;
+
+ io_key = build_io_key(io);
+ pv = bpf_map_lookup_elem(&io_map, &io_key);
+
+ /* drop reference for each underlying aio */
+ dec_stripe_io_ref(io, pv, ret);
+ ublk_bpf_release_io_from_aio(io);
+
+ ublk_bpf_dettach_and_complete_aio(aio);
+ bpf_aio_release(aio);
+}
+
+SEC("struct_ops/bpf_aio_complete_cb")
+void BPF_PROG(ublk_stripe_comp_cb, struct bpf_aio *aio, long ret)
+{
+	BPF_DBG("aio result %ld, back_file %s pos %llx", ret,
+ aio->iocb.ki_filp->f_path.dentry->d_name.name,
+ aio->iocb.ki_pos);
+ ublk_stripe_comp_and_release_aio(aio, ret);
+}
+
+SEC(".struct_ops.link")
+struct bpf_aio_complete_ops stripe_ublk_bpf_aio_ops = {
+ .id = 32,
+ .bpf_aio_complete_cb = (void *)ublk_stripe_comp_cb,
+};
+
+static inline int ublk_stripe_submit_backing_io(const struct ublk_bpf_io *io,
+ int backfile_fd, unsigned long backfile_off,
+ unsigned int backfile_bytes,
+ unsigned int buf_off)
+{
+ const struct ublksrv_io_desc *iod = io->iod;
+ unsigned int op_flags = 0;
+ struct bpf_aio *aio;
+ int res = -EINVAL;
+ int op;
+
+	/* translate the ublk opcode into the backing-file aio operation */
+ switch (iod->op_flags & 0xff) {
+ case 0 /*UBLK_IO_OP_READ*/:
+ op = BPF_AIO_OP_FS_READ;
+ break;
+ case 1 /*UBLK_IO_OP_WRITE*/:
+ op = BPF_AIO_OP_FS_WRITE;
+ break;
+ case 2 /*UBLK_IO_OP_FLUSH*/:
+ op = BPF_AIO_OP_FS_FSYNC;
+ break;
+ case 3 /*UBLK_IO_OP_DISCARD*/:
+ op = BPF_AIO_OP_FS_FALLOCATE;
+ op_flags = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ break;
+ case 4 /*UBLK_IO_OP_WRITE_SAME*/:
+ op = BPF_AIO_OP_FS_FALLOCATE;
+ op_flags = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ break;
+ case 5 /*UBLK_IO_OP_WRITE_ZEROES*/:
+ op = BPF_AIO_OP_FS_FALLOCATE;
+ op_flags = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res = -ENOMEM;
+ aio = bpf_aio_alloc(op, 0);
+ if (!aio)
+ goto fail;
+
+ /* attach aio into the specified range of this io command */
+ res = ublk_bpf_attach_and_prep_aio(io, buf_off, backfile_bytes, aio);
+ if (res < 0) {
+ bpf_printk("bpf aio attaching failed %d\n", res);
+ goto fail;
+ }
+
+ /* submit this aio onto the backing file */
+ res = bpf_aio_submit(aio, backfile_fd, backfile_off, backfile_bytes, op_flags);
+ if (res < 0) {
+ bpf_printk("aio submit failed %d\n", res);
+ ublk_stripe_comp_and_release_aio(aio, res);
+ }
+ return 0;
+fail:
+ return res;
+}
+
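+/*
+ * Map one chunk-aligned split of the striped device onto a backing file:
+ * consecutive chunk_size chunks are spread over the backing files in
+ * round-robin order. Returns the backing file fd and fills in the
+ * in-file offset and length of this split.
+ */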
+static int calculate_backfile_off_bytes(const struct ublk_stripe *stripe,
+ unsigned long stripe_off, unsigned int stripe_bytes,
+ unsigned long *backfile_off,
+ unsigned int *backfile_bytes)
+{
+ unsigned long chunk_size = 1U << stripe->chunk_shift;
+ unsigned int nr_bf = stripe->nr_backfiles;
+ unsigned long unit_chunk_size = nr_bf << stripe->chunk_shift;
+ unsigned long start_off = stripe_off & ~(chunk_size - 1);
+ unsigned long unit_start_off = stripe_off & ~(unit_chunk_size - 1);
+ unsigned int idx = (start_off - unit_start_off) >> stripe->chunk_shift;
+
+ *backfile_bytes = stripe_bytes;
+ *backfile_off = (unit_start_off / nr_bf) + (idx << stripe->chunk_shift) + (stripe_off - start_off);
+
+ return stripe->fds[idx % MAX_BACKFILES];
+}
+
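+/*
+ * Compute the absolute stripe offset of the split starting at 'this_off'
+ * within the io command, and clamp its length so that a single split
+ * never crosses a chunk boundary or the end of the io command.
+ */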
+static unsigned int calculate_stripe_off_bytes(const struct ublk_stripe *stripe,
+ const struct ublksrv_io_desc *iod, unsigned int this_off,
+ unsigned long *stripe_off)
+{
+ unsigned long off, next_off;
+ unsigned int chunk_size = 1U << stripe->chunk_shift;
+ unsigned int max_size = (iod->nr_sectors << 9) - this_off;
+
+ off = (iod->start_sector << 9) + this_off;
+	next_off = (off & ~(chunk_size - 1)) + chunk_size;
+
+ *stripe_off = off;
+
+ if (max_size < next_off - off)
+ return max_size;
+ return next_off - off;
+}
+
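+/*
+ * Handle the split of the io command starting at byte offset 'off':
+ * submit one bpf aio to the backing file owning the containing chunk,
+ * then return UBLK_BPF_IO_CONTINUE together with the consumed bytes so
+ * the remaining splits are handled by the following invocations.
+ */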
+static inline ublk_bpf_return_t __ublk_stripe_handle_io_cmd(const struct ublk_bpf_io *io, unsigned int off)
+{
+ ublk_bpf_return_t ret = ublk_bpf_return_val(UBLK_BPF_IO_QUEUED, 0);
+ unsigned long stripe_off, backfile_off;
+ unsigned int stripe_bytes, backfile_bytes;
+ int dev_id = ublk_bpf_get_dev_id(io);
+ const struct ublksrv_io_desc *iod;
+ const struct ublk_stripe *stripe;
+ int res = -EINVAL;
+ int backfile_fd;
+ unsigned long long io_key = build_io_key(io);
+ struct ublk_io_payload pl = {
+ .ref = 2,
+ .res = 0,
+ };
+ struct ublk_io_payload *pv = NULL;
+
+ iod = ublk_bpf_get_iod(io);
+ if (!iod) {
+ ublk_bpf_complete_io(io, res);
+ return ret;
+ }
+
+ BPF_DBG("ublk dev %u qid %u: handle io cmd tag %u op %u %lx-%d off %u",
+ ublk_bpf_get_dev_id(io),
+ ublk_bpf_get_queue_id(io),
+ ublk_bpf_get_io_tag(io),
+ iod->op_flags & 0xff,
+ iod->start_sector << 9,
+ iod->nr_sectors << 9, off);
+
+	/* retrieve the stripe setting for this device */
+	stripe = bpf_map_lookup_elem(&stripe_map, &dev_id);
+	if (!stripe) {
+		bpf_printk("can't find stripe setting for dev %d\n", dev_id);
+ return ret;
+ }
+
+	/* todo: build as big a chunk as possible for each underlying file/disk */
+ stripe_bytes = calculate_stripe_off_bytes(stripe, iod, off, &stripe_off);
+ backfile_fd = calculate_backfile_off_bytes(stripe, stripe_off, stripe_bytes,
+ &backfile_off, &backfile_bytes);
+	BPF_DBG("\t <chunk_shift %u files %u> stripe(%lx %u) backfile(%d %lx %u)",
+ stripe->chunk_shift, stripe->nr_backfiles,
+ stripe_off, stripe_bytes,
+ backfile_fd, backfile_off, backfile_bytes);
+
+ if (!stripe_bytes) {
+		bpf_printk("invalid zero-length stripe split\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+	/*
+	 * the first split sets up the payload with one submission reference
+	 * plus one extra for the whole batch; later splits add their own
+	 * submission reference
+	 */
+ if (!off) {
+ res = bpf_map_update_elem(&io_map, &io_key, &pl, BPF_ANY);
+ if (res) {
+ bpf_printk("update io map element failed %d key %llx\n", res, io_key);
+ goto exit;
+ }
+ } else {
+ pv = bpf_map_lookup_elem(&io_map, &io_key);
+ if (pv)
+ __sync_fetch_and_add(&pv->ref, 1);
+ }
+
+ /* handle this io command by submitting IOs on backing file */
+ res = ublk_stripe_submit_backing_io(io, backfile_fd, backfile_off, backfile_bytes, off);
+
+exit:
+	/* the io cmd can't be completed until this reference is dropped */
+ if (res < 0) {
+ bpf_printk("submit bpf aio failed %d\n", res);
+ ublk_bpf_complete_io(io, res);
+ return ret;
+ }
+
+	/* the last split drops the extra reference taken for the whole batch */
+	if (off + stripe_bytes == (iod->nr_sectors << 9)) {
+ if (!pv)
+ pv = bpf_map_lookup_elem(&io_map, &io_key);
+ dec_stripe_io_ref(io, pv, pv ? pv->res : 0);
+ }
+
+ return ublk_bpf_return_val(UBLK_BPF_IO_CONTINUE, stripe_bytes);
+}
+
+SEC("struct_ops/ublk_bpf_release_io_cmd")
+void BPF_PROG(ublk_stripe_release_io_cmd, struct ublk_bpf_io *io)
+{
+ BPF_DBG("%s: complete io command %d", __func__, io->res);
+}
+
+SEC("struct_ops.s/ublk_bpf_queue_io_cmd_daemon")
+ublk_bpf_return_t BPF_PROG(ublk_stripe_handle_io_cmd, struct ublk_bpf_io *io, unsigned int off)
+{
+ return __ublk_stripe_handle_io_cmd(io, off);
+}
+
+SEC("struct_ops/ublk_bpf_attach_dev")
+int BPF_PROG(ublk_stripe_attach_dev, int dev_id)
+{
+ const struct ublk_stripe *stripe;
+
+	/* retrieve the stripe setting for this device */
+	stripe = bpf_map_lookup_elem(&stripe_map, &dev_id);
+	if (!stripe) {
+		bpf_printk("can't find stripe setting for dev %d\n", dev_id);
+ return -EINVAL;
+ }
+
+	if (stripe->nr_backfiles > MAX_BACKFILES)
+ return -EINVAL;
+
+ if (stripe->chunk_shift < 12)
+ return -EINVAL;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct ublk_bpf_ops stripe_ublk_bpf_ops = {
+ .id = 32,
+ .attach_dev = (void *)ublk_stripe_attach_dev,
+ .queue_io_cmd_daemon = (void *)ublk_stripe_handle_io_cmd,
+ .release_io_cmd = (void *)ublk_stripe_release_io_cmd,
+};
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/ublk/test_stripe_01.sh b/tools/testing/selftests/ublk/test_stripe_01.sh
new file mode 100755
index 000000000000..3c21f7db495a
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_01.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. test_common.sh
+
+TID="stripe_01"
+ERR_CODE=0
+
+# prepare & register and pin bpf prog
+_prep_bpf_test "stripe" ublk_stripe.bpf.o
+
+backfile_0=`_create_backfile 256M`
+backfile_1=`_create_backfile 256M`
+
+# add one ublk stripe disk over the two backing files, using the pinned bpf progs
+_add_ublk_dev -t stripe -n 0 --bpf_prog 32 --bpf_aio_prog 32 --quiet $backfile_0 $backfile_1
+
+# run fio over the ublk disk
+fio --name=write_and_verify \
+ --filename=/dev/ublkb0 \
+ --ioengine=libaio --iodepth=4 \
+ --rw=write \
+ --size=256M \
+ --direct=1 \
+ --verify=crc32c \
+ --do_verify=1 \
+ --bs=4k > /dev/null 2>&1
+ERR_CODE=$?
+
+# cleanup & unregister and unpin the bpf prog
+_cleanup_bpf_test "stripe"
+
+_remove_backfile $backfile_0
+_remove_backfile $backfile_1
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_02.sh b/tools/testing/selftests/ublk/test_stripe_02.sh
new file mode 100755
index 000000000000..fdbb81dc53d8
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_02.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. test_common.sh
+
+TID="stripe_02"
+ERR_CODE=0
+
+# prepare & register and pin bpf prog
+_prep_bpf_test "stripe" ublk_stripe.bpf.o
+
+backfile_0=`_create_backfile 256M`
+backfile_1=`_create_backfile 256M`
+
+# add one ublk stripe disk over the two backing files, using the pinned bpf progs
+_add_ublk_dev -t stripe -n 0 --bpf_prog 32 --bpf_aio_prog 32 --quiet $backfile_0 $backfile_1
+
+_mkfs_mount_test /dev/ublkb0
+ERR_CODE=$?
+
+# cleanup & unregister and unpin the bpf prog
+_cleanup_bpf_test "stripe"
+
+_remove_backfile $backfile_0
+_remove_backfile $backfile_1
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/ublk_bpf.c b/tools/testing/selftests/ublk/ublk_bpf.c
index c24d5e18a1b1..85b2b4a09e05 100644
--- a/tools/testing/selftests/ublk/ublk_bpf.c
+++ b/tools/testing/selftests/ublk/ublk_bpf.c
@@ -1283,14 +1283,14 @@ static int cmd_dev_reg_bpf(struct dev_ctx *ctx)
static int cmd_dev_help(char *exe)
{
- printf("%s add -t [null|loop] [-q nr_queues] [-d depth] [-n dev_id] [--bpf_prog ublk_prog_id] [--bpf_aio_prog ublk_aio_prog_id] [backfile1] [backfile2] ...\n", exe);
+ printf("%s add -t [null|loop|stripe] [-q nr_queues] [-d depth] [-n dev_id] [--bpf_prog ublk_prog_id] [--bpf_aio_prog ublk_aio_prog_id] [backfile1] [backfile2] ...\n", exe);
printf("\t default: nr_queues=2(max 4), depth=128(max 128), dev_id=-1(auto allocation)\n");
printf("%s del [-n dev_id] -a \n", exe);
printf("\t -a delete all devices -n delete specified device\n");
printf("%s list [-n dev_id] -a \n", exe);
printf("\t -a list all devices, -n list specified device, default -a \n");
- printf("%s reg -t [null|loop] bpf_prog_obj_path \n", exe);
- printf("%s unreg -t [null|loop]\n", exe);
+ printf("%s reg -t [null|loop|stripe] bpf_prog_obj_path \n", exe);
+ printf("%s unreg -t [null|loop|stripe]\n", exe);
return 0;
}
@@ -1475,6 +1475,83 @@ static int ublk_loop_tgt_init(struct ublk_dev *dev)
return 0;
}
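+/* userspace mirror of 'struct ublk_stripe' in progs/ublk_stripe.c; both layouts must match */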
+struct ublk_stripe_params {
+ unsigned char chunk_shift;
+ unsigned char nr_backfiles;
+ int fds[MAX_BACK_FILES];
+};
+
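+/*
+ * Pass the per-device stripe setting (chunk size plus backing file fds)
+ * to the bpf prog through the pinned 'stripe_map' hash map, keyed by
+ * the ublk device id.
+ */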
+static int stripe_bpf_setup_parameters(struct ublk_dev *dev, unsigned int chunk_shift)
+{
+ int dev_id = dev->dev_info.dev_id;
+ struct ublk_stripe_params stripe = {
+ .chunk_shift = chunk_shift,
+ .nr_backfiles = dev->nr_fds - 1,
+ };
+ int map_fd;
+ int err, i;
+
+ for (i = 0; i < stripe.nr_backfiles; i++)
+ stripe.fds[i] = dev->fds[i + 1];
+
+ map_fd = bpf_obj_get("/sys/fs/bpf/ublk/stripe/stripe_map");
+ if (map_fd < 0) {
+ ublk_err("Error getting map file descriptor\n");
+ return -EINVAL;
+ }
+
+ err = bpf_map_update_elem(map_fd, &dev_id, &stripe, BPF_ANY);
+ if (err) {
+ ublk_err("Error updating map element: %d\n", errno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ublk_stripe_tgt_init(struct ublk_dev *dev)
+{
+ unsigned long long bytes = 0;
+ unsigned chunk_shift = 12;
+ int ret, i;
+ struct ublk_params p = {
+ .types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_BPF,
+ .basic = {
+ .logical_bs_shift = 9,
+ .physical_bs_shift = 12,
+ .io_opt_shift = 12,
+ .io_min_shift = 9,
+ .max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
+ },
+ .bpf = {
+ .flags = UBLK_BPF_HAS_OPS_ID | UBLK_BPF_HAS_AIO_OPS_ID,
+ .ops_id = dev->bpf_prog_id,
+ .aio_ops_id = dev->bpf_aio_prog_id,
+ },
+ };
+
+ ret = backing_file_tgt_init(dev);
+ if (ret)
+ return ret;
+
+ assert(stripe_bpf_setup_parameters(dev, chunk_shift) == 0);
+
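+	/* backing files must share the same chunk-aligned size; the ublk device size is their sum */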
+ for (i = 0; i < dev->nr_fds - 1; i++) {
+ unsigned long size = dev->tgt.backing_file_size[i];
+
+ if (size != dev->tgt.backing_file_size[0])
+ return -EINVAL;
+ if (size & ((1 << chunk_shift) - 1))
+ return -EINVAL;
+ bytes += size;
+ }
+
+ dev->tgt.dev_size = bytes;
+ p.basic.dev_sectors = bytes >> 9;
+ dev->tgt.params = p;
+
+ return 0;
+}
static const struct ublk_tgt_ops tgt_ops_list[] = {
{
@@ -1487,6 +1564,11 @@ static const struct ublk_tgt_ops tgt_ops_list[] = {
.init_tgt = ublk_loop_tgt_init,
.deinit_tgt = backing_file_tgt_deinit,
},
+ {
+ .name = "stripe",
+ .init_tgt = ublk_stripe_tgt_init,
+ .deinit_tgt = backing_file_tgt_deinit,
+ },
};
static const struct ublk_tgt_ops *ublk_find_tgt(const char *name)
--
2.47.0