* [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset
@ 2022-03-23 19:45 Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 1/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone Mike Snitzer
` (5 more replies)
0 siblings, 6 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-23 19:45 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, hch, dm-devel, linux-block
Hi Jens,
I ran with your suggestion and DM now sees a ~7% improvement in hipri
bio polling with io_uring (using dm-linear on null_blk, IOPS went from
900K to 966K).
Christoph,
I tried to address your review of the previous set. Patch 1 and 2 can
obviously be folded but I left them split out for review purposes.
Feel free to see if these changes are meaningful for nvme's use.
Happy either for you to take on iterating on these block changes
further, or for you to let me know what changes you'd like made.
Thanks,
Mike
v2: add REQ_ALLOC_CACHE and move use of bio_alloc_percpu_cache to
bio_alloc_bioset
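For context, the hipri test exercises io_uring's IORING_SETUP_IOPOLL
mode against an O_DIRECT block device. A minimal sketch of such an
exerciser, assuming liburing is available (the device path, queue depth
and block size here are illustrative, not necessarily the exact setup
used for the numbers above):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(int argc, char **argv)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        void *buf;
        int fd;

        /* e.g. argv[1] = /dev/mapper/<dm-linear dev> */
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;

        /* IOPOLL submissions reach bio-based DM as REQ_POLLED bios */
        if (io_uring_queue_init(64, &ring, IORING_SETUP_IOPOLL) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, 4096, 0);
        io_uring_submit(&ring);

        /* completions are reaped by polling, not interrupts */
        if (io_uring_wait_cqe(&ring, &cqe) == 0)
                io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}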
Mike Snitzer (4):
block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone
block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset
dm: enable BIOSET_PERCPU_CACHE for dm_io bioset
dm: conditionally enable BIOSET_PERCPU_CACHE for bio-based dm_io bioset
block/bio.c | 67 +++++++++++++++++++++++++++++++----------------
block/blk.h | 7 -----
drivers/md/dm-table.c | 11 +++++---
drivers/md/dm.c | 10 +++----
drivers/md/dm.h | 4 +--
include/linux/bio.h | 9 +++++++
include/linux/blk_types.h | 4 ++-
7 files changed, 71 insertions(+), 41 deletions(-)
--
2.15.0
* [PATCH v2 1/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
@ 2022-03-23 19:45 ` Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 2/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset Mike Snitzer
` (4 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-23 19:45 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, hch, dm-devel, linux-block
These changes allow DM core to make full use of BIOSET_PERCPU_CACHE for
REQ_POLLED bios:
Factor out bio_alloc_percpu_cache() from bio_alloc_kiocb() to allow
use by bio_alloc_clone() too.
Update bioset_init_from_src() to set BIOSET_PERCPU_CACHE if
src->cache is not NULL.
Move bio_clear_polled() to include/linux/bio.h to allow use by callers
outside of block core.
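As a usage sketch (hypothetical stacked-driver code, not part of this
patch): once a driver's bioset is initialized with BIOSET_PERCPU_CACHE,
its clone path gets cache-backed bios for polled I/O:

/*
 * Hypothetical clone path; struct my_dev and its fields are
 * illustrative. With dev->bs initialized using BIOSET_PERCPU_CACHE,
 * bio_alloc_clone() services REQ_POLLED clones from the per-cpu cache.
 */
static struct bio *my_clone_bio(struct my_dev *dev, struct bio *bio_src)
{
        return bio_alloc_clone(dev->bdev, bio_src, GFP_NOIO, &dev->bs);
}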
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
---
block/bio.c | 56 +++++++++++++++++++++++++++++++++--------------------
block/blk.h | 7 -------
include/linux/bio.h | 7 +++++++
3 files changed, 42 insertions(+), 28 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index b15f5466ce08..a7633aa82d7d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -420,6 +420,33 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
+static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
+ unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
+ struct bio_set *bs)
+{
+ struct bio_alloc_cache *cache;
+ struct bio *bio;
+
+ cache = per_cpu_ptr(bs->cache, get_cpu());
+ if (cache->free_list) {
+ bio = cache->free_list;
+ cache->free_list = bio->bi_next;
+ cache->nr--;
+ put_cpu();
+ bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
+ nr_vecs, opf);
+ bio->bi_pool = bs;
+ bio_set_flag(bio, BIO_PERCPU_CACHE);
+ return bio;
+ }
+ put_cpu();
+ bio = bio_alloc_bioset(bdev, nr_vecs, opf, gfp, bs);
+ if (!bio)
+ return NULL;
+ bio_set_flag(bio, BIO_PERCPU_CACHE);
+ return bio;
+}
+
/**
* bio_alloc_bioset - allocate a bio for I/O
* @bdev: block device to allocate the bio for (can be %NULL)
@@ -768,7 +795,10 @@ struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
{
struct bio *bio;
- bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+ if (bs->cache && bio_src->bi_opf & REQ_POLLED)
+ bio = bio_alloc_percpu_cache(bdev, 0, bio_src->bi_opf, gfp, bs);
+ else
+ bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
if (!bio)
return NULL;
@@ -1736,6 +1766,8 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
flags |= BIOSET_NEED_BVECS;
if (src->rescue_workqueue)
flags |= BIOSET_NEED_RESCUER;
+ if (src->cache)
+ flags |= BIOSET_PERCPU_CACHE;
return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
@@ -1753,35 +1785,17 @@ EXPORT_SYMBOL(bioset_init_from_src);
* Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
* used to check if we should dip into the per-cpu bio_set allocation
* cache. The allocation uses GFP_KERNEL internally. On return, the
- * bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
+ * bio is marked BIO_PERCPU_CACHE, and the final put of the bio
* MUST be done from process context, not hard/soft IRQ.
*
*/
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
{
- struct bio_alloc_cache *cache;
- struct bio *bio;
-
if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
- cache = per_cpu_ptr(bs->cache, get_cpu());
- if (cache->free_list) {
- bio = cache->free_list;
- cache->free_list = bio->bi_next;
- cache->nr--;
- put_cpu();
- bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
- nr_vecs, opf);
- bio->bi_pool = bs;
- bio_set_flag(bio, BIO_PERCPU_CACHE);
- return bio;
- }
- put_cpu();
- bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
- bio_set_flag(bio, BIO_PERCPU_CACHE);
- return bio;
+ return bio_alloc_percpu_cache(bdev, nr_vecs, opf, GFP_KERNEL, bs);
}
EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
diff --git a/block/blk.h b/block/blk.h
index ebaa59ca46ca..8e338e76d303 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -451,13 +451,6 @@ extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;
-static inline void bio_clear_polled(struct bio *bio)
-{
- /* can't support alloc cache if we turn off polling */
- bio_clear_flag(bio, BIO_PERCPU_CACHE);
- bio->bi_opf &= ~REQ_POLLED;
-}
-
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7523aba4ddf7..709663ae757a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -787,6 +787,13 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio->bi_opf |= REQ_NOWAIT;
}
+static inline void bio_clear_polled(struct bio *bio)
+{
+ /* can't support alloc cache if we turn off polling */
+ bio_clear_flag(bio, BIO_PERCPU_CACHE);
+ bio->bi_opf &= ~REQ_POLLED;
+}
+
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, unsigned int opf, gfp_t gfp);
--
2.15.0
* [PATCH v2 2/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 1/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone Mike Snitzer
@ 2022-03-23 19:45 ` Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 3/4] dm: enable BIOSET_PERCPU_CACHE for dm_io bioset Mike Snitzer
` (3 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-23 19:45 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, hch, dm-devel, linux-block
Add REQ_ALLOC_CACHE and set it in the opf passed to bio_alloc_bioset to
inform bio_alloc_bioset (and any stacked block drivers) that the bio
should be allocated from the respective bioset's per-cpu alloc cache if
possible.
This decouples access control to the alloc cache (via REQ_ALLOC_CACHE)
from actual participation in a specific alloc cache (BIO_PERCPU_CACHE).
Otherwise an upper layer's bioset may not have an alloc cache, in which
case the bio issued to the underlying device(s) wouldn't reflect that
allocating from an alloc cache is warranted (if possible).
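As a caller-side sketch (hypothetical code; the function name is
illustrative), any layer can now request cache participation purely via
the opf it builds:

/*
 * REQ_ALLOC_CACHE asks bio_alloc_bioset() to dip into the bioset's
 * per-cpu cache when one exists; a bioset without a cache simply falls
 * through to the normal mempool allocation.
 */
static struct bio *alloc_polled_bio(struct block_device *bdev,
                                    struct bio_set *bs)
{
        unsigned int opf = REQ_OP_READ | REQ_POLLED | REQ_ALLOC_CACHE;

        return bio_alloc_bioset(bdev, 1, opf, GFP_KERNEL, bs);
}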
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
---
block/bio.c | 33 ++++++++++++++++++++-------------
include/linux/bio.h | 4 +++-
include/linux/blk_types.h | 4 +++-
3 files changed, 26 insertions(+), 15 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index a7633aa82d7d..0b65ea241f54 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -440,11 +440,7 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
return bio;
}
put_cpu();
- bio = bio_alloc_bioset(bdev, nr_vecs, opf, gfp, bs);
- if (!bio)
- return NULL;
- bio_set_flag(bio, BIO_PERCPU_CACHE);
- return bio;
+ return NULL;
}
/**
@@ -488,11 +484,24 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
gfp_t saved_gfp = gfp_mask;
struct bio *bio;
void *p;
+ bool use_alloc_cache;
/* should not use nobvec bioset for nr_vecs > 0 */
if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
return NULL;
+ use_alloc_cache = (bs->cache && (opf & REQ_ALLOC_CACHE) &&
+ nr_vecs <= BIO_INLINE_VECS);
+ if (use_alloc_cache) {
+ bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, gfp_mask, bs);
+ if (bio)
+ return bio;
+ /*
+ * No cached bio available, mark bio returned below to
+ * participate in per-cpu alloc cache.
+ */
+ }
+
/*
* submit_bio_noacct() converts recursion to iteration; this means if
* we're running beneath it, any bios we allocate and submit will not be
@@ -546,6 +555,8 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
bio_init(bio, bdev, NULL, 0, opf);
}
+ if (use_alloc_cache)
+ bio_set_flag(bio, BIO_PERCPU_CACHE);
bio->bi_pool = bs;
return bio;
@@ -795,10 +806,7 @@ struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
{
struct bio *bio;
- if (bs->cache && bio_src->bi_opf & REQ_POLLED)
- bio = bio_alloc_percpu_cache(bdev, 0, bio_src->bi_opf, gfp, bs);
- else
- bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+ bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
if (!bio)
return NULL;
@@ -1792,10 +1800,9 @@ EXPORT_SYMBOL(bioset_init_from_src);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
{
- if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
- return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-
- return bio_alloc_percpu_cache(bdev, nr_vecs, opf, GFP_KERNEL, bs);
+ if (kiocb->ki_flags & IOCB_ALLOC_CACHE)
+ opf |= REQ_ALLOC_CACHE;
+ return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
}
EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 709663ae757a..1be27e87a1f4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -783,6 +783,8 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
bio->bi_opf |= REQ_POLLED;
+ if (kiocb->ki_flags & IOCB_ALLOC_CACHE)
+ bio->bi_opf |= REQ_ALLOC_CACHE;
if (!is_sync_kiocb(kiocb))
bio->bi_opf |= REQ_NOWAIT;
}
@@ -791,7 +793,7 @@ static inline void bio_clear_polled(struct bio *bio)
{
/* can't support alloc cache if we turn off polling */
bio_clear_flag(bio, BIO_PERCPU_CACHE);
- bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5561e58d158a..5f9a0c39d4c5 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -327,7 +327,7 @@ enum {
BIO_TRACKED, /* set if bio goes through the rq_qos path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
- BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
+ BIO_PERCPU_CACHE, /* participates in per-cpu alloc cache */
BIO_FLAG_LAST
};
@@ -414,6 +414,7 @@ enum req_flag_bits {
__REQ_NOUNMAP, /* do not free blocks when zeroing */
__REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
/* for driver use */
__REQ_DRV,
@@ -439,6 +440,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_POLLED (1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (1ULL << __REQ_ALLOC_CACHE)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
--
2.15.0
* [PATCH v2 3/4] dm: enable BIOSET_PERCPU_CACHE for dm_io bioset
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 1/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_clone Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 2/4] block: allow BIOSET_PERCPU_CACHE use from bio_alloc_bioset Mike Snitzer
@ 2022-03-23 19:45 ` Mike Snitzer
2022-03-23 19:45 ` [PATCH v2 4/4] dm: conditionally enable BIOSET_PERCPU_CACHE for bio-based " Mike Snitzer
` (2 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-23 19:45 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, hch, dm-devel, linux-block
Enable the per-cpu alloc cache for DM's dm_io bioset by passing
BIOSET_PERCPU_CACHE to bioset_init().
Also change dm_io_complete() to use bio_clear_polled() so that it
properly clears all associated bio state (REQ_POLLED, BIO_PERCPU_CACHE,
etc).
This commit improves DM's hipri bio polling (REQ_POLLED) perf by ~7%.
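For reference, opting a bioset into the cache is just a flag at
bioset_init() time; a minimal hypothetical sketch (the dm_io call site
is in the hunk below):

/*
 * BIOSET_PERCPU_CACHE makes bioset_init() allocate the per-cpu free
 * lists that bio_alloc_bioset()/bio_put() then use for polled bios.
 */
static int my_init_bioset(struct bio_set *bs)
{
        return bioset_init(bs, BIO_POOL_SIZE, 0,
                           BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}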
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
---
drivers/md/dm.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1c4d1e12d74b..b3cb2c1aea2a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,9 +899,9 @@ static void dm_io_complete(struct dm_io *io)
/*
* Upper layer won't help us poll split bio, io->orig_bio
* may only reflect a subset of the pre-split original,
- * so clear REQ_POLLED in case of requeue
+ * so clear REQ_POLLED and BIO_PERCPU_CACHE on requeue.
*/
- bio->bi_opf &= ~REQ_POLLED;
+ bio_clear_polled(bio);
return;
}
@@ -3016,7 +3016,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
- ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
+ ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, BIOSET_PERCPU_CACHE);
if (ret)
goto out;
if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
--
2.15.0
* [PATCH v2 4/4] dm: conditionally enable BIOSET_PERCPU_CACHE for bio-based dm_io bioset
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
` (2 preceding siblings ...)
2022-03-23 19:45 ` [PATCH v2 3/4] dm: enable BIOSET_PERCPU_CACHE for dm_io bioset Mike Snitzer
@ 2022-03-23 19:45 ` Mike Snitzer
2022-03-24 0:25 ` [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Jens Axboe
2022-03-24 7:39 ` Christoph Hellwig
5 siblings, 0 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-23 19:45 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, hch, dm-devel, linux-block
A bioset's percpu cache may have broader utility in the future but for
now constrain it to being tightly coupled to QUEUE_FLAG_POLL.
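For reference, dm_table_supports_poll() (forward-declared in the hunk
below) is what gates this; roughly, and hedging on the exact iteration
helpers, it has this shape:

/*
 * Approximate shape of the poll check: a table supports polling only
 * if no underlying queue lacks QUEUE_FLAG_POLL.
 */
static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
                                   sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

static int dm_table_supports_poll(struct dm_table *t)
{
        return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
}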
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
---
drivers/md/dm-table.c | 11 ++++++++---
drivers/md/dm.c | 6 +++---
drivers/md/dm.h | 4 ++--
3 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c0be4f60b427..7ebc70e3eb2f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1002,6 +1002,8 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
+static int dm_table_supports_poll(struct dm_table *t);
+
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
@@ -1009,21 +1011,24 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
unsigned min_pool_size = 0;
struct dm_target *ti;
unsigned i;
+ bool poll_supported = false;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- if (__table_type_bio_based(type))
+ if (__table_type_bio_based(type)) {
for (i = 0; i < t->num_targets; i++) {
ti = t->targets + i;
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
+ poll_supported = !!dm_table_supports_poll(t);
+ }
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
- per_io_data_size, min_pool_size);
+ t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
+ t->integrity_supported, poll_supported);
if (!t->mempools)
return -ENOMEM;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3cb2c1aea2a..ebd7919e555f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2999,8 +2999,8 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_io_data_size,
- unsigned min_pool_size)
+ unsigned per_io_data_size, unsigned min_pool_size,
+ bool integrity, bool poll)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
@@ -3016,7 +3016,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
- ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, BIOSET_PERCPU_CACHE);
+ ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
if (ret)
goto out;
if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 9013dc1a7b00..3f89664fea01 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -221,8 +221,8 @@ void dm_kcopyd_exit(void);
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_bio_data_size,
- unsigned min_pool_size);
+ unsigned per_io_data_size, unsigned min_pool_size,
+ bool integrity, bool poll);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
--
2.15.0
* Re: [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
` (3 preceding siblings ...)
2022-03-23 19:45 ` [PATCH v2 4/4] dm: conditionally enable BIOSET_PERCPU_CACHE for bio-based " Mike Snitzer
@ 2022-03-24 0:25 ` Jens Axboe
2022-03-24 7:39 ` Christoph Hellwig
5 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2022-03-24 0:25 UTC (permalink / raw)
To: Mike Snitzer; +Cc: ming.lei, hch, dm-devel, linux-block
On 3/23/22 1:45 PM, Mike Snitzer wrote:
> Hi Jens,
>
> I ran with your suggestion and DM now sees a ~7% improvement in hipri
> bio polling with io_uring (using dm-linear on null_blk, IOPS went from
> 900K to 966K).
>
> Christoph,
>
> I tried to address your review of the previous set. Patch 1 and 2 can
> obviously be folded but I left them split out for review purposes.
> Feel free to see if these changes are meaningful for nvme's use.
> Happy for either you to take on iterating on these block changes
> further or you letting me know what changes you'd like made.
Ran the usual peak testing, and it's good for about a 20% improvement
for me. 5.6M -> 6.6M IOPS on a single core, dm-linear.
--
Jens Axboe
* Re: [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset
2022-03-23 19:45 [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Mike Snitzer
` (4 preceding siblings ...)
2022-03-24 0:25 ` [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset Jens Axboe
@ 2022-03-24 7:39 ` Christoph Hellwig
2022-03-24 14:41 ` Mike Snitzer
5 siblings, 1 reply; 8+ messages in thread
From: Christoph Hellwig @ 2022-03-24 7:39 UTC (permalink / raw)
To: Mike Snitzer; +Cc: axboe, ming.lei, hch, dm-devel, linux-block
On Wed, Mar 23, 2022 at 03:45:20PM -0400, Mike Snitzer wrote:
> I tried to address your review of the previous set. Patch 1 and 2 can
> obviously be folded but I left them split out for review purposes.
> Feel free to see if these changes are meaningful for nvme's use.
> Happy either for you to take on iterating on these block changes
> further, or for you to let me know what changes you'd like made.
I'd be tempted to go with something like the version below, which
does away with the bio flag and the bio_alloc_kiocb wrapper to
further simplify the interface. The additional changes needed for
dm, like the bioset_init_from_src changes and the move of
bio_clear_polled, can then be built on top of that.
---
From ec0493b86a3240e7f9f2d46a1298bd40ccf15e80 Mon Sep 17 00:00:00 2001
From: Mike Snitzer <snitzer@redhat.com>
Date: Wed, 23 Mar 2022 15:45:21 -0400
Subject: block: allow using the per-cpu bio cache from bio_alloc_bioset
Replace the BIO_PERCPU_CACHE bio-internal flag with a REQ_ALLOC_CACHE
one that can be passed to bio_alloc / bio_alloc_bioset, and implement
the percpu cache allocation logic in a helper called from
bio_alloc_bioset. This allows any bio_alloc_bioset user to use the
percpu caches instead of having the functionality tied to struct kiocb.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
[hch: refactored a bit]
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/bio.c | 86 +++++++++++++++++++--------------------
block/blk.h | 3 +-
block/fops.c | 11 +++--
include/linux/bio.h | 2 -
include/linux/blk_types.h | 3 +-
5 files changed, 52 insertions(+), 53 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 33979f306e9e7..d780e2cbea437 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -420,6 +420,28 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
+static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
+ unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
+ struct bio_set *bs)
+{
+ struct bio_alloc_cache *cache;
+ struct bio *bio;
+
+ cache = per_cpu_ptr(bs->cache, get_cpu());
+ if (!cache->free_list) {
+ put_cpu();
+ return NULL;
+ }
+ bio = cache->free_list;
+ cache->free_list = bio->bi_next;
+ cache->nr--;
+ put_cpu();
+
+ bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
+ bio->bi_pool = bs;
+ return bio;
+}
+
/**
* bio_alloc_bioset - allocate a bio for I/O
* @bdev: block device to allocate the bio for (can be %NULL)
@@ -452,6 +474,9 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
* submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
* for per bio allocations.
*
+ * If REQ_ALLOC_CACHE is set, the final put of the bio MUST be done from process
+ * context, not hard/soft IRQ.
+ *
* Returns: Pointer to new bio on success, NULL on failure.
*/
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
@@ -466,6 +491,21 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
return NULL;
+ if (opf & REQ_ALLOC_CACHE) {
+ if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+ bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
+ gfp_mask, bs);
+ if (bio)
+ return bio;
+ /*
+ * No cached bio available, mark bio returned below to
+ * participate in per-cpu alloc cache.
+ */
+ } else {
+ opf &= ~REQ_ALLOC_CACHE;
+ }
+ }
+
/*
* submit_bio_noacct() converts recursion to iteration; this means if
* we're running beneath it, any bios we allocate and submit will not be
@@ -712,7 +752,7 @@ void bio_put(struct bio *bio)
return;
}
- if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
+ if (bio->bi_opf & REQ_ALLOC_CACHE) {
struct bio_alloc_cache *cache;
bio_uninit(bio);
@@ -1734,50 +1774,6 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
}
EXPORT_SYMBOL(bioset_init_from_src);
-/**
- * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
- * @kiocb: kiocb describing the IO
- * @bdev: block device to allocate the bio for (can be %NULL)
- * @nr_vecs: number of iovecs to pre-allocate
- * @opf: operation and flags for bio
- * @bs: bio_set to allocate from
- *
- * Description:
- * Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
- * used to check if we should dip into the per-cpu bio_set allocation
- * cache. The allocation uses GFP_KERNEL internally. On return, the
- * bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
- * MUST be done from process context, not hard/soft IRQ.
- *
- */
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
- unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
-{
- struct bio_alloc_cache *cache;
- struct bio *bio;
-
- if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
- return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-
- cache = per_cpu_ptr(bs->cache, get_cpu());
- if (cache->free_list) {
- bio = cache->free_list;
- cache->free_list = bio->bi_next;
- cache->nr--;
- put_cpu();
- bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
- nr_vecs, opf);
- bio->bi_pool = bs;
- bio_set_flag(bio, BIO_PERCPU_CACHE);
- return bio;
- }
- put_cpu();
- bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
- bio_set_flag(bio, BIO_PERCPU_CACHE);
- return bio;
-}
-EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
-
static int __init init_bio(void)
{
int i;
diff --git a/block/blk.h b/block/blk.h
index 6f21859c7f0ff..9cb04f24ba8a7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -454,8 +454,7 @@ extern struct device_attribute dev_attr_events_poll_msecs;
static inline void bio_clear_polled(struct bio *bio)
{
/* can't support alloc cache if we turn off polling */
- bio_clear_flag(bio, BIO_PERCPU_CACHE);
- bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
}
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
diff --git a/block/fops.c b/block/fops.c
index e49096354dcd6..d1da85bdec31e 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -198,8 +198,10 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
-
+ if (iocb->ki_flags & IOCB_ALLOC_CACHE)
+ opf |= REQ_ALLOC_CACHE;
+ bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
+ &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
atomic_set(&dio->ref, 1);
/*
@@ -322,7 +324,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
+ if (iocb->ki_flags & IOCB_ALLOC_CACHE)
+ opf |= REQ_ALLOC_CACHE;
+ bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
+ &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->flags = 0;
dio->iocb = iocb;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4c21f6e69e182..10406f57d339e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -408,8 +408,6 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
unsigned int opf, gfp_t gfp_mask,
struct bio_set *bs);
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
- unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0c3563b45fe90..d4ba5251a3a0b 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -328,7 +328,6 @@ enum {
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
- BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
BIO_FLAG_LAST
};
@@ -415,6 +414,7 @@ enum req_flag_bits {
__REQ_NOUNMAP, /* do not free blocks when zeroing */
__REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
/* for driver use */
__REQ_DRV,
@@ -440,6 +440,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_POLLED (1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (1ULL << __REQ_ALLOC_CACHE)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
--
2.30.2
* Re: [PATCH v2 0/4] block/dm: use BIOSET_PERCPU_CACHE from bio_alloc_bioset
2022-03-24 7:39 ` Christoph Hellwig
@ 2022-03-24 14:41 ` Mike Snitzer
0 siblings, 0 replies; 8+ messages in thread
From: Mike Snitzer @ 2022-03-24 14:41 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: axboe, ming.lei, dm-devel, linux-block
On Thu, Mar 24 2022 at 3:39P -0400,
Christoph Hellwig <hch@lst.de> wrote:
> On Wed, Mar 23, 2022 at 03:45:20PM -0400, Mike Snitzer wrote:
> > I tried to address your review of the previous set. Patch 1 and 2 can
> > obviously be folded but I left them split out for review purposes.
> > Feel free to see if these changes are meaningful for nvme's use.
> > Happy either for you to take on iterating on these block changes
> > further, or for you to let me know what changes you'd like made.
>
> I'd be tempted to go with something like the version below, which
> does away with the bio flag and the bio_alloc_kiocb wrapper to
> further simplify the interface. The additional changes neeed for
> dm like the bioset_init_from_src changes and move of bio_clear_polled
> can then built on top of that.
Sure, that should work fine. I'll rebase on top of this and send out
v3 later today.
FYI, the reason I kept BIO_PERCPU_CACHE in v2 was that it gave each
bio-allocating layer above and below a particular device autonomy over
whether or not it provided a bio alloc cache. But thinking further
after seeing your patch: it seems reasonable for stacked devices to
just require that the entire stack enable and use a bio alloc cache.
And it does prevent developers from
hijacking REQ_ALLOC_CACHE for their own needs (completely independent
of a bioset's alloc cache).
Thanks,
Mike