* [PATCH 0/2] blk-mq-sched: fix put_rq_private() lock inconsistency
@ 2017-02-02 16:06 Jens Axboe
2017-02-02 16:06 ` [PATCH 1/2] blk-merge: return the merged request Jens Axboe
2017-02-02 16:06 ` [PATCH 2/2] block: free merged request in the caller Jens Axboe
0 siblings, 2 replies; 6+ messages in thread
From: Jens Axboe @ 2017-02-02 16:06 UTC (permalink / raw)
To: linux-block; +Cc: paolo.valente, osandov
I tested the patch I sent to Paolo yesterday, and it seems to work
fine. I broke it up into two pieces, so the functional change is
restricted to patch #2.
Basically this fixes the case where we can invoke the blk-mq-sched
put request functions in an inconsistent state. Most of the time we
invoke them without any locks held, but for the case where we get
a successful request-to-request merge on the back of a bio-to-request
merge, we can invoke it with whatever lock the scheduler held when
it called blk_mq_sched_try_merge().
--
Jens Axboe
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH 1/2] blk-merge: return the merged request
2017-02-02 16:06 [PATCH 0/2] blk-mq-sched: fix put_rq_private() lock inconsistency Jens Axboe
@ 2017-02-02 16:06 ` Jens Axboe
2017-02-03 16:42 ` Omar Sandoval
2017-02-02 16:06 ` [PATCH 2/2] block: free merged request in the caller Jens Axboe
1 sibling, 1 reply; 6+ messages in thread
From: Jens Axboe @ 2017-02-02 16:06 UTC (permalink / raw)
To: linux-block; +Cc: paolo.valente, osandov, Jens Axboe
When we attempt to merge request-to-request, we return a 0/1 if we
ended up merging or not. Change that to return the pointer to the
request that we freed. We will use this to move the freeing of
that request out of the merge logic, so that callers can drop
locks before freeing the request.
There should be no functional changes in this patch.
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/blk-merge.c | 31 ++++++++++++++++---------------
block/blk.h | 4 ++--
2 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6aa43dec5af4..3826fc32b72c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -659,31 +659,32 @@ static void blk_account_io_merge(struct request *req)
}
/*
- * Has to be called with the request spinlock acquired
+ * For non-mq, this has to be called with the request spinlock acquired.
+ * For mq with scheduling, the appropriate queue wide lock should be held.
*/
-static int attempt_merge(struct request_queue *q, struct request *req,
- struct request *next)
+static struct request *attempt_merge(struct request_queue *q,
+ struct request *req, struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
- return 0;
+ return NULL;
if (req_op(req) != req_op(next))
- return 0;
+ return NULL;
/*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
- return 0;
+ return NULL;
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
|| req_no_special_merge(next))
- return 0;
+ return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
!blk_write_same_mergeable(req->bio, next->bio))
- return 0;
+ return NULL;
/*
* If we are allowed to merge, then append bio list
@@ -692,7 +693,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
* counts here.
*/
if (!ll_merge_requests_fn(q, req, next))
- return 0;
+ return NULL;
/*
* If failfast settings disagree or any of the two is already
@@ -735,27 +736,27 @@ static int attempt_merge(struct request_queue *q, struct request *req,
/* owner-ship of bio passed from next to req */
next->bio = NULL;
__blk_put_request(q, next);
- return 1;
+ return next;
}
-int attempt_back_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
if (next)
return attempt_merge(q, rq, next);
- return 0;
+ return NULL;
}
-int attempt_front_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
if (prev)
return attempt_merge(q, prev, rq);
- return 0;
+ return NULL;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -767,7 +768,7 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
return 0;
- return attempt_merge(q, rq, next);
+ return attempt_merge(q, rq, next) != NULL;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
diff --git a/block/blk.h b/block/blk.h
index c1bd4bf9e645..918cea38d51e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -204,8 +204,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio);
-int attempt_back_merge(struct request_queue *q, struct request *rq);
-int attempt_front_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
void blk_recalc_rq_segments(struct request *rq);
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 2/2] block: free merged request in the caller
2017-02-02 16:06 [PATCH 0/2] blk-mq-sched: fix put_rq_private() lock inconsistency Jens Axboe
2017-02-02 16:06 ` [PATCH 1/2] blk-merge: return the merged request Jens Axboe
@ 2017-02-02 16:06 ` Jens Axboe
2017-02-03 16:44 ` Omar Sandoval
2017-02-07 17:35 ` Paolo Valente
1 sibling, 2 replies; 6+ messages in thread
From: Jens Axboe @ 2017-02-02 16:06 UTC (permalink / raw)
To: linux-block; +Cc: paolo.valente, osandov, Jens Axboe
If we end up doing a request-to-request merge when we have completed
a bio-to-request merge, we free the request from deep down in that
path. For blk-mq-sched, the merge path has to hold the appropriate
lock, but we don't need it for freeing the request. And in fact
holding the lock is problematic, since we are now calling the
mq sched put_rq_private() hook with the lock held. Other call paths
do not hold this lock.
Fix this inconsistency by ensuring that the caller frees a merged
request. Then we can do it outside of the lock, making it both more
efficient and fixing the blk-mq-sched problem of invoking parts of
the scheduler with an unknown lock state.
Reported-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/blk-core.c | 12 +++++++++---
block/blk-merge.c | 15 ++++++++++++---
block/blk-mq-sched.c | 9 ++++++---
block/blk-mq-sched.h | 3 ++-
block/mq-deadline.c | 8 ++++++--
5 files changed, 35 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a5726e01f839..00c90f8cd682 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1591,7 +1591,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
struct blk_plug *plug;
int el_ret, where = ELEVATOR_INSERT_SORT;
- struct request *req;
+ struct request *req, *free;
unsigned int request_count = 0;
unsigned int wb_acct;
@@ -1632,15 +1632,21 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, req, bio)) {
elv_bio_merged(q, req, bio);
- if (!attempt_back_merge(q, req))
+ free = attempt_back_merge(q, req);
+ if (!free)
elv_merged_request(q, req, el_ret);
+ else
+ __blk_put_request(q, free);
goto out_unlock;
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
if (bio_attempt_front_merge(q, req, bio)) {
elv_bio_merged(q, req, bio);
- if (!attempt_front_merge(q, req))
+ free = attempt_front_merge(q, req);
+ if (!free)
elv_merged_request(q, req, el_ret);
+ else
+ __blk_put_request(q, free);
goto out_unlock;
}
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3826fc32b72c..a373416dbc9a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -733,9 +733,11 @@ static struct request *attempt_merge(struct request_queue *q,
if (blk_rq_cpu_valid(next))
req->cpu = next->cpu;
- /* owner-ship of bio passed from next to req */
+ /*
+ * ownership of bio passed from next to req, return 'next' for
+ * the caller to free
+ */
next->bio = NULL;
- __blk_put_request(q, next);
return next;
}
@@ -763,12 +765,19 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
+ struct request *free;
if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
return 0;
- return attempt_merge(q, rq, next) != NULL;
+ free = attempt_merge(q, rq, next);
+ if (free) {
+ __blk_put_request(q, free);
+ return 1;
+ }
+
+ return 0;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 114814ec3d49..d93b56d53c4e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -234,7 +234,8 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+ struct request **merged_request)
{
struct request *rq;
int ret;
@@ -244,7 +245,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
if (bio_attempt_back_merge(q, rq, bio)) {
- if (!attempt_back_merge(q, rq))
+ *merged_request = attempt_back_merge(q, rq);
+ if (!*merged_request)
elv_merged_request(q, rq, ret);
return true;
}
@@ -252,7 +254,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
if (bio_attempt_front_merge(q, rq, bio)) {
- if (!attempt_front_merge(q, rq))
+ *merged_request = attempt_front_merge(q, rq);
+ if (!*merged_request)
elv_merged_request(q, rq, ret);
return true;
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 9478aaeb48c5..3643686a54b8 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -16,7 +16,8 @@ void blk_mq_sched_put_request(struct request *rq);
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+ struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 49583536698c..682fa64f55ff 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -371,12 +371,16 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
- int ret;
+ struct request *free = NULL;
+ bool ret;
spin_lock(&dd->lock);
- ret = blk_mq_sched_try_merge(q, bio);
+ ret = blk_mq_sched_try_merge(q, bio, &free);
spin_unlock(&dd->lock);
+ if (free)
+ blk_mq_free_request(free);
+
return ret;
}
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH 1/2] blk-merge: return the merged request
2017-02-02 16:06 ` [PATCH 1/2] blk-merge: return the merged request Jens Axboe
@ 2017-02-03 16:42 ` Omar Sandoval
0 siblings, 0 replies; 6+ messages in thread
From: Omar Sandoval @ 2017-02-03 16:42 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-block, paolo.valente
On Thu, Feb 02, 2017 at 09:06:12AM -0700, Jens Axboe wrote:
> When we attempt to merge request-to-request, we return a 0/1 if we
> ended up merging or not. Change that to return the pointer to the
> request that we freed. We will use this to move the freeing of
> that request out of the merge logic, so that callers can drop
> locks before freeing the request.
>
> There should be no functional changes in this patch.
Reviewed-by: Omar Sandoval <osandov@fb.com>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> ---
> block/blk-merge.c | 31 ++++++++++++++++---------------
> block/blk.h | 4 ++--
> 2 files changed, 18 insertions(+), 17 deletions(-)
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/2] block: free merged request in the caller
2017-02-02 16:06 ` [PATCH 2/2] block: free merged request in the caller Jens Axboe
@ 2017-02-03 16:44 ` Omar Sandoval
2017-02-07 17:35 ` Paolo Valente
1 sibling, 0 replies; 6+ messages in thread
From: Omar Sandoval @ 2017-02-03 16:44 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-block, paolo.valente
On Thu, Feb 02, 2017 at 09:06:13AM -0700, Jens Axboe wrote:
> If we end up doing a request-to-request merge when we have completed
> a bio-to-request merge, we free the request from deep down in that
> path. For blk-mq-sched, the merge path has to hold the appropriate
> lock, but we don't need it for freeing the request. And in fact
> holding the lock is problematic, since we are now calling the
> mq sched put_rq_private() hook with the lock held. Other call paths
> do not hold this lock.
>
> Fix this inconsistency by ensuring that the caller frees a merged
> request. Then we can do it outside of the lock, making it both more
> efficient and fixing the blk-mq-sched problem of invoking parts of
> the scheduler with an unknown lock state.
>
> Reported-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> ---
> block/blk-core.c | 12 +++++++++---
> block/blk-merge.c | 15 ++++++++++++---
> block/blk-mq-sched.c | 9 ++++++---
> block/blk-mq-sched.h | 3 ++-
> block/mq-deadline.c | 8 ++++++--
> 5 files changed, 35 insertions(+), 12 deletions(-)
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 2/2] block: free merged request in the caller
2017-02-02 16:06 ` [PATCH 2/2] block: free merged request in the caller Jens Axboe
2017-02-03 16:44 ` Omar Sandoval
@ 2017-02-07 17:35 ` Paolo Valente
1 sibling, 0 replies; 6+ messages in thread
From: Paolo Valente @ 2017-02-07 17:35 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-block, osandov
> Il giorno 02 feb 2017, alle ore 17:06, Jens Axboe <axboe@fb.com> ha scritto:
>
> If we end up doing a request-to-request merge when we have completed
> a bio-to-request merge, we free the request from deep down in that
> path. For blk-mq-sched, the merge path has to hold the appropriate
> lock, but we don't need it for freeing the request. And in fact
> holding the lock is problematic, since we are now calling the
> mq sched put_rq_private() hook with the lock held. Other call paths
> do not hold this lock.
>
> Fix this inconsistency by ensuring that the caller frees a merged
> request. Then we can do it outside of the lock, making it both more
> efficient and fixing the blk-mq-sched problem of invoking parts of
> the scheduler with an unknown lock state.
>
> Reported-by: Paolo Valente <paolo.valente@linaro.org>
> Signed-off-by: Jens Axboe <axboe@fb.com>
> ---
> block/blk-core.c | 12 +++++++++---
> block/blk-merge.c | 15 ++++++++++++---
> block/blk-mq-sched.c | 9 ++++++---
> block/blk-mq-sched.h | 3 ++-
> block/mq-deadline.c | 8 ++++++--
> 5 files changed, 35 insertions(+), 12 deletions(-)
>
> diff --git a/block/blk-core.c b/block/blk-core.c
> index a5726e01f839..00c90f8cd682 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -1591,7 +1591,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> {
> struct blk_plug *plug;
> int el_ret, where = ELEVATOR_INSERT_SORT;
> - struct request *req;
> + struct request *req, *free;
> unsigned int request_count = 0;
> unsigned int wb_acct;
>
> @@ -1632,15 +1632,21 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> if (el_ret == ELEVATOR_BACK_MERGE) {
> if (bio_attempt_back_merge(q, req, bio)) {
> elv_bio_merged(q, req, bio);
> - if (!attempt_back_merge(q, req))
> + free = attempt_back_merge(q, req);
> + if (!free)
> elv_merged_request(q, req, el_ret);
> + else
> + __blk_put_request(q, free);
> goto out_unlock;
> }
> } else if (el_ret == ELEVATOR_FRONT_MERGE) {
> if (bio_attempt_front_merge(q, req, bio)) {
> elv_bio_merged(q, req, bio);
> - if (!attempt_front_merge(q, req))
> + free = attempt_front_merge(q, req);
> + if (!free)
> elv_merged_request(q, req, el_ret);
> + else
> + __blk_put_request(q, free);
> goto out_unlock;
> }
> }
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index 3826fc32b72c..a373416dbc9a 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -733,9 +733,11 @@ static struct request *attempt_merge(struct request_queue *q,
> if (blk_rq_cpu_valid(next))
> req->cpu = next->cpu;
>
> - /* owner-ship of bio passed from next to req */
> + /*
> + * ownership of bio passed from next to req, return 'next' for
> + * the caller to free
> + */
> next->bio = NULL;
> - __blk_put_request(q, next);
> return next;
> }
>
> @@ -763,12 +765,19 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
> struct request *next)
> {
> struct elevator_queue *e = q->elevator;
> + struct request *free;
>
> if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
> if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
> return 0;
>
> - return attempt_merge(q, rq, next) != NULL;
> + free = attempt_merge(q, rq, next);
> + if (free) {
> + __blk_put_request(q, free);
> + return 1;
> + }
> +
> + return 0;
> }
>
Hi Jens,
blk_attempt_req_merge is reached also from
blk_mq_sched_try_insert_merge, which is invoked in mq-deadline.
According to the current usage of the scheduler lock in mq-deadline,
that lock should be taken before invoking
blk_mq_sched_try_insert_merge too, as I pointed out in a previous
reply of mine. But, if it is taken, then we have again the
double-lock-state problem in put_rq_private.
Unfortunately, from your general explanation on how to best protect
shared data structures, I did not understand (sorry) whether the
scheduler lock must or must not be taken around
blk_mq_sched_try_insert_merge in the current implementation of
mq-deadline. Could you patiently provide a thickheaded-proof, yes/no
answer, if possible?
Thanks,
Paolo
> bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
> index 114814ec3d49..d93b56d53c4e 100644
> --- a/block/blk-mq-sched.c
> +++ b/block/blk-mq-sched.c
> @@ -234,7 +234,8 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
> }
> EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
>
> -bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
> +bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
> + struct request **merged_request)
> {
> struct request *rq;
> int ret;
> @@ -244,7 +245,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
> if (!blk_mq_sched_allow_merge(q, rq, bio))
> return false;
> if (bio_attempt_back_merge(q, rq, bio)) {
> - if (!attempt_back_merge(q, rq))
> + *merged_request = attempt_back_merge(q, rq);
> + if (!*merged_request)
> elv_merged_request(q, rq, ret);
> return true;
> }
> @@ -252,7 +254,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
> if (!blk_mq_sched_allow_merge(q, rq, bio))
> return false;
> if (bio_attempt_front_merge(q, rq, bio)) {
> - if (!attempt_front_merge(q, rq))
> + *merged_request = attempt_front_merge(q, rq);
> + if (!*merged_request)
> elv_merged_request(q, rq, ret);
> return true;
> }
> diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
> index 9478aaeb48c5..3643686a54b8 100644
> --- a/block/blk-mq-sched.h
> +++ b/block/blk-mq-sched.h
> @@ -16,7 +16,8 @@ void blk_mq_sched_put_request(struct request *rq);
>
> void blk_mq_sched_request_inserted(struct request *rq);
> bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
> -bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
> +bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
> + struct request **merged_request);
> bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
> bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
> void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
> diff --git a/block/mq-deadline.c b/block/mq-deadline.c
> index 49583536698c..682fa64f55ff 100644
> --- a/block/mq-deadline.c
> +++ b/block/mq-deadline.c
> @@ -371,12 +371,16 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
> {
> struct request_queue *q = hctx->queue;
> struct deadline_data *dd = q->elevator->elevator_data;
> - int ret;
> + struct request *free = NULL;
> + bool ret;
>
> spin_lock(&dd->lock);
> - ret = blk_mq_sched_try_merge(q, bio, &free);
> + ret = blk_mq_sched_try_merge(q, bio, &free);
> spin_unlock(&dd->lock);
>
> + if (free)
> + blk_mq_free_request(free);
> +
> return ret;
> }
>
> --
> 2.7.4
>
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2017-02-07 17:35 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-02-02 16:06 [PATCH 0/2] blk-mq-sched: fix put_rq_private() lock inconsistency Jens Axboe
2017-02-02 16:06 ` [PATCH 1/2] blk-merge: return the merged request Jens Axboe
2017-02-03 16:42 ` Omar Sandoval
2017-02-02 16:06 ` [PATCH 2/2] block: free merged request in the caller Jens Axboe
2017-02-03 16:44 ` Omar Sandoval
2017-02-07 17:35 ` Paolo Valente
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).