* [PATCH] dm: gracefully fail any request beyond the end of the device
From: Mike Snitzer @ 2012-09-20 19:28 UTC
To: dm-devel; +Cc: Mike Christie
The BUG_ON for access beyond the end of the device, introduced to
dm_request_fn() by commit 29e4013de7ad950280e4b2208 ("dm: implement
REQ_FLUSH/FUA support for request-based dm"), is an overly drastic
response. Use dm_kill_unmapped_request() to fail the clone and
original request with -EIO instead.
map_request() will assign the valid target returned by
dm_table_find_target() to tio->ti. But when the target isn't valid,
tio->ti is never assigned (because map_request() isn't called), so add
a check for tio->ti != NULL to dm_done().
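For reference, a condensed sketch of dm_done() with the new guard,
reconstructed from the first hunk below (the remainder of the function
is elided):

	static void dm_done(struct request *clone, int error, bool mapped)
	{
		int r = error;
		struct dm_rq_target_io *tio = clone->end_io_data;
		dm_request_endio_fn rq_end_io = NULL;

		/* tio->ti is only set by map_request(), which is never
		 * reached when target lookup fails. */
		if (tio->ti)
			rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
		...
	}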
Reported-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # v2.6.37+
---
drivers/md/dm.c | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6f..27fb739 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,7 +865,10 @@ static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti)
+ rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
@@ -1651,19 +1654,30 @@ static void dm_request_fn(struct request_queue *q)
if (!rq)
goto delay_and_out;
+ clone = rq->special;
+
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform setup, that dm_done() requires,
+ * before calling dm_kill_unmapped_request
+ */
+ blk_start_request(rq);
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+ dm_get(md);
+ dm_kill_unmapped_request(clone, -EIO);
+ goto out;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
blk_start_request(rq);
- clone = rq->special;
atomic_inc(&md->pending[rq_data_dir(clone)]);
spin_unlock(q->queue_lock);
@@ -1684,8 +1698,6 @@ delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)
* [PATCH v2] dm: gracefully fail any request beyond the end of the device
From: Mike Snitzer @ 2012-09-21 15:47 UTC
To: dm-devel; +Cc: Jun'ichi Nomura, Mike Christie
The BUG_ON for access beyond the end of the device, introduced to
dm_request_fn() by commit 29e4013de7ad950280e4b2208 ("dm: implement
REQ_FLUSH/FUA support for request-based dm"), is an overly drastic
response. Use dm_kill_unmapped_request() to fail the clone and
original request with -EIO instead.
map_request() will assign the valid target returned by
dm_table_find_target() to tio->ti. But when the target isn't valid,
tio->ti is never assigned (because map_request() isn't called), so add
a check for tio->ti != NULL to dm_done().
Reported-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # v2.6.37+
---
drivers/md/dm.c | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
v2: added a DMERR_LIMIT message to give context for the IO errors
Index: linux/drivers/md/dm.c
===================================================================
--- linux.orig/drivers/md/dm.c
+++ linux/drivers/md/dm.c
@@ -865,7 +865,10 @@ static void dm_done(struct request *clon
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti)
+ rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
@@ -1651,19 +1654,31 @@ static void dm_request_fn(struct request
if (!rq)
goto delay_and_out;
+ clone = rq->special;
+
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform setup, that dm_done() requires,
+ * before calling dm_kill_unmapped_request
+ */
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ blk_start_request(rq);
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+ dm_get(md);
+ dm_kill_unmapped_request(clone, -EIO);
+ goto out;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
blk_start_request(rq);
- clone = rq->special;
atomic_inc(&md->pending[rq_data_dir(clone)]);
spin_unlock(q->queue_lock);
@@ -1684,8 +1699,6 @@ delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)
* Re: [PATCH v2] dm: gracefully fail any request beyond the end of the device
From: Jun'ichi Nomura @ 2012-09-24 9:38 UTC
To: Mike Snitzer; +Cc: dm-devel, Mike Christie
On 09/22/12 00:47, Mike Snitzer wrote:
> @@ -1651,19 +1654,31 @@ static void dm_request_fn(struct request
> if (!rq)
> goto delay_and_out;
>
> + clone = rq->special;
> +
> /* always use block 0 to find the target for flushes for now */
> pos = 0;
> if (!(rq->cmd_flags & REQ_FLUSH))
> pos = blk_rq_pos(rq);
>
> ti = dm_table_find_target(map, pos);
> - BUG_ON(!dm_target_is_valid(ti));
> + if (!dm_target_is_valid(ti)) {
> + /*
> + * Must perform setup, that dm_done() requires,
> + * before calling dm_kill_unmapped_request
> + */
> + DMERR_LIMIT("request attempted access beyond the end of device");
> + blk_start_request(rq);
> + atomic_inc(&md->pending[rq_data_dir(clone)]);
> + dm_get(md);
> + dm_kill_unmapped_request(clone, -EIO);
> + goto out;
This "goto out" should be "continue" so that request_fn
process next requests in the queue.
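(For context, a rough sketch of the dispatch loop's shape, not
verbatim, with unrelated details elided:

	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto delay_and_out;
		...
		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			/* fail just this one request ... */
			continue;	/* service the next queued request */
		}
		...
	}
	...
out:
	dm_table_put(map);

With "continue" the loop picks up the next request; with "goto out" the
remaining queued requests sit until the queue is run again.)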
Also, I think introducing a function dm_start_request()
will make this part of the code a little easier to read.
An edited patch is attached.
--
Jun'ichi Nomura, NEC Corporation
[-- Attachment #2: a.patch --]
[-- Type: text/x-patch, Size: 2723 bytes --]
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e24143c..3977f8d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,7 +865,10 @@ static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti)
+ rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
@@ -1566,15 +1569,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
@@ -1606,6 +1600,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
return requeued;
}
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ struct request *clone;
+
+ blk_start_request(orig);
+ clone = orig->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count by device opener,
+ * because the device may be closed during the request completion
+ * when all bios are completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ return clone;
+}
+
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
@@ -1635,14 +1649,21 @@ static void dm_request_fn(struct request_queue *q)
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform setup, that dm_done() requires,
+ * before calling dm_kill_unmapped_request
+ */
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ clone = dm_start_request(md, rq);
+ dm_kill_unmapped_request(clone, -EIO);
+ continue;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
- blk_start_request(rq);
- clone = rq->special;
- atomic_inc(&md->pending[rq_data_dir(clone)]);
+ clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
@@ -1662,8 +1683,6 @@ delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)
* Re: [PATCH v2] dm: gracefully fail any request beyond the end of the device
From: Mike Snitzer @ 2012-09-24 13:07 UTC
To: Jun'ichi Nomura; +Cc: dm-devel, Mike Christie
On Mon, Sep 24 2012 at 5:38am -0400,
Jun'ichi Nomura <j-nomura@ce.jp.nec.com> wrote:
> On 09/22/12 00:47, Mike Snitzer wrote:
> > @@ -1651,19 +1654,31 @@ static void dm_request_fn(struct request
> > if (!rq)
> > goto delay_and_out;
> >
> > + clone = rq->special;
> > +
> > /* always use block 0 to find the target for flushes for now */
> > pos = 0;
> > if (!(rq->cmd_flags & REQ_FLUSH))
> > pos = blk_rq_pos(rq);
> >
> > ti = dm_table_find_target(map, pos);
> > - BUG_ON(!dm_target_is_valid(ti));
> > + if (!dm_target_is_valid(ti)) {
> > + /*
> > + * Must perform setup, that dm_done() requires,
> > + * before calling dm_kill_unmapped_request
> > + */
> > + DMERR_LIMIT("request attempted access beyond the end of device");
> > + blk_start_request(rq);
> > + atomic_inc(&md->pending[rq_data_dir(clone)]);
> > + dm_get(md);
> > + dm_kill_unmapped_request(clone, -EIO);
> > + goto out;
>
> This "goto out" should be "continue" so that request_fn
> process next requests in the queue.
>
> Also, I think introducing a function dm_start_request()
> will make this part of the code a little easier to read.
> An edited patch is attached.
Aside from the continue, this matches exactly what I was going to do
for v3 (based on Mike Christie's feedback -- which was to introduce
dm_start_request too). Anyway, it looks great.
I'll get a formal v3 posted so Alasdair can stage it.
Thanks,
Mike
* [PATCH v3] dm: gracefully fail any request beyond the end of the device
From: Mike Snitzer @ 2012-09-24 13:28 UTC
To: dm-devel; +Cc: Jun'ichi Nomura, Mike Christie
The BUG_ON for access beyond the end of the device, introduced to
dm_request_fn() by commit 29e4013de7ad950280e4b2208 ("dm: implement
REQ_FLUSH/FUA support for request-based dm"), is an overly drastic
response. Use dm_kill_unmapped_request() to fail the clone and
original request with -EIO instead.
map_request() will assign the valid target returned by
dm_table_find_target() to tio->ti. But when the target isn't valid,
tio->ti is never assigned (because map_request() isn't called), so add
a check for tio->ti != NULL to dm_done().
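Note that the new dm_start_request() helper also absorbs the md
reference that map_request() used to take for in-flight I/O, so the
new failure path acquires it as well. A condensed sketch of the
helper, reconstructed from the hunks below:

	static struct request *dm_start_request(struct mapped_device *md,
						struct request *orig)
	{
		struct request *clone;

		blk_start_request(orig);
		clone = orig->special;
		atomic_inc(&md->pending[rq_data_dir(clone)]);

		/*
		 * Hold the md reference for the in-flight I/O; the
		 * device-opener's reference may be dropped before the
		 * request completes (see rq_completed()).
		 */
		dm_get(md);

		return clone;
	}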
Reported-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: stable@vger.kernel.org # v2.6.37+
---
drivers/md/dm.c | 51 +++++++++++++++++++++++++++++++++++----------------
1 file changed, 35 insertions(+), 16 deletions(-)
v2: added a DMERR_LIMIT message to give context for the IO errors
v3: folded in Jun'ichi's changes: dm_start_request and continue
Index: linux-2.6/drivers/md/dm.c
===================================================================
--- linux-2.6.orig/drivers/md/dm.c
+++ linux-2.6/drivers/md/dm.c
@@ -865,7 +865,10 @@ static void dm_done(struct request *clon
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti)
+ rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
@@ -1588,15 +1591,6 @@ static int map_request(struct dm_target
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
@@ -1628,6 +1622,26 @@ static int map_request(struct dm_target
return requeued;
}
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ struct request *clone;
+
+ blk_start_request(orig);
+ clone = orig->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count by device opener,
+ * because the device may be closed during the request completion
+ * when all bios are completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ return clone;
+}
+
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
@@ -1657,14 +1671,21 @@ static void dm_request_fn(struct request
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
- BUG_ON(!dm_target_is_valid(ti));
+ if (!dm_target_is_valid(ti)) {
+ /*
+ * Must perform setup, that dm_done() requires,
+ * before calling dm_kill_unmapped_request
+ */
+ DMERR_LIMIT("request attempted access beyond the end of device");
+ clone = dm_start_request(md, rq);
+ dm_kill_unmapped_request(clone, -EIO);
+ continue;
+ }
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
- blk_start_request(rq);
- clone = rq->special;
- atomic_inc(&md->pending[rq_data_dir(clone)]);
+ clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
@@ -1684,8 +1705,6 @@ delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
-
- return;
}
int dm_underlying_device_busy(struct request_queue *q)