public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [patch] max-sectors-2.6.9-rc1-bk14-A0
@ 2004-09-08 10:04 Ingo Molnar
  2004-09-08 10:09 ` Andrew Morton
  2004-09-08 10:17 ` Jens Axboe
  0 siblings, 2 replies; 8+ messages in thread
From: Ingo Molnar @ 2004-09-08 10:04 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, Andrew Morton

[-- Attachment #1: Type: text/plain, Size: 715 bytes --]


this is a re-send of the max-sectors patch against 2.6.9-rc1-bk14.

the attached patch introduces two new /sys/block values:

  /sys/block/*/queue/max_hw_sectors_kb
  /sys/block/*/queue/max_sectors_kb

max_hw_sectors_kb is the maximum that the driver can handle and is
readonly. max_sectors_kb is the current max_sectors value and can be
tuned by root. PAGE_SIZE granularity is enforced.

It's all locking-safe and all affected layered drivers have been updated
as well. The patch has been in testing for a couple of weeks already as
part of the voluntary-preempt patches and it works just fine - people
use it to reduce IDE IRQ handling latencies. Please apply.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

	Ingo

[-- Attachment #2: max-sectors-2.6.9-rc1-bk14-A0 --]
[-- Type: text/plain, Size: 6638 bytes --]

--- linux/include/linux/blkdev.h.orig	
+++ linux/include/linux/blkdev.h	
@@ -344,6 +344,7 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 
 	unsigned short		max_sectors;
+	unsigned short		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
 	unsigned short		hardsect_size;
--- linux/drivers/block/ll_rw_blk.c.orig	
+++ linux/drivers/block/ll_rw_blk.c	
@@ -352,7 +352,7 @@ void blk_queue_max_sectors(request_queue
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = max_sectors;
+	q->max_sectors = q->max_hw_sectors = max_sectors;
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -454,7 +454,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = t->max_hw_sectors =
+		min_not_zero(t->max_sectors,b->max_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -2583,11 +2584,11 @@ end_io:
 			break;
 		}
 
-		if (unlikely(bio_sectors(bio) > q->max_sectors)) {
+		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
 			printk("bio too big device %s (%u > %u)\n", 
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
-				q->max_sectors);
+				q->max_hw_sectors);
 			goto end_io;
 		}
 
@@ -3206,13 +3207,61 @@ queue_ra_store(struct request_queue *q, 
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (ra_kb > (q->max_sectors >> 1))
 		ra_kb = (q->max_sectors >> 1);
 
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	spin_unlock_irq(q->queue_lock);
+
 	return ret;
 }
 
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+	int max_sectors_kb = q->max_sectors >> 1;
+
+	return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long max_sectors_kb,
+			max_hw_sectors_kb = q->max_hw_sectors >> 1,
+			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+	int ra_kb;
+
+	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+		return -EINVAL;
+	/*
+	 * Take the queue lock to update the readahead and max_sectors
+	 * values synchronously:
+	 */
+	spin_lock_irq(q->queue_lock);
+	/*
+	 * Trim readahead window as well, if necessary:
+	 */
+	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	if (ra_kb > max_sectors_kb)
+		q->backing_dev_info.ra_pages =
+				max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+	q->max_sectors = max_sectors_kb << 1;
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+	return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -3225,9 +3274,22 @@ static struct queue_sysfs_entry queue_ra
 	.store = queue_ra_store,
 };
 
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_sectors_show,
+	.store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+	.show = queue_max_hw_sectors_show,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
+	&queue_max_hw_sectors_entry.attr,
+	&queue_max_sectors_entry.attr,
 	NULL,
 };
 
--- linux/drivers/md/dm-table.c.orig	
+++ linux/drivers/md/dm-table.c	
@@ -825,7 +825,7 @@ void dm_table_set_restrictions(struct dm
 	 * Make sure we obey the optimistic sub devices
 	 * restrictions.
 	 */
-	q->max_sectors = t->limits.max_sectors;
+	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
--- linux/drivers/md/linear.c.orig	
+++ linux/drivers/md/linear.c	
@@ -157,7 +157,7 @@ static int linear_run (mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->size = rdev->size;
 		mddev->array_size += rdev->size;
--- linux/drivers/md/multipath.c.orig	
+++ linux/drivers/md/multipath.c	
@@ -325,7 +325,7 @@ static int multipath_add_disk(mddev_t *m
 		 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
 			rdev->raid_disk = path;
@@ -479,7 +479,7 @@ static int multipath_run (mddev_t *mddev
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!rdev->faulty) 
 			conf->working_disks++;
--- linux/drivers/md/raid0.c.orig	
+++ linux/drivers/md/raid0.c	
@@ -162,7 +162,7 @@ static int create_strip_zones (mddev_t *
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->size <smallest->size))
 			smallest = rdev1;
--- linux/drivers/md/raid1.c.orig	
+++ linux/drivers/md/raid1.c	
@@ -753,7 +753,7 @@ static int raid1_add_disk(mddev_t *mddev
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -1196,7 +1196,7 @@ static int run(mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 		if (!rdev->faulty && rdev->in_sync)

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:04 [patch] max-sectors-2.6.9-rc1-bk14-A0 Ingo Molnar
@ 2004-09-08 10:09 ` Andrew Morton
  2004-09-08 10:49   ` Ingo Molnar
  2004-09-08 10:17 ` Jens Axboe
  1 sibling, 1 reply; 8+ messages in thread
From: Andrew Morton @ 2004-09-08 10:09 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: axboe, linux-kernel

Ingo Molnar <mingo@elte.hu> wrote:
>
> the attached patch introduces two new /sys/block values:
> 
>    /sys/block/*/queue/max_hw_sectors_kb
>    /sys/block/*/queue/max_sectors_kb
> 
>  max_hw_sectors_kb is the maximum that the driver can handle and is
>  readonly. max_sectors_kb is the current max_sectors value and can be
>  tuned by root. PAGE_SIZE granularity is enforced.
> 
>  It's all locking-safe and all affected layered drivers have been updated
>  as well. The patch has been in testing for a couple of weeks already as
>  part of the voluntary-preempt patches and it works just fine - people
>  use it to reduce IDE IRQ handling latencies.

Could you remind us what the cause of the latency is, and its duration?

(Am vaguely surprised that it's an issue at, what, 32 pages?  Is something
sucky happening?)


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:04 [patch] max-sectors-2.6.9-rc1-bk14-A0 Ingo Molnar
  2004-09-08 10:09 ` Andrew Morton
@ 2004-09-08 10:17 ` Jens Axboe
  2004-09-08 10:54   ` Ingo Molnar
  1 sibling, 1 reply; 8+ messages in thread
From: Jens Axboe @ 2004-09-08 10:17 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: linux-kernel, Andrew Morton

On Wed, Sep 08 2004, Ingo Molnar wrote:
> 
> this is a re-send of the max-sectors patch against 2.6.9-rc1-bk14.
> 
> the attached patch introduces two new /sys/block values:
> 
>   /sys/block/*/queue/max_hw_sectors_kb
>   /sys/block/*/queue/max_sectors_kb
> 
> max_hw_sectors_kb is the maximum that the driver can handle and is
> readonly. max_sectors_kb is the current max_sectors value and can be
> tuned by root. PAGE_SIZE granularity is enforced.
> 
> It's all locking-safe and all affected layered drivers have been updated
> as well. The patch has been in testing for a couple of weeks already as
> part of the voluntary-preempt patches and it works just fine - people
> use it to reduce IDE IRQ handling latencies. Please apply.

Wasn't the move of the ide_lock grabbing enough to solve this problem by
itself?


-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:09 ` Andrew Morton
@ 2004-09-08 10:49   ` Ingo Molnar
  2004-09-08 11:43     ` Andrew Morton
  0 siblings, 1 reply; 8+ messages in thread
From: Ingo Molnar @ 2004-09-08 10:49 UTC (permalink / raw)
  To: Andrew Morton; +Cc: axboe, linux-kernel


* Andrew Morton <akpm@osdl.org> wrote:

> > the attached patch introduces two new /sys/block values:
> > 
> >    /sys/block/*/queue/max_hw_sectors_kb
> >    /sys/block/*/queue/max_sectors_kb
> > 
> >  max_hw_sectors_kb is the maximum that the driver can handle and is
> >  readonly. max_sectors_kb is the current max_sectors value and can be
> >  tuned by root. PAGE_SIZE granularity is enforced.
> > 
> >  It's all locking-safe and all affected layered drivers have been updated
> >  as well. The patch has been in testing for a couple of weeks already as
> >  part of the voluntary-preempt patches and it works just fine - people
> >  use it to reduce IDE IRQ handling latencies.
> 
> Could you remind us what the cause of the latency is, and its
> duration?
>
> (Am vaguely surprised that it's an issue at, what, 32 pages?  Is
> something sucky happening?)

yes, we are touching and completing 32 (or 64?) completely cache-cold
structures: the page and the bio which are on two separate cachelines a
pop. We also call into the mempool code for every bio completed. With
the default max_sectors people reported hardirq latencies up to 1msec or
more. You can see a trace of a 600+usec latency at:

  http://krustophenia.net/testresults.php?dataset=2.6.8-rc4-bk3-O7#/var/www/2.6.8-rc4-bk3-O7/ide_irq_latency_trace.txt

here it's ~8 usecs per page completion - with 64 pages this completion
activity alone is 512 usecs. So people want to have a way to tune down
the maximum overhead in hardirq handlers. Users of the VP patches have
reported good results (== no significant performance impact) with
max_sectors at 32KB (8 pages) or even 16KB (4 pages).

	Ingo

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:17 ` Jens Axboe
@ 2004-09-08 10:54   ` Ingo Molnar
  2004-09-08 11:05     ` Jens Axboe
  0 siblings, 1 reply; 8+ messages in thread
From: Ingo Molnar @ 2004-09-08 10:54 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, Andrew Morton


* Jens Axboe <axboe@suse.de> wrote:

> Wasn't the move of the ide_lock grabbing enough to solve this problem
> by itself?

yes and no. It does solve it for the specific case of the
voluntary-preemption patches: there hardirqs can run in separate kernel
threads which are preemptable (no HARDIRQ_OFFSET). In stock Linux
hardirqs are not preemptable so the earlier dropping of ide_lock doesnt
solve the latency.

so in the upstream kernel the only solution is to reduce the size of IO.
(I'll push the hardirq patches later on too but their acceptance should
not hinder people in achieving good latencies.) It can be useful for
other reasons too to reduce IO, so why not? The patch certainly causes
no overhead anywhere in the block layer and people are happy with it.

	Ingo

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:54   ` Ingo Molnar
@ 2004-09-08 11:05     ` Jens Axboe
  0 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2004-09-08 11:05 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: linux-kernel, Andrew Morton

On Wed, Sep 08 2004, Ingo Molnar wrote:
> 
> * Jens Axboe <axboe@suse.de> wrote:
> 
> > Wasn't the move of the ide_lock grabbing enough to solve this problem
> > by itself?
> 
> yes and no. It does solve it for the specific case of the
> voluntary-preemption patches: there hardirqs can run in separate kernel
> threads which are preemptable (no HARDIRQ_OFFSET). In stock Linux
> hardirqs are not preemptable so the earlier dropping of ide_lock doesnt
> solve the latency.
> 
> so in the upstream kernel the only solution is to reduce the size of IO.
> (I'll push the hardirq patches later on too but their acceptance should
> not hinder people in achieving good latencies.) It can be useful for
> other reasons too to reduce IO, so why not? The patch certainly causes
> no overhead anywhere in the block layer and people are happy with it.

I'm not particularly against it, I was just curious. The splitting of
max_sectors into a max_hw_sectors is something we need to do anyways, so
I'm quite fine with the patch. You can add my signed-off-by too.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 10:49   ` Ingo Molnar
@ 2004-09-08 11:43     ` Andrew Morton
  2004-09-08 12:38       ` Ingo Molnar
  0 siblings, 1 reply; 8+ messages in thread
From: Andrew Morton @ 2004-09-08 11:43 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: axboe, linux-kernel

Ingo Molnar <mingo@elte.hu> wrote:
>
> * Andrew Morton <akpm@osdl.org> wrote:
> 
>  > > the attached patch introduces two new /sys/block values:
>  > > 
>  > >    /sys/block/*/queue/max_hw_sectors_kb
>  > >    /sys/block/*/queue/max_sectors_kb
>  > > 
>  > >  max_hw_sectors_kb is the maximum that the driver can handle and is
>  > >  readonly. max_sectors_kb is the current max_sectors value and can be
>  > >  tuned by root. PAGE_SIZE granularity is enforced.
>  > > 
>  > >  It's all locking-safe and all affected layered drivers have been updated
>  > >  as well. The patch has been in testing for a couple of weeks already as
>  > >  part of the voluntary-preempt patches and it works just fine - people
>  > >  use it to reduce IDE IRQ handling latencies.
>  > 
>  > Could you remind us what the cause of the latency is, and its
>  > duration?
>  >
>  > (Am vaguely surprised that it's an issue at, what, 32 pages?  Is
>  > something sucky happening?)
> 
>  yes, we are touching and completing 32 (or 64?) completely cache-cold
>  structures: the page and the bio which are on two separate cachelines a
>  pop. We also call into the mempool code for every bio completed. With
>  the default max_sectors people reported hardirq latencies up to 1msec or
>  more. You can see a trace of a 600+usec latency at:
> 
>    http://krustophenia.net/testresults.php?dataset=2.6.8-rc4-bk3-O7#/var/www/2.6.8-rc4-bk3-O7/ide_irq_latency_trace.txt
> 
>  here it's ~8 usecs per page completion - with 64 pages this completion
>  activity alone is 512 usecs. So people want to have a way to tune down
>  the maximum overhead in hardirq handlers. Users of the VP patches have
>  reported good results (== no significant performance impact) with
>  max_sectors at 32KB (8 pages) or even 16KB (4 pages).

Still sounds a bit odd.  How many cachelines can that CPU fetch in 8 usecs?
Several tens at least?

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [patch] max-sectors-2.6.9-rc1-bk14-A0
  2004-09-08 11:43     ` Andrew Morton
@ 2004-09-08 12:38       ` Ingo Molnar
  0 siblings, 0 replies; 8+ messages in thread
From: Ingo Molnar @ 2004-09-08 12:38 UTC (permalink / raw)
  To: Andrew Morton; +Cc: axboe, linux-kernel


* Andrew Morton <akpm@osdl.org> wrote:

> Still sounds a bit odd.  How many cachelines can that CPU fetch in 8
> usecs? Several tens at least?

the CPU in question is a 600 MHz C3, so it should be dozens. Considering
a conservative 200nsec cacheline-fetch latency and 8 nsecs per byte
bursted - so for a 32-byte cacheline it could take 264 nsecs. So with
... ~8 cachelines touched that could only explain 2-3 usec of overhead.
The bio itself is not laid out optimally: the bio and the vector are on
two different cachelines plus we have the buffer_head too (in the ext3
case) - all on different cachelines.

but the latency does happen and it happens even with tracing turned
completely off.

The main overhead is the completion path for a single page, which goes
like:

__end_that_request_first()
  bio_endio()
    end_bio_bh_io_sync()
      journal_end_buffer_io_sync()
         unlock_buffer()
           wake_up_buffer()
    bio_put()
      bio_destructor()
        mempool_free()
          mempool_free_slab()
            kmem_cache_free()
        mempool_free()
          mempool_free_slab()
            kmem_cache_free()

this is quite fat just from an instruction count POV - 14 functions with
at least 20 instructions in each function, amounting to ~300
instructions per iteration - that alone is quite an icache footprint
assumption.

Plus we could be trashing the cache due to touching at least 3 new
cachelines per iteration - which is 192 new (dirty) cachelines for the
full completion or ~6K of new L1 cache contents. With 128 byte
cachelines it's much worse: at least 24K worth of new cache contents. 
I'd suggest to at least attempt to merge bio and bio->bi_io_vec into a
single cacheline, for the simpler cases.

another detail is the SLAB's FIFO logic memmove-ing the full array:

 0.184ms (+0.000ms): kmem_cache_free (mempool_free)
 0.185ms (+0.000ms): cache_flusharray (kmem_cache_free)
 0.185ms (+0.000ms): free_block (cache_flusharray)
 0.200ms (+0.014ms): memmove (cache_flusharray)
 0.200ms (+0.000ms): memcpy (memmove)

that's 14 usecs a pop and quite likely a fair amount of new dirty cache
contents.

The building of the sg-list of the next DMA request was responsible for
some of the latency as well:

 0.571ms (+0.000ms): ide_build_dmatable (ide_start_dma)
 0.571ms (+0.000ms): ide_build_sglist (ide_build_dmatable)
 0.572ms (+0.000ms): blk_rq_map_sg (ide_build_sglist)
 0.593ms (+0.021ms): do_IRQ (common_interrupt)
 0.594ms (+0.000ms): mask_and_ack_8259A (do_IRQ)

this completion codepath isn't something people really profiled/measured
previously, because it's in an irqs-off hardirq path that triggers
relatively rarely. But for scheduling latencies it can be quite high.

	Ingo

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2004-09-08 12:39 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2004-09-08 10:04 [patch] max-sectors-2.6.9-rc1-bk14-A0 Ingo Molnar
2004-09-08 10:09 ` Andrew Morton
2004-09-08 10:49   ` Ingo Molnar
2004-09-08 11:43     ` Andrew Morton
2004-09-08 12:38       ` Ingo Molnar
2004-09-08 10:17 ` Jens Axboe
2004-09-08 10:54   ` Ingo Molnar
2004-09-08 11:05     ` Jens Axboe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox