* [RFC] cfq: adapt slice to number of processes doing I/O
@ 2009-09-03 11:07 Corrado Zoccolo
From: Corrado Zoccolo @ 2009-09-03 11:07 UTC
To: Linux-Kernel, Jens Axboe
[-- Attachment #1: Type: text/plain, Size: 1817 bytes --]
When the number of processes performing I/O concurrently increases, a
fixed time slice per process will cause large latencies.
With this patch, if there are more than 3 processes performing concurrent
I/O, we scale the time slice down proportionally.
To safeguard sequential bandwidth, we impose a minimum time slice,
computed from cfq_slice_idle (the idea is that cfq_slice_idle
approximates the cost of a seek).
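To make the scaling concrete, here is a minimal userspace sketch of the
computation (the scaled_slice helper is illustrative, not kernel code; it
assumes the stock tunables cfq_slice_sync = 100ms and cfq_slice_idle = 8ms,
and works in milliseconds instead of jiffies):

#include <stdio.h>

/* Mirrors the proposed cfq_set_prio_slice() scaling: above 3 busy
 * queues the slice shrinks proportionally, but never below a floor
 * of slice_idle (doubled for sync queues). */
static unsigned scaled_slice(unsigned base_slice, unsigned slice_idle,
			     unsigned busy_queues, int sync)
{
	unsigned low_slice = slice_idle * (1 + sync);
	unsigned slice = base_slice;

	if (busy_queues > 3) {
		slice *= 3;
		slice /= busy_queues;
	}
	return slice > low_slice ? slice : low_slice;
}

int main(void)
{
	/* 32 sync queues: 100 * 3 / 32 = 9ms, raised to the
	 * 2 * 8 = 16ms floor derived from slice_idle. */
	printf("32 queues: %u ms\n", scaled_slice(100, 8, 32, 1));
	/* 3 or fewer queues: the full 100ms slice is kept. */
	printf(" 2 queues: %u ms\n", scaled_slice(100, 8, 2, 1));
	return 0;
}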
I performed two tests, on a rotational disk:
* 32 concurrent processes performing random reads
** the bandwidth is improved from 466KB/s to 477KB/s
** the maximum latency is reduced from 7.667s to 1.728s
* 32 concurrent processes performing sequential reads
** the bandwidth is reduced from 28093KB/s to 24393KB/s
** the maximum latency is reduced from 3.781s to 1.115s
I expect the numbers to be even better on SSDs, where the penalty for
disrupting a sequential read is much smaller.
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fd7080e..cff4ca8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -306,7 +306,15 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+ unsigned low_slice = cfqd->cfq_slice_idle * (1 + cfq_cfqq_sync(cfqq));
+ unsigned interested_queues = cfq_class_rt(cfqq) ? cfqd->busy_rt_queues : cfqd->busy_queues;
+ unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+ if (interested_queues > 3) {
+ slice *= 3;
+ slice /= interested_queues;
+ }
+
+ cfqq->slice_end = jiffies + max(slice, low_slice);
cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}
[-- Attachment #2: test2f.fio --]
[-- Type: application/octet-stream, Size: 4743 bytes --]
[randomread.0]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.1]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.2]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.3]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.4]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.5]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.6]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.7]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.8]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.9]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.10]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.11]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.12]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.13]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.14]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.15]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.16]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.17]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.18]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.19]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.20]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.21]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.22]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.23]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.24]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.25]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.26]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.27]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.28]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.29]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.30]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.31]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[randomread.32]
rw=randread
size=2G
runtime=30
ioengine=psync
filename=randomread.2.0
[seqread.0]
stonewall
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.1]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.2]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.3]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.4]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.5]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.6]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.7]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.8]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.9]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.10]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.11]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.12]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.13]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.14]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.15]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.16]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.17]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.18]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.19]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.20]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.21]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.22]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.23]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.24]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.25]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.26]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.27]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.28]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.29]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.30]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.31]
rw=read
size=30M
runtime=30
ioengine=psync
[seqread.32]
rw=read
size=30M
runtime=30
ioengine=psync
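For reference, the workload above can be reproduced by running fio against
this job file from a directory on the device under test (assuming a
reasonably recent fio):

fio test2f.fio

The 33 randomread jobs form group 0; the stonewall option in seqread.0
makes fio wait for them to finish before starting the 33 sequential
readers as group 1, which is why the outputs below report two separate
run status groups.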
[-- Attachment #3: cfq_orig.out --]
[-- Type: application/octet-stream, Size: 55683 bytes --]
randomread.0: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.1: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.2: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.3: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.4: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.5: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.6: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.7: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.8: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.9: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.10: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.11: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.12: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.13: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.14: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.15: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.16: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.17: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.18: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.19: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.20: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.21: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.22: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.23: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.24: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.25: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.26: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.27: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.28: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.29: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.30: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.31: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.32: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.0: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.1: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.2: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.3: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.4: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.5: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.6: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.7: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.8: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.9: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.10: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.11: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.12: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.13: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.14: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.15: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.16: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.17: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.18: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.19: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.20: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.21: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.22: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.23: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.24: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.25: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.26: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.27: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.28: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.29: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.30: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.31: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.32: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
Starting 66 processes
randomread.0: (groupid=0, jobs=1): err= 0: pid=8040
read : io=352KiB, bw=11KiB/s, iops=2, runt= 30151msec
clat (msec): min=2, max=4624, avg=342.61, stdev=1078.69
bw (KiB/s) : min= 2, max= 23, per=2.33%, avg=11.20, stdev= 6.30
cpu : usr=0.00%, sys=0.01%, ctx=102, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=88/0, short=0/0
lat (msec): 4=5.68%, 10=54.55%, 20=22.73%, 50=3.41%, 100=1.14%
lat (msec): 250=1.14%, 500=1.14%, 750=1.14%, 1000=1.14%, >=2000=7.95%
randomread.1: (groupid=0, jobs=1): err= 0: pid=8041
read : io=364KiB, bw=12KiB/s, iops=3, runt= 30258msec
clat (usec): min=4, max=4437K, avg=332490.47, stdev=1033196.11
bw (KiB/s) : min= 1, max= 93, per=3.84%, avg=18.44, stdev=28.76
cpu : usr=0.00%, sys=0.01%, ctx=112, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=91/0, short=0/0
lat (usec): 10=2.20%, 500=1.10%
lat (msec): 4=4.40%, 10=48.35%, 20=25.27%, 50=3.30%, 250=3.30%
lat (msec): 500=2.20%, 750=1.10%, 2000=1.10%, >=2000=7.69%
randomread.2: (groupid=0, jobs=1): err= 0: pid=8042
read : io=224KiB, bw=7KiB/s, iops=1, runt= 30040msec
clat (msec): min=3, max=4594, avg=536.42, stdev=1336.21
bw (KiB/s) : min= 1, max= 18, per=1.41%, avg= 6.75, stdev= 6.69
cpu : usr=0.00%, sys=0.01%, ctx=67, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=56/0, short=0/0
lat (msec): 4=5.36%, 10=48.21%, 20=28.57%, 250=1.79%, 500=1.79%
lat (msec): 2000=1.79%, >=2000=12.50%
randomread.3: (groupid=0, jobs=1): err= 0: pid=8043
read : io=416KiB, bw=14KiB/s, iops=3, runt= 30081msec
clat (msec): min=3, max=4607, avg=289.23, stdev=959.51
bw (KiB/s) : min= 1, max= 42, per=3.50%, avg=16.80, stdev=14.73
cpu : usr=0.00%, sys=0.01%, ctx=118, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=104/0, short=0/0
lat (msec): 4=2.88%, 10=64.42%, 20=19.23%, 100=2.88%, 250=0.96%
lat (msec): 750=0.96%, 1000=0.96%, 2000=0.96%, >=2000=6.73%
randomread.4: (groupid=0, jobs=1): err= 0: pid=8044
read : io=548KiB, bw=18KiB/s, iops=4, runt= 30165msec
clat (msec): min=2, max=4500, avg=220.17, stdev=862.35
bw (KiB/s) : min= 1, max= 171, per=6.85%, avg=32.89, stdev=53.41
cpu : usr=0.00%, sys=0.02%, ctx=153, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=137/0, short=0/0
lat (msec): 4=8.76%, 10=53.28%, 20=27.01%, 50=0.73%, 100=1.46%
lat (msec): 250=2.19%, 500=0.73%, 2000=0.73%, >=2000=5.11%
randomread.5: (groupid=0, jobs=1): err= 0: pid=8045
read : io=404KiB, bw=13KiB/s, iops=3, runt= 30245msec
clat (msec): min=2, max=4472, avg=299.44, stdev=933.76
bw (KiB/s) : min= 2, max= 52, per=3.58%, avg=17.17, stdev=16.72
cpu : usr=0.00%, sys=0.02%, ctx=121, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=101/0, short=0/0
lat (msec): 4=6.93%, 10=56.44%, 20=21.78%, 100=0.99%, 250=1.98%
lat (msec): 500=0.99%, 750=0.99%, 1000=0.99%, 2000=2.97%, >=2000=5.94%
randomread.6: (groupid=0, jobs=1): err= 0: pid=8046
read : io=396KiB, bw=13KiB/s, iops=3, runt= 30237msec
clat (msec): min=2, max=4412, avg=305.42, stdev=988.04
bw (KiB/s) : min= 0, max= 123, per=5.66%, avg=27.18, stdev=42.89
cpu : usr=0.00%, sys=0.01%, ctx=112, majf=0, minf=22
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=99/0, short=0/0
lat (msec): 4=8.08%, 10=49.49%, 20=26.26%, 100=1.01%, 250=3.03%
lat (msec): 500=3.03%, 750=1.01%, 2000=2.02%, >=2000=6.06%
randomread.7: (groupid=0, jobs=1): err= 0: pid=8047
read : io=428KiB, bw=14KiB/s, iops=3, runt= 30025msec
clat (usec): min=4, max=4455K, avg=280597.65, stdev=946022.47
bw (KiB/s) : min= 2, max= 46, per=3.03%, avg=14.56, stdev=14.22
cpu : usr=0.00%, sys=0.00%, ctx=119, majf=0, minf=22
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=107/0, short=0/0
lat (usec): 10=0.93%, 500=0.93%
lat (msec): 4=7.48%, 10=54.21%, 20=21.50%, 50=2.80%, 250=2.80%
lat (msec): 500=0.93%, 2000=1.87%, >=2000=6.54%
randomread.8: (groupid=0, jobs=1): err= 0: pid=8048
read : io=508KiB, bw=17KiB/s, iops=4, runt= 30001msec
clat (msec): min=2, max=4438, avg=236.22, stdev=870.00
bw (KiB/s) : min= 3, max= 60, per=4.15%, avg=19.90, stdev=18.16
cpu : usr=0.00%, sys=0.03%, ctx=153, majf=0, minf=23
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=127/0, short=0/0
lat (msec): 4=4.72%, 10=55.91%, 20=27.56%, 50=0.79%, 250=1.57%
lat (msec): 500=1.57%, 1000=1.57%, 2000=1.57%, >=2000=4.72%
randomread.9: (groupid=0, jobs=1): err= 0: pid=8049
read : io=476KiB, bw=16KiB/s, iops=3, runt= 30071msec
clat (usec): min=7, max=4392K, avg=252683.54, stdev=921640.40
bw (KiB/s) : min= 1, max= 26, per=3.08%, avg=14.78, stdev= 8.86
cpu : usr=0.00%, sys=0.02%, ctx=136, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=119/0, short=0/0
lat (usec): 10=0.84%
lat (msec): 4=4.20%, 10=57.98%, 20=23.53%, 50=4.20%, 500=1.68%
lat (msec): 750=1.68%, >=2000=5.88%
randomread.10: (groupid=0, jobs=1): err= 0: pid=8050
read : io=448KiB, bw=15KiB/s, iops=3, runt= 30170msec
clat (usec): min=496, max=4487K, avg=269362.92, stdev=943548.08
bw (KiB/s) : min= 1, max= 40, per=2.94%, avg=14.11, stdev=12.47
cpu : usr=0.00%, sys=0.00%, ctx=122, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=112/0, short=0/0
lat (usec): 500=0.89%
lat (msec): 2=0.89%, 4=8.04%, 10=60.71%, 20=16.07%, 50=0.89%
lat (msec): 100=1.79%, 250=0.89%, 500=1.79%, 750=0.89%, 2000=0.89%
lat (msec): >=2000=6.25%
randomread.11: (groupid=0, jobs=1): err= 0: pid=8051
read : io=436KiB, bw=14KiB/s, iops=3, runt= 30181msec
clat (usec): min=6, max=4377K, avg=276876.99, stdev=893374.46
bw (KiB/s) : min= 0, max= 43, per=3.42%, avg=16.40, stdev=14.73
cpu : usr=0.00%, sys=0.00%, ctx=120, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=109/0, short=0/0
lat (usec): 10=0.92%
lat (msec): 4=4.59%, 10=61.47%, 20=18.35%, 100=0.92%, 250=3.67%
lat (msec): 500=0.92%, 2000=2.75%, >=2000=6.42%
randomread.12: (groupid=0, jobs=1): err= 0: pid=8052
read : io=528KiB, bw=17KiB/s, iops=4, runt= 30098msec
clat (msec): min=2, max=5756, avg=228.00, stdev=908.72
bw (KiB/s) : min= 2, max= 46, per=4.00%, avg=19.22, stdev=13.29
cpu : usr=0.00%, sys=0.01%, ctx=155, majf=0, minf=22
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=132/0, short=0/0
lat (msec): 4=6.06%, 10=52.27%, 20=31.06%, 50=0.76%, 100=1.52%
lat (msec): 250=1.52%, 500=0.76%, 2000=1.52%, >=2000=4.55%
randomread.13: (groupid=0, jobs=1): err= 0: pid=8053
read : io=616KiB, bw=20KiB/s, iops=5, runt= 30126msec
clat (msec): min=2, max=4438, avg=195.62, stdev=797.07
bw (KiB/s) : min= 2, max= 112, per=6.89%, avg=33.09, stdev=34.92
cpu : usr=0.00%, sys=0.02%, ctx=179, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=154/0, short=0/0
lat (msec): 4=4.55%, 10=59.74%, 20=24.68%, 50=1.95%, 100=0.65%
lat (msec): 250=1.30%, 500=1.30%, 750=0.65%, 1000=0.65%, 2000=0.65%
lat (msec): >=2000=3.90%
randomread.14: (groupid=0, jobs=1): err= 0: pid=8054
read : io=496KiB, bw=16KiB/s, iops=4, runt= 30224msec
clat (msec): min=2, max=4362, avg=243.73, stdev=890.41
bw (KiB/s) : min= 2, max= 195, per=6.40%, avg=30.73, stdev=55.84
cpu : usr=0.00%, sys=0.02%, ctx=149, majf=0, minf=21
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=124/0, short=0/0
lat (msec): 4=6.45%, 10=46.77%, 20=33.87%, 100=1.61%, 250=2.42%
lat (msec): 500=0.81%, 750=1.61%, 2000=1.61%, >=2000=4.84%
randomread.15: (groupid=0, jobs=1): err= 0: pid=8055
read : io=424KiB, bw=14KiB/s, iops=3, runt= 30001msec
clat (usec): min=496, max=4638K, avg=283016.50, stdev=981215.27
bw (KiB/s) : min= 3, max= 78, per=3.91%, avg=18.78, stdev=23.44
cpu : usr=0.00%, sys=0.01%, ctx=121, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=106/0, short=0/0
lat (usec): 500=0.94%
lat (msec): 4=4.72%, 10=60.38%, 20=20.75%, 100=0.94%, 250=2.83%
lat (msec): 500=0.94%, 750=1.89%, >=2000=6.60%
randomread.16: (groupid=0, jobs=1): err= 0: pid=8056
read : io=492KiB, bw=16KiB/s, iops=4, runt= 30033msec
clat (usec): min=4, max=4597K, avg=244164.94, stdev=912676.04
bw (KiB/s) : min= 6, max= 25, per=2.99%, avg=14.33, stdev= 6.36
cpu : usr=0.00%, sys=0.00%, ctx=144, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=123/0, short=0/0
lat (usec): 10=1.63%
lat (msec): 4=5.69%, 10=52.03%, 20=29.27%, 50=0.81%, 100=1.63%
lat (msec): 250=0.81%, 500=0.81%, 750=1.63%, >=2000=5.69%
randomread.17: (groupid=0, jobs=1): err= 0: pid=8057
read : io=368KiB, bw=12KiB/s, iops=3, runt= 30130msec
clat (msec): min=2, max=5864, avg=327.49, stdev=1047.29
bw (KiB/s) : min= 1, max= 32, per=2.77%, avg=13.30, stdev=11.22
cpu : usr=0.00%, sys=0.01%, ctx=107, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=92/0, short=0/0
lat (msec): 4=3.26%, 10=54.35%, 20=27.17%, 50=3.26%, 250=1.09%
lat (msec): 750=1.09%, 2000=2.17%, >=2000=7.61%
randomread.18: (groupid=0, jobs=1): err= 0: pid=8058
read : io=480KiB, bw=16KiB/s, iops=3, runt= 30203msec
clat (msec): min=1, max=4623, avg=251.68, stdev=904.66
bw (KiB/s) : min= 5, max= 89, per=4.60%, avg=22.10, stdev=25.77
cpu : usr=0.00%, sys=0.01%, ctx=137, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=120/0, short=0/0
lat (msec): 2=0.83%, 4=9.17%, 10=50.83%, 20=26.67%, 50=1.67%
lat (msec): 250=2.50%, 750=1.67%, 2000=0.83%, >=2000=5.83%
randomread.19: (groupid=0, jobs=1): err= 0: pid=8059
read : io=368KiB, bw=12KiB/s, iops=3, runt= 30137msec
clat (usec): min=6, max=4399K, avg=327562.71, stdev=990078.19
bw (KiB/s) : min= 0, max= 73, per=4.03%, avg=19.36, stdev=22.47
cpu : usr=0.00%, sys=0.01%, ctx=98, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=92/0, short=0/0
lat (usec): 10=1.09%
lat (msec): 4=6.52%, 10=56.52%, 20=18.48%, 50=2.17%, 100=1.09%
lat (msec): 500=3.26%, 2000=4.35%, >=2000=6.52%
randomread.20: (groupid=0, jobs=1): err= 0: pid=8060
read : io=392KiB, bw=13KiB/s, iops=3, runt= 30167msec
clat (usec): min=6, max=4616K, avg=307820.78, stdev=1009164.83
bw (KiB/s) : min= 6, max= 26, per=2.57%, avg=12.33, stdev= 6.46
cpu : usr=0.00%, sys=0.00%, ctx=113, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=98/0, short=0/0
lat (usec): 10=1.02%
lat (msec): 4=5.10%, 10=56.12%, 20=25.51%, 100=1.02%, 250=2.04%
lat (msec): 750=1.02%, 2000=1.02%, >=2000=7.14%
randomread.21: (groupid=0, jobs=1): err= 0: pid=8061
read : io=356KiB, bw=12KiB/s, iops=2, runt= 30030msec
clat (usec): min=5, max=4480K, avg=337406.30, stdev=1036868.88
bw (KiB/s) : min= 4, max= 47, per=3.33%, avg=16.00, stdev=15.58
cpu : usr=0.00%, sys=0.00%, ctx=109, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=89/0, short=0/0
lat (usec): 10=1.12%, 500=1.12%
lat (msec): 4=10.11%, 10=39.33%, 20=33.71%, 50=1.12%, 250=2.25%
lat (msec): 1000=2.25%, 2000=1.12%, >=2000=7.87%
randomread.22: (groupid=0, jobs=1): err= 0: pid=8062
read : io=512KiB, bw=17KiB/s, iops=4, runt= 30023msec
clat (msec): min=2, max=4322, avg=234.54, stdev=846.14
bw (KiB/s) : min= 0, max= 52, per=3.92%, avg=18.80, stdev=14.87
cpu : usr=0.00%, sys=0.01%, ctx=153, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=128/0, short=0/0
lat (msec): 4=3.12%, 10=53.91%, 20=29.69%, 50=3.12%, 250=0.78%
lat (msec): 500=1.56%, 750=0.78%, 2000=2.34%, >=2000=4.69%
randomread.23: (groupid=0, jobs=1): err= 0: pid=8063
read : io=260KiB, bw=8KiB/s, iops=2, runt= 30039msec
clat (msec): min=2, max=5443, avg=462.13, stdev=1269.03
bw (KiB/s) : min= 0, max= 98, per=3.52%, avg=16.89, stdev=31.51
cpu : usr=0.00%, sys=0.00%, ctx=82, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=65/0, short=0/0
lat (msec): 4=4.62%, 10=49.23%, 20=27.69%, 250=3.08%, 500=3.08%
lat (msec): 1000=1.54%, >=2000=10.77%
randomread.24: (groupid=0, jobs=1): err= 0: pid=8064
read : io=388KiB, bw=13KiB/s, iops=3, runt= 30106msec
clat (msec): min=2, max=7667, avg=310.36, stdev=1145.67
bw (KiB/s) : min= 4, max= 103, per=6.44%, avg=30.90, stdev=39.18
cpu : usr=0.00%, sys=0.01%, ctx=115, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=97/0, short=0/0
lat (msec): 4=8.25%, 10=57.73%, 20=23.71%, 750=4.12%, >=2000=6.19%
randomread.25: (groupid=0, jobs=1): err= 0: pid=8065
read : io=444KiB, bw=15KiB/s, iops=3, runt= 30182msec
clat (msec): min=2, max=4393, avg=271.90, stdev=940.56
bw (KiB/s) : min= 2, max= 67, per=3.33%, avg=16.00, stdev=20.95
cpu : usr=0.01%, sys=0.01%, ctx=124, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=111/0, short=0/0
lat (msec): 4=1.80%, 10=59.46%, 20=25.23%, 50=0.90%, 100=2.70%
lat (msec): 250=1.80%, 1000=0.90%, 2000=0.90%, >=2000=6.31%
randomread.26: (groupid=0, jobs=1): err= 0: pid=8066
read : io=552KiB, bw=18KiB/s, iops=4, runt= 30081msec
clat (msec): min=1, max=4477, avg=217.97, stdev=853.62
bw (KiB/s) : min= 3, max= 54, per=4.17%, avg=20.00, stdev=15.16
cpu : usr=0.00%, sys=0.01%, ctx=167, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=138/0, short=0/0
lat (msec): 2=0.72%, 4=5.80%, 10=56.52%, 20=25.36%, 50=2.17%
lat (msec): 250=2.90%, 750=0.72%, 1000=0.72%, >=2000=5.07%
randomread.27: (groupid=0, jobs=1): err= 0: pid=8067
read : io=336KiB, bw=11KiB/s, iops=2, runt= 30224msec
clat (msec): min=3, max=4472, avg=359.80, stdev=1068.35
bw (KiB/s) : min= 4, max= 57, per=3.67%, avg=17.60, stdev=19.23
cpu : usr=0.00%, sys=0.00%, ctx=104, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=84/0, short=0/0
lat (msec): 4=4.76%, 10=42.86%, 20=29.76%, 50=4.76%, 100=2.38%
lat (msec): 250=1.19%, 500=3.57%, 750=1.19%, 2000=2.38%, >=2000=7.14%
randomread.28: (groupid=0, jobs=1): err= 0: pid=8068
read : io=356KiB, bw=12KiB/s, iops=2, runt= 30007msec
clat (usec): min=371, max=4814K, avg=337144.35, stdev=1080878.50
bw (KiB/s) : min= 3, max= 25, per=2.22%, avg=10.67, stdev= 6.87
cpu : usr=0.00%, sys=0.00%, ctx=103, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=89/0, short=0/0
lat (usec): 500=1.12%
lat (msec): 4=5.62%, 10=60.67%, 20=21.35%, 100=1.12%, 750=1.12%
lat (msec): 2000=1.12%, >=2000=7.87%
randomread.29: (groupid=0, jobs=1): err= 0: pid=8069
read : io=484KiB, bw=16KiB/s, iops=4, runt= 30111msec
clat (usec): min=4, max=4498K, avg=248840.70, stdev=903363.44
bw (KiB/s) : min= 0, max= 40, per=3.38%, avg=16.22, stdev=14.18
cpu : usr=0.00%, sys=0.02%, ctx=132, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=121/0, short=0/0
lat (usec): 10=1.65%
lat (msec): 4=8.26%, 10=60.33%, 20=19.01%, 50=1.65%, 250=1.65%
lat (msec): 2000=1.65%, >=2000=5.79%
randomread.30: (groupid=0, jobs=1): err= 0: pid=8070
read : io=368KiB, bw=12KiB/s, iops=3, runt= 30032msec
clat (usec): min=4, max=4522K, avg=326426.00, stdev=1015764.21
bw (KiB/s) : min= 0, max= 137, per=4.54%, avg=21.80, stdev=41.18
cpu : usr=0.00%, sys=0.01%, ctx=106, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=92/0, short=0/0
lat (usec): 10=1.09%
lat (msec): 4=6.52%, 10=51.09%, 20=23.91%, 50=2.17%, 250=2.17%
lat (msec): 500=3.26%, 1000=1.09%, 2000=2.17%, >=2000=6.52%
randomread.31: (groupid=0, jobs=1): err= 0: pid=8071
read : io=472KiB, bw=16KiB/s, iops=3, runt= 30006msec
clat (usec): min=493, max=4469K, avg=254275.11, stdev=952496.97
bw (KiB/s) : min= 0, max= 26, per=2.63%, avg=12.62, stdev= 9.80
cpu : usr=0.00%, sys=0.00%, ctx=130, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=118/0, short=0/0
lat (usec): 500=0.85%
lat (msec): 4=6.78%, 10=64.41%, 20=17.80%, 100=0.85%, 250=0.85%
lat (msec): 500=2.54%, >=2000=5.93%
randomread.32: (groupid=0, jobs=1): err= 0: pid=8072
read : io=500KiB, bw=16KiB/s, iops=4, runt= 30143msec
clat (msec): min=2, max=4476, avg=241.14, stdev=854.29
bw (KiB/s) : min= 1, max= 66, per=4.69%, avg=22.50, stdev=20.35
cpu : usr=0.00%, sys=0.01%, ctx=139, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=125/0, short=0/0
lat (msec): 4=6.40%, 10=60.00%, 20=22.40%, 100=1.60%, 750=1.60%
lat (msec): 1000=0.80%, 2000=2.40%, >=2000=4.80%
seqread.0: (groupid=1, jobs=1): err= 0: pid=8124
read : io=27636KiB, bw=930KiB/s, iops=227, runt= 30428msec
clat (usec): min=2, max=3724K, avg=4402.32, stdev=121733.95
bw (KiB/s) : min= 696, max= 3149, per=3.97%, avg=1104.78, stdev=769.56
cpu : usr=0.02%, sys=0.13%, ctx=290, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6909/0, short=0/0
lat (usec): 4=76.28%, 10=19.03%, 20=0.43%, 50=0.01%, 100=0.04%
lat (usec): 250=0.01%, 500=0.07%, 750=0.36%, 1000=0.25%
lat (msec): 2=0.26%, 4=2.53%, 10=0.48%, 20=0.10%, 1000=0.01%
lat (msec): >=2000=0.12%
seqread.1: (groupid=1, jobs=1): err= 0: pid=8125
read : io=24276KiB, bw=815KiB/s, iops=199, runt= 30465msec
clat (usec): min=2, max=3781K, avg=5017.98, stdev=130175.15
bw (KiB/s) : min= 103, max= 3100, per=3.57%, avg=992.22, stdev=826.53
cpu : usr=0.02%, sys=0.09%, ctx=268, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6069/0, short=0/0
lat (usec): 4=91.20%, 10=2.62%, 20=1.91%, 50=0.20%, 100=0.05%
lat (usec): 500=0.05%, 750=0.21%, 1000=0.21%
lat (msec): 2=0.40%, 4=2.32%, 10=0.58%, 20=0.08%, 250=0.02%
lat (msec): 1000=0.02%, >=2000=0.13%
seqread.2: (groupid=1, jobs=1): err= 0: pid=8126
read : io=21476KiB, bw=723KiB/s, iops=176, runt= 30383msec
clat (usec): min=2, max=3731K, avg=5657.28, stdev=138604.25
bw (KiB/s) : min= 6, max= 2950, per=3.15%, avg=875.56, stdev=855.27
cpu : usr=0.02%, sys=0.09%, ctx=222, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5369/0, short=0/0
lat (usec): 4=80.91%, 10=14.73%, 20=0.13%, 50=0.02%, 100=0.11%
lat (usec): 750=0.30%, 1000=0.32%
lat (msec): 2=0.32%, 4=2.42%, 10=0.50%, 20=0.06%, 50=0.02%
lat (msec): 2000=0.02%, >=2000=0.15%
seqread.3: (groupid=1, jobs=1): err= 0: pid=8127
read : io=25332KiB, bw=851KiB/s, iops=207, runt= 30478msec
clat (usec): min=2, max=3722K, avg=4810.81, stdev=127316.46
bw (KiB/s) : min= 116, max= 3651, per=3.89%, avg=1081.33, stdev=995.78
cpu : usr=0.03%, sys=0.10%, ctx=289, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6333/0, short=0/0
lat (usec): 4=92.15%, 10=1.61%, 20=1.82%, 50=0.25%, 100=0.03%
lat (usec): 500=0.02%, 750=0.38%, 1000=0.30%
lat (msec): 2=0.27%, 4=2.56%, 10=0.39%, 20=0.06%, 500=0.02%
lat (msec): 1000=0.02%, >=2000=0.13%
seqread.4: (groupid=1, jobs=1): err= 0: pid=8128
read : io=25572KiB, bw=858KiB/s, iops=209, runt= 30512msec
clat (usec): min=2, max=3730K, avg=4771.08, stdev=126712.41
bw (KiB/s) : min= 80, max= 6266, per=5.61%, avg=1559.80, stdev=1998.53
cpu : usr=0.03%, sys=0.07%, ctx=278, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6393/0, short=0/0
lat (usec): 4=90.88%, 10=2.71%, 20=2.10%, 50=0.06%, 100=0.02%
lat (usec): 250=0.03%, 500=0.02%, 750=0.48%, 1000=0.31%
lat (msec): 2=0.23%, 4=2.50%, 10=0.47%, 20=0.03%, 500=0.02%
lat (msec): 750=0.02%, >=2000=0.13%
seqread.5: (groupid=1, jobs=1): err= 0: pid=8129
read : io=24628KiB, bw=826KiB/s, iops=201, runt= 30529msec
clat (usec): min=2, max=3734K, avg=4956.78, stdev=129150.47
bw (KiB/s) : min= 8, max= 4403, per=3.76%, avg=1045.70, stdev=1230.51
cpu : usr=0.02%, sys=0.09%, ctx=263, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6157/0, short=0/0
lat (usec): 4=89.44%, 10=4.27%, 20=2.08%, 50=0.10%, 100=0.02%
lat (usec): 250=0.02%, 500=0.02%, 750=0.21%, 1000=0.29%
lat (msec): 2=0.31%, 4=2.50%, 10=0.52%, 20=0.06%, 750=0.03%
lat (msec): >=2000=0.13%
seqread.6: (groupid=1, jobs=1): err= 0: pid=8130
read : io=29428KiB, bw=1004KiB/s, iops=245, runt= 30004msec
clat (usec): min=2, max=3745K, avg=4076.53, stdev=117766.25
bw (KiB/s) : min= 6, max= 1138, per=2.96%, avg=824.11, stdev=321.61
cpu : usr=0.03%, sys=0.10%, ctx=297, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=7357/0, short=0/0
lat (usec): 4=77.67%, 10=18.06%, 20=0.15%, 50=0.01%, 100=0.07%
lat (usec): 250=0.01%, 500=0.01%, 750=0.48%, 1000=0.29%
lat (msec): 2=0.26%, 4=2.46%, 10=0.37%, 20=0.04%, 750=0.01%
lat (msec): >=2000=0.11%
seqread.7: (groupid=1, jobs=1): err= 0: pid=8131
read : io=25588KiB, bw=873KiB/s, iops=213, runt= 30012msec
clat (usec): min=2, max=3738K, avg=4690.00, stdev=126335.21
bw (KiB/s) : min= 5, max= 1134, per=2.88%, avg=801.00, stdev=313.44
cpu : usr=0.04%, sys=0.08%, ctx=281, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6397/0, short=0/0
lat (usec): 4=84.40%, 10=9.36%, 20=1.94%, 50=0.19%, 100=0.05%
lat (usec): 250=0.02%, 500=0.02%, 750=0.39%, 1000=0.25%
lat (msec): 2=0.28%, 4=2.49%, 10=0.45%, 20=0.03%, 750=0.02%
lat (msec): >=2000=0.13%
seqread.8: (groupid=1, jobs=1): err= 0: pid=8132
read : io=24820KiB, bw=845KiB/s, iops=206, runt= 30046msec
clat (usec): min=2, max=3738K, avg=4840.40, stdev=127696.36
bw (KiB/s) : min= 4, max= 1045, per=2.81%, avg=780.67, stdev=303.28
cpu : usr=0.04%, sys=0.10%, ctx=241, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6205/0, short=0/0
lat (usec): 4=74.41%, 10=21.48%, 20=0.21%, 50=0.03%, 500=0.02%
lat (usec): 750=0.16%, 1000=0.27%
lat (msec): 2=0.39%, 4=2.37%, 10=0.45%, 20=0.06%, 1000=0.02%
lat (msec): >=2000=0.13%
seqread.9: (groupid=1, jobs=1): err= 0: pid=8133
read : io=25572KiB, bw=871KiB/s, iops=212, runt= 30030msec
clat (usec): min=2, max=3738K, avg=4695.69, stdev=126059.43
bw (KiB/s) : min= 4, max= 1051, per=2.88%, avg=800.89, stdev=306.72
cpu : usr=0.03%, sys=0.08%, ctx=265, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6393/0, short=0/0
lat (usec): 4=91.76%, 10=2.17%, 20=1.99%, 50=0.13%, 100=0.02%
lat (usec): 250=0.03%, 500=0.02%, 750=0.22%, 1000=0.27%
lat (msec): 2=0.33%, 4=2.47%, 10=0.42%, 20=0.05%, 1000=0.02%
lat (msec): >=2000=0.13%
seqread.10: (groupid=1, jobs=1): err= 0: pid=8134
read : io=24532KiB, bw=835KiB/s, iops=204, runt= 30058msec
clat (usec): min=2, max=3740K, avg=4899.41, stdev=128139.79
bw (KiB/s) : min= 3, max= 1133, per=2.79%, avg=776.89, stdev=310.91
cpu : usr=0.03%, sys=0.07%, ctx=250, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6133/0, short=0/0
lat (usec): 4=81.49%, 10=14.28%, 20=0.15%, 100=0.02%, 500=0.02%
lat (usec): 750=0.16%, 1000=0.31%
lat (msec): 2=0.31%, 4=2.59%, 10=0.52%, 2000=0.02%, >=2000=0.13%
seqread.11: (groupid=1, jobs=1): err= 0: pid=8135
read : io=25180KiB, bw=857KiB/s, iops=209, runt= 30065msec
clat (usec): min=2, max=3747K, avg=4774.35, stdev=126204.69
bw (KiB/s) : min= 3, max= 1139, per=2.87%, avg=799.11, stdev=312.62
cpu : usr=0.02%, sys=0.12%, ctx=266, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6295/0, short=0/0
lat (usec): 4=90.77%, 10=3.15%, 20=1.89%, 50=0.13%, 100=0.06%
lat (usec): 500=0.02%, 750=0.40%, 1000=0.24%
lat (msec): 2=0.29%, 4=2.49%, 10=0.33%, 20=0.10%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.12: (groupid=1, jobs=1): err= 0: pid=8136
read : io=25436KiB, bw=865KiB/s, iops=211, runt= 30077msec
clat (usec): min=2, max=3748K, avg=4725.12, stdev=125339.80
bw (KiB/s) : min= 3, max= 1133, per=2.92%, avg=812.00, stdev=334.79
cpu : usr=0.02%, sys=0.09%, ctx=266, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6359/0, short=0/0
lat (usec): 4=80.39%, 10=15.18%, 20=0.20%, 50=0.05%, 100=0.05%
lat (usec): 500=0.02%, 750=0.49%, 1000=0.22%
lat (msec): 2=0.27%, 4=2.59%, 10=0.39%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.13: (groupid=1, jobs=1): err= 0: pid=8137
read : io=25844KiB, bw=879KiB/s, iops=214, runt= 30086msec
clat (usec): min=2, max=3747K, avg=4654.78, stdev=124208.84
bw (KiB/s) : min= 2, max= 1053, per=2.96%, avg=824.44, stdev=317.31
cpu : usr=0.01%, sys=0.12%, ctx=263, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6461/0, short=0/0
lat (usec): 4=78.44%, 10=16.48%, 20=0.94%, 50=0.03%, 100=0.05%
lat (usec): 500=0.02%, 750=0.46%, 1000=0.26%
lat (msec): 2=0.25%, 4=2.52%, 10=0.36%, 20=0.05%, 2000=0.02%
lat (msec): >=2000=0.12%
seqread.14: (groupid=1, jobs=1): err= 0: pid=8138
read : io=23508KiB, bw=799KiB/s, iops=195, runt= 30101msec
clat (usec): min=2, max=3741K, avg=5120.08, stdev=129989.70
bw (KiB/s) : min= 2, max= 1046, per=2.72%, avg=756.56, stdev=318.05
cpu : usr=0.02%, sys=0.08%, ctx=267, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5877/0, short=0/0
lat (usec): 4=89.86%, 10=3.91%, 20=1.74%, 50=0.22%, 100=0.02%
lat (usec): 250=0.09%, 500=0.03%, 750=0.31%, 1000=0.17%
lat (msec): 2=0.36%, 4=2.59%, 10=0.48%, 20=0.09%, 2000=0.02%
lat (msec): >=2000=0.14%
seqread.15: (groupid=1, jobs=1): err= 0: pid=8139
read : io=25300KiB, bw=860KiB/s, iops=210, runt= 30104msec
clat (usec): min=2, max=3743K, avg=4757.80, stdev=125164.02
bw (KiB/s) : min= 2, max= 1136, per=2.94%, avg=818.89, stdev=330.19
cpu : usr=0.01%, sys=0.09%, ctx=272, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6325/0, short=0/0
lat (usec): 4=88.95%, 10=5.26%, 20=1.50%, 50=0.17%, 100=0.05%
lat (usec): 250=0.02%, 500=0.02%, 750=0.44%, 1000=0.19%
lat (msec): 2=0.33%, 4=2.43%, 10=0.46%, 20=0.03%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.16: (groupid=1, jobs=1): err= 0: pid=8140
read : io=24988KiB, bw=849KiB/s, iops=207, runt= 30114msec
clat (usec): min=2, max=3745K, avg=4818.90, stdev=125777.28
bw (KiB/s) : min= 2, max= 1134, per=2.91%, avg=808.11, stdev=315.64
cpu : usr=0.02%, sys=0.08%, ctx=264, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6247/0, short=0/0
lat (usec): 4=81.13%, 10=14.39%, 20=0.21%, 50=0.02%, 100=0.05%
lat (usec): 500=0.03%, 750=0.42%, 1000=0.29%
lat (msec): 2=0.24%, 4=2.55%, 10=0.53%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.17: (groupid=1, jobs=1): err= 0: pid=8141
read : io=28724KiB, bw=976KiB/s, iops=238, runt= 30127msec
clat (usec): min=2, max=3673K, avg=4193.66, stdev=116828.57
bw (KiB/s) : min= 2, max= 1772, per=3.33%, avg=926.00, stdev=457.38
cpu : usr=0.03%, sys=0.10%, ctx=349, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=7181/0, short=0/0
lat (usec): 4=91.30%, 10=2.66%, 20=1.62%, 50=0.33%, 100=0.01%
lat (usec): 250=0.01%, 500=0.01%, 750=0.46%, 1000=0.26%
lat (msec): 2=0.19%, 4=2.59%, 10=0.36%, 20=0.06%, 2000=0.01%
lat (msec): >=2000=0.11%
seqread.18: (groupid=1, jobs=1): err= 0: pid=8142
read : io=25076KiB, bw=852KiB/s, iops=208, runt= 30137msec
clat (usec): min=2, max=3738K, avg=4805.59, stdev=125461.07
bw (KiB/s) : min= 2, max= 1310, per=2.97%, avg=826.78, stdev=348.77
cpu : usr=0.02%, sys=0.10%, ctx=256, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6269/0, short=0/0
lat (usec): 4=82.82%, 10=12.92%, 20=0.14%, 50=0.03%, 100=0.02%
lat (usec): 500=0.02%, 750=0.27%, 1000=0.29%
lat (msec): 2=0.33%, 4=2.54%, 10=0.45%, 20=0.03%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.19: (groupid=1, jobs=1): err= 0: pid=8143
read : io=25692KiB, bw=872KiB/s, iops=212, runt= 30162msec
clat (usec): min=2, max=3744K, avg=4694.30, stdev=123999.11
bw (KiB/s) : min= 1, max= 1387, per=3.07%, avg=853.22, stdev=370.12
cpu : usr=0.02%, sys=0.11%, ctx=272, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6423/0, short=0/0
lat (usec): 4=90.89%, 10=2.93%, 20=2.02%, 50=0.08%, 250=0.03%
lat (usec): 500=0.02%, 750=0.33%, 1000=0.20%
lat (msec): 2=0.34%, 4=2.62%, 10=0.39%, 20=0.02%, >=2000=0.14%
seqread.20: (groupid=1, jobs=1): err= 0: pid=8144
read : io=26068KiB, bw=884KiB/s, iops=215, runt= 30174msec
clat (usec): min=2, max=3730K, avg=4628.40, stdev=123044.43
bw (KiB/s) : min= 1, max= 1395, per=3.13%, avg=869.33, stdev=373.20
cpu : usr=0.02%, sys=0.13%, ctx=273, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6517/0, short=0/0
lat (usec): 4=91.58%, 10=2.03%, 20=2.18%, 50=0.09%, 100=0.03%
lat (usec): 500=0.02%, 750=0.31%, 1000=0.31%
lat (msec): 2=0.23%, 4=2.70%, 10=0.40%, >=2000=0.14%
seqread.21: (groupid=1, jobs=1): err= 0: pid=8145
read : io=25556KiB, bw=867KiB/s, iops=211, runt= 30179msec
clat (usec): min=2, max=3729K, avg=4721.89, stdev=124277.08
bw (KiB/s) : min= 1, max= 1410, per=3.09%, avg=858.78, stdev=374.11
cpu : usr=0.02%, sys=0.09%, ctx=269, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6389/0, short=0/0
lat (usec): 4=92.42%, 10=1.41%, 20=1.97%, 50=0.03%, 100=0.02%
lat (usec): 500=0.02%, 750=0.36%, 1000=0.22%
lat (msec): 2=0.30%, 4=2.66%, 10=0.45%, >=2000=0.14%
seqread.22: (groupid=1, jobs=1): err= 0: pid=8146
read : io=24628KiB, bw=835KiB/s, iops=203, runt= 30188msec
clat (usec): min=2, max=3723K, avg=4901.26, stdev=126689.03
bw (KiB/s) : min= 1, max= 1340, per=2.98%, avg=827.89, stdev=350.42
cpu : usr=0.02%, sys=0.11%, ctx=249, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6157/0, short=0/0
lat (usec): 4=82.56%, 10=12.38%, 20=0.93%, 50=0.03%, 100=0.06%
lat (usec): 250=0.02%, 500=0.03%, 750=0.34%, 1000=0.34%
lat (msec): 2=0.23%, 4=2.34%, 10=0.54%, 20=0.06%, >=2000=0.15%
seqread.23: (groupid=1, jobs=1): err= 0: pid=8147
read : io=25076KiB, bw=850KiB/s, iops=207, runt= 30202msec
clat (usec): min=2, max=3716K, avg=4815.90, stdev=125625.50
bw (KiB/s) : min= 1, max= 1490, per=3.07%, avg=855.33, stdev=385.87
cpu : usr=0.01%, sys=0.10%, ctx=247, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6269/0, short=0/0
lat (usec): 4=82.17%, 10=13.73%, 20=0.11%, 50=0.02%, 100=0.05%
lat (usec): 500=0.02%, 750=0.21%, 1000=0.37%
lat (msec): 2=0.29%, 4=2.38%, 10=0.46%, 20=0.06%, >=2000=0.14%
seqread.24: (groupid=1, jobs=1): err= 0: pid=8148
read : io=25636KiB, bw=868KiB/s, iops=212, runt= 30211msec
clat (usec): min=2, max=3717K, avg=4712.29, stdev=124348.97
bw (KiB/s) : min= 1, max= 1677, per=3.19%, avg=888.00, stdev=432.19
cpu : usr=0.02%, sys=0.10%, ctx=266, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6409/0, short=0/0
lat (usec): 4=90.75%, 10=3.12%, 20=2.03%, 50=0.03%, 100=0.02%
lat (usec): 750=0.27%, 1000=0.30%
lat (msec): 2=0.36%, 4=2.53%, 10=0.44%, 20=0.03%, 2000=0.02%
lat (msec): >=2000=0.12%
seqread.25: (groupid=1, jobs=1): err= 0: pid=8149
read : io=25140KiB, bw=851KiB/s, iops=207, runt= 30226msec
clat (usec): min=2, max=3720K, avg=4807.54, stdev=125717.33
bw (KiB/s) : min= 1, max= 1656, per=3.15%, avg=875.00, stdev=421.23
cpu : usr=0.03%, sys=0.09%, ctx=260, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6285/0, short=0/0
lat (usec): 4=82.34%, 10=13.40%, 20=0.18%, 250=0.03%, 750=0.25%
lat (usec): 1000=0.32%
lat (msec): 2=0.32%, 4=2.48%, 10=0.53%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.26: (groupid=1, jobs=1): err= 0: pid=8150
read : io=24596KiB, bw=832KiB/s, iops=203, runt= 30239msec
clat (usec): min=2, max=3721K, avg=4916.10, stdev=127290.25
bw (KiB/s) : min= 1, max= 1798, per=3.13%, avg=871.22, stdev=459.41
cpu : usr=0.02%, sys=0.08%, ctx=255, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6149/0, short=0/0
lat (usec): 4=85.77%, 10=9.03%, 20=1.09%, 50=0.10%, 100=0.02%
lat (usec): 750=0.16%, 1000=0.23%
lat (msec): 2=0.46%, 4=2.49%, 10=0.46%, 20=0.07%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.27: (groupid=1, jobs=1): err= 0: pid=8151
read : io=26164KiB, bw=885KiB/s, iops=216, runt= 30249msec
clat (usec): min=2, max=3723K, avg=4622.88, stdev=123618.70
bw (KiB/s) : min= 1, max= 1934, per=3.36%, avg=934.56, stdev=493.33
cpu : usr=0.03%, sys=0.09%, ctx=280, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6541/0, short=0/0
lat (usec): 4=91.65%, 10=2.32%, 20=1.90%, 50=0.09%, 100=0.02%
lat (usec): 750=0.20%, 1000=0.32%
lat (msec): 2=0.32%, 4=2.66%, 10=0.37%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.12%
seqread.28: (groupid=1, jobs=1): err= 0: pid=8152
read : io=24884KiB, bw=841KiB/s, iops=205, runt= 30266msec
clat (usec): min=2, max=3727K, avg=4863.54, stdev=127103.82
bw (KiB/s) : min= 1, max= 1956, per=3.24%, avg=900.89, stdev=497.84
cpu : usr=0.03%, sys=0.09%, ctx=263, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6221/0, short=0/0
lat (usec): 4=91.45%, 10=2.52%, 20=1.86%, 50=0.10%, 100=0.02%
lat (usec): 750=0.29%, 1000=0.27%
lat (msec): 2=0.35%, 4=2.51%, 10=0.47%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.13%
seqread.29: (groupid=1, jobs=1): err= 0: pid=8153
read : io=25892KiB, bw=875KiB/s, iops=213, runt= 30299msec
clat (usec): min=2, max=3735K, avg=4679.04, stdev=125346.95
bw (KiB/s) : min= 1, max= 2602, per=3.55%, avg=987.89, stdev=675.85
cpu : usr=0.01%, sys=0.11%, ctx=265, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=6473/0, short=0/0
lat (usec): 4=81.20%, 10=14.55%, 20=0.17%, 100=0.02%, 250=0.02%
lat (usec): 750=0.32%, 1000=0.37%
lat (msec): 2=0.26%, 4=2.56%, 10=0.37%, 20=0.02%, 2000=0.02%
lat (msec): >=2000=0.12%
seqread.30: (groupid=1, jobs=1): err= 0: pid=8154
read : io=22548KiB, bw=762KiB/s, iops=186, runt= 30278msec
clat (usec): min=2, max=3724K, avg=5369.63, stdev=134138.41
bw (KiB/s) : min= 1, max= 2247, per=2.99%, avg=830.78, stdev=650.95
cpu : usr=0.02%, sys=0.10%, ctx=238, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5637/0, short=0/0
lat (usec): 4=87.72%, 10=5.71%, 20=2.39%, 50=0.12%, 100=0.02%
lat (usec): 750=0.25%, 1000=0.21%
lat (msec): 2=0.43%, 4=2.50%, 10=0.44%, 20=0.04%, 2000=0.02%
lat (msec): >=2000=0.14%
seqread.31: (groupid=1, jobs=1): err= 0: pid=8155
read : io=22324KiB, bw=754KiB/s, iops=184, runt= 30315msec
clat (usec): min=2, max=3730K, avg=5429.98, stdev=135645.42
bw (KiB/s) : min= 1, max= 2818, per=3.19%, avg=887.00, stdev=816.44
cpu : usr=0.05%, sys=0.08%, ctx=240, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5581/0, short=0/0
lat (usec): 4=81.81%, 10=13.76%, 20=0.18%, 100=0.04%, 250=0.02%
lat (usec): 750=0.48%, 1000=0.30%
lat (msec): 2=0.22%, 4=2.60%, 10=0.36%, 20=0.07%, 2000=0.02%
lat (msec): >=2000=0.14%
seqread.32: (groupid=1, jobs=1): err= 0: pid=8156
read : io=22172KiB, bw=749KiB/s, iops=183, runt= 30289msec
clat (usec): min=2, max=3728K, avg=5462.65, stdev=135578.92
bw (KiB/s) : min= 1, max= 2319, per=2.99%, avg=830.89, stdev=671.12
cpu : usr=0.01%, sys=0.07%, ctx=248, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5543/0, short=0/0
lat (usec): 4=86.85%, 10=7.60%, 20=1.32%, 50=0.05%, 100=0.02%
lat (usec): 500=0.02%, 750=0.45%, 1000=0.29%
lat (msec): 2=0.27%, 4=2.49%, 10=0.45%, 20=0.04%, 2000=0.02%
lat (msec): >=2000=0.14%
Run status group 0 (all jobs):
READ: io=14192KiB, aggrb=480KiB/s, minb=7KiB/s, maxb=20KiB/s, mint=30001msec, maxt=30258msec
Run status group 1 (all jobs):
READ: io=829292KiB, aggrb=27816KiB/s, minb=723KiB/s, maxb=1004KiB/s, mint=30004msec, maxt=30529msec
Disk stats (read/write):
sdc: ios=12918/6, merge=141/8, ticks=4231112/36759, in_queue=4291029, util=99.48%
[-- Attachment #4: cfq_adapt.out --]
[-- Type: application/octet-stream, Size: 56799 bytes --]
randomread.0: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.1: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.2: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.3: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.4: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.5: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.6: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.7: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.8: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.9: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.10: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.11: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.12: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.13: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.14: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.15: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.16: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.17: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.18: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.19: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.20: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.21: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.22: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.23: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.24: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.25: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.26: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.27: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.28: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.29: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.30: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.31: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
randomread.32: (g=0): rw=randread, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.0: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.1: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.2: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.3: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.4: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.5: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.6: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.7: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.8: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.9: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.10: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.11: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.12: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.13: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.14: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.15: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.16: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.17: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.18: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.19: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.20: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.21: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.22: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.23: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.24: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.25: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.26: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.27: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.28: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.29: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.30: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.31: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
seqread.32: (g=1): rw=read, bs=4K-4K/4K-4K, ioengine=psync, iodepth=1
Starting 66 processes
randomread.0: (groupid=0, jobs=1): err= 0: pid=8642
read : io=376KiB, bw=12KiB/s, iops=3, runt= 30241msec
clat (usec): min=5, max=1356K, avg=321698.51, stdev=451568.84
bw (KiB/s) : min= 3, max= 39, per=2.55%, avg=12.43, stdev= 8.82
cpu : usr=0.00%, sys=0.03%, ctx=111, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=94/0, short=0/0
lat (usec): 10=1.06%
lat (msec): 4=2.13%, 10=39.36%, 20=11.70%, 50=3.19%, 100=5.32%
lat (msec): 250=3.19%, 500=6.38%, 750=3.19%, 1000=8.51%, 2000=15.96%
randomread.1: (groupid=0, jobs=1): err= 0: pid=8643
read : io=456KiB, bw=15KiB/s, iops=3, runt= 30213msec
clat (usec): min=5, max=1437K, avg=265013.43, stdev=436025.05
bw (KiB/s) : min= 2, max= 28, per=3.10%, avg=15.11, stdev= 6.85
cpu : usr=0.00%, sys=0.01%, ctx=135, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=114/0, short=0/0
lat (usec): 10=0.88%, 500=1.75%
lat (msec): 4=4.39%, 10=41.23%, 20=12.28%, 50=3.51%, 100=7.89%
lat (msec): 250=2.63%, 500=1.75%, 750=0.88%, 1000=8.77%, 2000=14.04%
randomread.2: (groupid=0, jobs=1): err= 0: pid=8644
read : io=428KiB, bw=14KiB/s, iops=3, runt= 30200msec
clat (usec): min=4, max=1305K, avg=282231.96, stdev=430523.39
bw (KiB/s) : min= 3, max= 48, per=3.01%, avg=14.70, stdev= 8.77
cpu : usr=0.00%, sys=0.02%, ctx=124, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=107/0, short=0/0
lat (usec): 10=1.87%, 500=0.93%
lat (msec): 4=4.67%, 10=30.84%, 20=19.63%, 50=3.74%, 100=0.93%
lat (msec): 250=7.48%, 500=4.67%, 750=2.80%, 1000=6.54%, 2000=15.89%
randomread.3: (groupid=0, jobs=1): err= 0: pid=8645
read : io=432KiB, bw=14KiB/s, iops=3, runt= 30194msec
clat (msec): min=3, max=1360, avg=279.56, stdev=445.34
bw (KiB/s) : min= 3, max= 40, per=3.12%, avg=15.21, stdev= 9.15
cpu : usr=0.00%, sys=0.01%, ctx=124, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=108/0, short=0/0
lat (msec): 4=1.85%, 10=43.52%, 20=15.74%, 50=5.56%, 100=3.70%
lat (msec): 250=2.78%, 500=0.93%, 750=3.70%, 1000=6.48%, 2000=15.74%
randomread.4: (groupid=0, jobs=1): err= 0: pid=8646
read : io=428KiB, bw=14KiB/s, iops=3, runt= 30003msec
clat (msec): min=2, max=1123, avg=280.39, stdev=427.42
bw (KiB/s) : min= 3, max= 32, per=2.90%, avg=14.14, stdev= 7.15
cpu : usr=0.00%, sys=0.01%, ctx=128, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=107/0, short=0/0
lat (msec): 4=4.67%, 10=36.45%, 20=17.76%, 50=3.74%, 100=1.87%
lat (msec): 250=8.41%, 500=1.87%, 750=1.87%, 1000=7.48%, 2000=15.89%
randomread.5: (groupid=0, jobs=1): err= 0: pid=8647
read : io=480KiB, bw=16KiB/s, iops=3, runt= 30003msec
clat (msec): min=2, max=1220, avg=250.02, stdev=405.47
bw (KiB/s) : min= 3, max= 27, per=3.18%, avg=15.52, stdev= 6.80
cpu : usr=0.01%, sys=0.00%, ctx=136, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=120/0, short=0/0
lat (msec): 4=5.83%, 10=44.17%, 20=12.50%, 50=1.67%, 100=2.50%
lat (msec): 250=7.50%, 500=3.33%, 750=1.67%, 1000=9.17%, 2000=11.67%
randomread.6: (groupid=0, jobs=1): err= 0: pid=8648
read : io=404KiB, bw=13KiB/s, iops=3, runt= 30052msec
clat (msec): min=2, max=1337, avg=297.53, stdev=437.26
bw (KiB/s) : min= 3, max= 29, per=2.71%, avg=13.22, stdev= 6.99
cpu : usr=0.00%, sys=0.00%, ctx=115, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=101/0, short=0/0
lat (msec): 4=5.94%, 10=33.66%, 20=17.82%, 50=2.97%, 100=0.99%
lat (msec): 250=6.93%, 500=4.95%, 750=2.97%, 1000=9.90%, 2000=13.86%
randomread.7: (groupid=0, jobs=1): err= 0: pid=8649
read : io=424KiB, bw=14KiB/s, iops=3, runt= 30106msec
clat (usec): min=4, max=1230K, avg=284010.99, stdev=430874.59
bw (KiB/s) : min= 3, max= 26, per=2.81%, avg=13.70, stdev= 5.86
cpu : usr=0.00%, sys=0.01%, ctx=124, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=106/0, short=0/0
lat (usec): 10=0.94%
lat (msec): 4=5.66%, 10=33.02%, 20=17.92%, 50=1.89%, 100=4.72%
lat (msec): 250=8.49%, 500=1.89%, 750=2.83%, 1000=4.72%, 2000=17.92%
randomread.8: (groupid=0, jobs=1): err= 0: pid=8650
read : io=472KiB, bw=16KiB/s, iops=3, runt= 30090msec
clat (usec): min=5, max=1209K, avg=254994.34, stdev=420575.82
bw (KiB/s) : min= 3, max= 29, per=3.22%, avg=15.70, stdev= 6.54
cpu : usr=0.00%, sys=0.01%, ctx=137, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=118/0, short=0/0
lat (usec): 10=0.85%
lat (msec): 4=5.08%, 10=37.29%, 20=18.64%, 50=3.39%, 100=3.39%
lat (msec): 250=5.08%, 500=4.24%, 750=1.69%, 1000=5.93%, 2000=14.41%
randomread.9: (groupid=0, jobs=1): err= 0: pid=8651
read : io=452KiB, bw=15KiB/s, iops=3, runt= 30132msec
clat (usec): min=9, max=1305K, avg=266645.94, stdev=417707.49
bw (KiB/s) : min= 3, max= 27, per=2.93%, avg=14.32, stdev= 7.20
cpu : usr=0.00%, sys=0.01%, ctx=130, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=113/0, short=0/0
lat (usec): 10=0.88%
lat (msec): 4=4.42%, 10=35.40%, 20=17.70%, 50=4.42%, 100=3.54%
lat (msec): 250=6.19%, 500=3.54%, 750=2.65%, 1000=8.85%, 2000=12.39%
randomread.10: (groupid=0, jobs=1): err= 0: pid=8652
read : io=420KiB, bw=14KiB/s, iops=3, runt= 30050msec
clat (msec): min=3, max=1200, avg=286.18, stdev=420.08
bw (KiB/s) : min= 3, max= 26, per=2.82%, avg=13.79, stdev= 6.84
cpu : usr=0.00%, sys=0.00%, ctx=121, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=105/0, short=0/0
lat (msec): 4=3.81%, 10=41.90%, 20=11.43%, 50=1.90%, 100=5.71%
lat (msec): 250=3.81%, 500=6.67%, 750=2.86%, 1000=8.57%, 2000=13.33%
randomread.11: (groupid=0, jobs=1): err= 0: pid=8653
read : io=476KiB, bw=16KiB/s, iops=3, runt= 30268msec
clat (usec): min=4, max=1189K, avg=254340.46, stdev=412415.32
bw (KiB/s) : min= 5, max= 40, per=3.19%, avg=15.56, stdev= 7.47
cpu : usr=0.00%, sys=0.01%, ctx=137, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=119/0, short=0/0
lat (usec): 10=1.68%
lat (msec): 4=2.52%, 10=38.66%, 20=16.81%, 50=2.52%, 100=5.04%
lat (msec): 250=8.40%, 500=2.52%, 750=1.68%, 1000=5.04%, 2000=15.13%
randomread.12: (groupid=0, jobs=1): err= 0: pid=8654
read : io=424KiB, bw=14KiB/s, iops=3, runt= 30298msec
clat (msec): min=2, max=1246, avg=285.82, stdev=430.29
bw (KiB/s) : min= 3, max= 35, per=2.89%, avg=14.11, stdev= 7.40
cpu : usr=0.00%, sys=0.01%, ctx=122, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=106/0, short=0/0
lat (msec): 4=6.60%, 10=36.79%, 20=16.98%, 50=2.83%, 100=1.89%
lat (msec): 250=2.83%, 500=6.60%, 750=2.83%, 1000=7.55%, 2000=15.09%
randomread.13: (groupid=0, jobs=1): err= 0: pid=8655
read : io=392KiB, bw=13KiB/s, iops=3, runt= 30222msec
clat (msec): min=2, max=1728, avg=308.37, stdev=438.53
bw (KiB/s) : min= 2, max= 31, per=2.78%, avg=13.58, stdev= 8.01
cpu : usr=0.00%, sys=0.00%, ctx=115, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=98/0, short=0/0
lat (msec): 4=2.04%, 10=38.78%, 20=17.35%, 50=1.02%, 100=4.08%
lat (msec): 250=2.04%, 500=5.10%, 750=7.14%, 1000=9.18%, 2000=13.27%
randomread.14: (groupid=0, jobs=1): err= 0: pid=8656
read : io=416KiB, bw=14KiB/s, iops=3, runt= 30228msec
clat (usec): min=503, max=1203K, avg=290644.98, stdev=437072.37
bw (KiB/s) : min= 4, max= 31, per=2.70%, avg=13.18, stdev= 6.62
cpu : usr=0.00%, sys=0.00%, ctx=121, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=104/0, short=0/0
lat (usec): 750=0.96%
lat (msec): 4=3.85%, 10=39.42%, 20=17.31%, 50=3.85%, 100=1.92%
lat (msec): 250=1.92%, 500=4.81%, 750=2.88%, 1000=5.77%, 2000=17.31%
randomread.15: (groupid=0, jobs=1): err= 0: pid=8657
read : io=384KiB, bw=13KiB/s, iops=3, runt= 30170msec
clat (msec): min=2, max=1267, avg=314.26, stdev=464.61
bw (KiB/s) : min= 3, max= 24, per=2.50%, avg=12.22, stdev= 6.97
cpu : usr=0.00%, sys=0.01%, ctx=111, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=96/0, short=0/0
lat (msec): 4=5.21%, 10=38.54%, 20=15.62%, 50=5.21%, 100=1.04%
lat (msec): 250=2.08%, 500=4.17%, 750=2.08%, 1000=6.25%, 2000=19.79%
randomread.16: (groupid=0, jobs=1): err= 0: pid=8658
read : io=460KiB, bw=15KiB/s, iops=3, runt= 30269msec
clat (usec): min=5, max=1642K, avg=263200.82, stdev=428397.98
bw (KiB/s) : min= 2, max= 28, per=3.11%, avg=15.15, stdev= 6.70
cpu : usr=0.00%, sys=0.02%, ctx=128, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=115/0, short=0/0
lat (usec): 10=0.87%
lat (msec): 4=2.61%, 10=41.74%, 20=13.91%, 50=2.61%, 100=8.70%
lat (msec): 250=0.87%, 500=6.96%, 750=1.74%, 1000=7.83%, 2000=12.17%
randomread.17: (groupid=0, jobs=1): err= 0: pid=8659
read : io=412KiB, bw=14KiB/s, iops=3, runt= 30004msec
clat (msec): min=3, max=1197, avg=291.29, stdev=432.18
bw (KiB/s) : min= 3, max= 26, per=2.69%, avg=13.12, stdev= 6.67
cpu : usr=0.00%, sys=0.01%, ctx=119, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=103/0, short=0/0
lat (msec): 4=2.91%, 10=40.78%, 20=13.59%, 50=1.94%, 100=2.91%
lat (msec): 250=9.71%, 500=2.91%, 750=0.97%, 1000=5.83%, 2000=18.45%
randomread.18: (groupid=0, jobs=1): err= 0: pid=8660
read : io=452KiB, bw=15KiB/s, iops=3, runt= 30262msec
clat (usec): min=5, max=1163K, avg=267794.13, stdev=408422.21
bw (KiB/s) : min= 6, max= 31, per=3.03%, avg=14.81, stdev= 6.66
cpu : usr=0.00%, sys=0.01%, ctx=125, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=113/0, short=0/0
lat (usec): 10=0.88%, 500=0.88%
lat (msec): 4=3.54%, 10=30.97%, 20=22.12%, 50=2.65%, 100=4.42%
lat (msec): 250=5.31%, 500=6.19%, 750=1.77%, 1000=11.50%, 2000=9.73%
randomread.19: (groupid=0, jobs=1): err= 0: pid=8661
read : io=432KiB, bw=14KiB/s, iops=3, runt= 30103msec
clat (msec): min=2, max=1325, avg=278.72, stdev=429.91
bw (KiB/s) : min= 3, max= 33, per=2.92%, avg=14.27, stdev= 7.95
cpu : usr=0.00%, sys=0.01%, ctx=127, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=108/0, short=0/0
lat (msec): 4=4.63%, 10=40.74%, 20=13.89%, 50=2.78%, 100=4.63%
lat (msec): 250=4.63%, 500=4.63%, 750=0.93%, 1000=7.41%, 2000=15.74%
randomread.20: (groupid=0, jobs=1): err= 0: pid=8662
read : io=484KiB, bw=16KiB/s, iops=3, runt= 30270msec
clat (usec): min=5, max=1208K, avg=250158.44, stdev=410919.09
bw (KiB/s) : min= 6, max= 30, per=3.29%, avg=16.04, stdev= 6.77
cpu : usr=0.00%, sys=0.01%, ctx=142, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=121/0, short=0/0
lat (usec): 10=0.83%
lat (msec): 4=3.31%, 10=33.88%, 20=22.31%, 50=2.48%, 100=5.79%
lat (msec): 250=7.44%, 500=2.48%, 750=0.83%, 1000=4.96%, 2000=15.70%
randomread.21: (groupid=0, jobs=1): err= 0: pid=8663
read : io=460KiB, bw=15KiB/s, iops=3, runt= 30181msec
clat (usec): min=6, max=1207K, avg=262429.93, stdev=410255.14
bw (KiB/s) : min= 4, max= 32, per=2.94%, avg=14.37, stdev= 7.52
cpu : usr=0.00%, sys=0.00%, ctx=131, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=115/0, short=0/0
lat (usec): 10=0.87%
lat (msec): 4=7.83%, 10=33.04%, 20=18.26%, 50=4.35%, 100=2.61%
lat (msec): 250=6.09%, 500=1.74%, 750=4.35%, 1000=8.70%, 2000=12.17%
randomread.22: (groupid=0, jobs=1): err= 0: pid=8664
read : io=428KiB, bw=14KiB/s, iops=3, runt= 30015msec
clat (msec): min=2, max=1331, avg=280.50, stdev=439.81
bw (KiB/s) : min= 3, max= 31, per=2.84%, avg=13.84, stdev= 7.12
cpu : usr=0.00%, sys=0.00%, ctx=123, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=107/0, short=0/0
lat (msec): 4=4.67%, 10=42.99%, 20=12.15%, 50=2.80%, 100=4.67%
lat (msec): 250=4.67%, 500=4.67%, 750=0.93%, 1000=6.54%, 2000=15.89%
randomread.23: (groupid=0, jobs=1): err= 0: pid=8665
read : io=440KiB, bw=14KiB/s, iops=3, runt= 30065msec
clat (msec): min=2, max=1276, avg=273.31, stdev=438.12
bw (KiB/s) : min= 3, max= 48, per=3.07%, avg=15.00, stdev= 9.36
cpu : usr=0.00%, sys=0.01%, ctx=129, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=110/0, short=0/0
lat (msec): 4=5.45%, 10=40.91%, 20=16.36%, 50=3.64%, 100=1.82%
lat (msec): 250=5.45%, 500=1.82%, 750=3.64%, 1000=3.64%, 2000=17.27%
randomread.24: (groupid=0, jobs=1): err= 0: pid=8666
read : io=416KiB, bw=14KiB/s, iops=3, runt= 30103msec
clat (msec): min=2, max=1215, avg=289.44, stdev=431.89
bw (KiB/s) : min= 3, max= 29, per=2.69%, avg=13.10, stdev= 6.38
cpu : usr=0.00%, sys=0.00%, ctx=120, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=104/0, short=0/0
lat (msec): 4=2.88%, 10=46.15%, 20=10.58%, 50=2.88%, 100=3.85%
lat (msec): 250=3.85%, 500=1.92%, 750=4.81%, 1000=9.62%, 2000=13.46%
randomread.25: (groupid=0, jobs=1): err= 0: pid=8667
read : io=500KiB, bw=17KiB/s, iops=4, runt= 30109msec
clat (msec): min=2, max=1206, avg=240.86, stdev=412.16
bw (KiB/s) : min= 3, max= 39, per=3.34%, avg=16.29, stdev= 8.13
cpu : usr=0.00%, sys=0.01%, ctx=141, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=125/0, short=0/0
lat (msec): 4=3.20%, 10=38.40%, 20=22.40%, 50=3.20%, 100=6.40%
lat (msec): 250=2.40%, 500=1.60%, 750=1.60%, 1000=7.20%, 2000=13.60%
randomread.26: (groupid=0, jobs=1): err= 0: pid=8668
read : io=476KiB, bw=16KiB/s, iops=3, runt= 30248msec
clat (msec): min=2, max=1183, avg=254.17, stdev=414.81
bw (KiB/s) : min= 3, max= 32, per=3.17%, avg=15.48, stdev= 8.08
cpu : usr=0.00%, sys=0.01%, ctx=139, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=119/0, short=0/0
lat (msec): 4=4.20%, 10=43.70%, 20=14.29%, 50=4.20%, 100=0.84%
lat (msec): 250=6.72%, 500=4.20%, 750=1.68%, 1000=4.20%, 2000=15.97%
randomread.27: (groupid=0, jobs=1): err= 0: pid=8669
read : io=420KiB, bw=14KiB/s, iops=3, runt= 30200msec
clat (msec): min=2, max=1166, avg=287.61, stdev=418.44
bw (KiB/s) : min= 4, max= 28, per=2.83%, avg=13.83, stdev= 6.30
cpu : usr=0.00%, sys=0.01%, ctx=121, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=105/0, short=0/0
lat (msec): 4=2.86%, 10=33.33%, 20=19.05%, 50=4.76%, 100=4.76%
lat (msec): 250=3.81%, 500=3.81%, 750=4.76%, 1000=12.38%, 2000=10.48%
randomread.28: (groupid=0, jobs=1): err= 0: pid=8670
read : io=452KiB, bw=15KiB/s, iops=3, runt= 30005msec
clat (usec): min=368, max=1224K, avg=265525.19, stdev=413517.70
bw (KiB/s) : min= 3, max= 28, per=3.01%, avg=14.69, stdev= 6.77
cpu : usr=0.00%, sys=0.00%, ctx=129, majf=0, minf=16
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=113/0, short=0/0
lat (usec): 500=1.77%
lat (msec): 4=1.77%, 10=46.02%, 20=12.39%, 50=3.54%, 100=0.88%
lat (msec): 250=3.54%, 500=7.08%, 750=1.77%, 1000=7.08%, 2000=14.16%
randomread.29: (groupid=0, jobs=1): err= 0: pid=8671
read : io=408KiB, bw=13KiB/s, iops=3, runt= 30107msec
clat (usec): min=4, max=1153K, avg=295158.45, stdev=430653.80
bw (KiB/s) : min= 3, max= 27, per=2.63%, avg=12.82, stdev= 6.69
cpu : usr=0.00%, sys=0.00%, ctx=118, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=102/0, short=0/0
lat (usec): 10=0.98%, 500=0.98%
lat (msec): 4=0.98%, 10=41.18%, 20=11.76%, 50=7.84%, 100=1.96%
lat (msec): 250=1.96%, 500=4.90%, 750=3.92%, 1000=7.84%, 2000=15.69%
randomread.30: (groupid=0, jobs=1): err= 0: pid=8672
read : io=424KiB, bw=14KiB/s, iops=3, runt= 30122msec
clat (usec): min=4, max=1343K, avg=284162.83, stdev=440435.59
bw (KiB/s) : min= 3, max= 25, per=2.76%, avg=13.48, stdev= 6.85
cpu : usr=0.00%, sys=0.01%, ctx=123, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=106/0, short=0/0
lat (usec): 10=0.94%
lat (msec): 4=2.83%, 10=33.96%, 20=20.75%, 50=3.77%, 100=4.72%
lat (msec): 250=5.66%, 500=1.89%, 750=1.89%, 1000=6.60%, 2000=16.98%
randomread.31: (groupid=0, jobs=1): err= 0: pid=8673
read : io=484KiB, bw=16KiB/s, iops=3, runt= 30254msec
clat (msec): min=2, max=1253, avg=250.02, stdev=418.84
bw (KiB/s) : min= 4, max= 32, per=3.32%, avg=16.19, stdev= 6.50
cpu : usr=0.00%, sys=0.01%, ctx=139, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=121/0, short=0/0
lat (msec): 4=2.48%, 10=45.45%, 20=14.88%, 50=1.65%, 100=3.31%
lat (msec): 250=5.79%, 500=5.79%, 750=0.83%, 1000=4.96%, 2000=14.88%
randomread.32: (groupid=0, jobs=1): err= 0: pid=8674
read : io=432KiB, bw=14KiB/s, iops=3, runt= 30112msec
clat (msec): min=2, max=1215, avg=278.80, stdev=429.96
bw (KiB/s) : min= 3, max= 28, per=2.81%, avg=13.70, stdev= 6.95
cpu : usr=0.00%, sys=0.00%, ctx=125, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=108/0, short=0/0
lat (msec): 4=1.85%, 10=37.04%, 20=20.37%, 50=2.78%, 100=3.70%
lat (msec): 250=6.48%, 500=2.78%, 750=3.70%, 1000=3.70%, 2000=17.59%
seqread.0: (groupid=1, jobs=1): err= 0: pid=8705
read : io=21220KiB, bw=714KiB/s, iops=174, runt= 30412msec
clat (usec): min=2, max=1052K, avg=5730.96, stdev=73798.73
bw (KiB/s) : min= 21, max= 1203, per=2.95%, avg=711.13, stdev=244.70
cpu : usr=0.03%, sys=0.10%, ctx=213, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5305/0, short=0/0
lat (usec): 4=80.81%, 10=14.91%, 20=0.23%, 50=0.02%, 100=0.02%
lat (usec): 250=0.04%, 750=0.17%, 1000=0.36%
lat (msec): 2=0.28%, 4=2.30%, 10=0.26%, 100=0.02%, 500=0.02%
lat (msec): 750=0.02%, 1000=0.28%, 2000=0.26%
seqread.1: (groupid=1, jobs=1): err= 0: pid=8706
read : io=21476KiB, bw=728KiB/s, iops=177, runt= 30197msec
clat (usec): min=2, max=1058K, avg=5622.56, stdev=73236.01
bw (KiB/s) : min= 22, max= 1269, per=3.00%, avg=724.70, stdev=202.11
cpu : usr=0.02%, sys=0.08%, ctx=223, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5369/0, short=0/0
lat (usec): 4=90.46%, 10=3.39%, 20=1.86%, 50=0.07%, 100=0.09%
lat (usec): 250=0.02%, 500=0.02%, 750=0.19%, 1000=0.19%
lat (msec): 2=0.43%, 4=2.31%, 10=0.34%, 20=0.04%, 100=0.04%
lat (msec): 1000=0.32%, 2000=0.24%
seqread.2: (groupid=1, jobs=1): err= 0: pid=8707
read : io=21716KiB, bw=735KiB/s, iops=179, runt= 30254msec
clat (usec): min=2, max=1053K, avg=5571.02, stdev=73187.52
bw (KiB/s) : min= 411, max= 1121, per=3.06%, avg=737.60, stdev=150.75
cpu : usr=0.02%, sys=0.09%, ctx=215, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5429/0, short=0/0
lat (usec): 4=91.38%, 10=2.74%, 20=1.77%, 50=0.06%, 100=0.09%
lat (usec): 250=0.02%, 500=0.02%, 750=0.15%, 1000=0.22%
lat (msec): 2=0.24%, 4=2.45%, 10=0.24%, 20=0.04%, 50=0.04%
lat (msec): 1000=0.31%, 2000=0.24%
seqread.3: (groupid=1, jobs=1): err= 0: pid=8708
read : io=22364KiB, bw=757KiB/s, iops=184, runt= 30242msec
clat (usec): min=2, max=1060K, avg=5407.31, stdev=72063.23
bw (KiB/s) : min= 508, max= 1094, per=3.15%, avg=759.53, stdev=113.84
cpu : usr=0.03%, sys=0.06%, ctx=228, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5591/0, short=0/0
lat (usec): 4=91.86%, 10=2.09%, 20=1.84%, 50=0.02%, 100=0.13%
lat (usec): 750=0.36%, 1000=0.27%
lat (msec): 2=0.27%, 4=2.33%, 10=0.29%, 100=0.02%, 1000=0.27%
lat (msec): 2000=0.27%
seqread.4: (groupid=1, jobs=1): err= 0: pid=8709
read : io=23508KiB, bw=795KiB/s, iops=194, runt= 30260msec
clat (usec): min=2, max=1035K, avg=5147.22, stdev=70186.59
bw (KiB/s) : min= 585, max= 1158, per=3.31%, avg=798.40, stdev=94.15
cpu : usr=0.01%, sys=0.09%, ctx=234, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5877/0, short=0/0
lat (usec): 4=81.23%, 10=14.51%, 20=0.17%, 50=0.02%, 100=0.10%
lat (usec): 500=0.02%, 750=0.17%, 1000=0.34%
lat (msec): 2=0.24%, 4=2.50%, 10=0.15%, 20=0.02%, 250=0.02%
lat (msec): 750=0.02%, 1000=0.29%, 2000=0.20%
seqread.5: (groupid=1, jobs=1): err= 0: pid=8710
read : io=21748KiB, bw=736KiB/s, iops=179, runt= 30227msec
clat (usec): min=2, max=1049K, avg=5557.74, stdev=72971.85
bw (KiB/s) : min= 262, max= 1029, per=3.06%, avg=737.80, stdev=164.76
cpu : usr=0.03%, sys=0.07%, ctx=218, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5437/0, short=0/0
lat (usec): 4=91.17%, 10=2.83%, 20=1.82%, 50=0.09%, 100=0.09%
lat (usec): 500=0.02%, 750=0.22%, 1000=0.31%
lat (msec): 2=0.26%, 4=2.35%, 10=0.22%, 20=0.04%, 250=0.02%
lat (msec): 1000=0.33%, 2000=0.22%
seqread.6: (groupid=1, jobs=1): err= 0: pid=8711
read : io=22516KiB, bw=760KiB/s, iops=185, runt= 30303msec
clat (usec): min=2, max=1063K, avg=5381.65, stdev=71885.76
bw (KiB/s) : min= 537, max= 1052, per=3.17%, avg=764.33, stdev=106.89
cpu : usr=0.03%, sys=0.10%, ctx=227, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5629/0, short=0/0
lat (usec): 4=80.87%, 10=14.85%, 20=0.20%, 50=0.02%, 100=0.04%
lat (usec): 250=0.02%, 500=0.02%, 750=0.12%, 1000=0.32%
lat (msec): 2=0.25%, 4=2.49%, 10=0.27%, 250=0.02%, 750=0.02%
lat (msec): 1000=0.23%, 2000=0.28%
seqread.7: (groupid=1, jobs=1): err= 0: pid=8712
read : io=22260KiB, bw=755KiB/s, iops=184, runt= 30160msec
clat (usec): min=2, max=1059K, avg=5417.82, stdev=71802.42
bw (KiB/s) : min= 546, max= 909, per=3.13%, avg=756.03, stdev=86.82
cpu : usr=0.04%, sys=0.08%, ctx=222, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5565/0, short=0/0
lat (usec): 4=91.12%, 10=3.04%, 20=1.73%, 50=0.07%, 100=0.07%
lat (usec): 500=0.02%, 750=0.18%, 1000=0.20%
lat (msec): 2=0.36%, 4=2.43%, 10=0.22%, 20=0.02%, 250=0.02%
lat (msec): 1000=0.34%, 2000=0.20%
seqread.8: (groupid=1, jobs=1): err= 0: pid=8713
read : io=22620KiB, bw=761KiB/s, iops=186, runt= 30400msec
clat (usec): min=2, max=1054K, avg=5374.07, stdev=71902.31
bw (KiB/s) : min= 533, max= 1357, per=3.19%, avg=770.17, stdev=147.31
cpu : usr=0.02%, sys=0.08%, ctx=221, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5655/0, short=0/0
lat (usec): 4=81.31%, 10=14.50%, 20=0.21%, 50=0.02%, 100=0.05%
lat (usec): 250=0.02%, 500=0.02%, 750=0.19%, 1000=0.37%
lat (msec): 2=0.25%, 4=2.30%, 10=0.21%, 250=0.02%, 750=0.02%
lat (msec): 1000=0.21%, 2000=0.30%
seqread.9: (groupid=1, jobs=1): err= 0: pid=8714
read : io=22620KiB, bw=763KiB/s, iops=186, runt= 30336msec
clat (usec): min=2, max=1094K, avg=5362.81, stdev=71647.53
bw (KiB/s) : min= 501, max= 1193, per=3.19%, avg=768.93, stdev=149.90
cpu : usr=0.01%, sys=0.09%, ctx=224, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5655/0, short=0/0
lat (usec): 4=89.85%, 10=3.89%, 20=2.18%, 50=0.05%, 100=0.09%
lat (usec): 500=0.02%, 750=0.19%, 1000=0.21%
lat (msec): 2=0.42%, 4=2.40%, 10=0.14%, 500=0.02%, 750=0.02%
lat (msec): 1000=0.25%, 2000=0.27%
seqread.10: (groupid=1, jobs=1): err= 0: pid=8715
read : io=21852KiB, bw=735KiB/s, iops=179, runt= 30430msec
clat (usec): min=2, max=1110K, avg=5568.52, stdev=72959.84
bw (KiB/s) : min= 152, max= 1030, per=3.07%, avg=740.07, stdev=140.47
cpu : usr=0.02%, sys=0.10%, ctx=218, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5463/0, short=0/0
lat (usec): 4=81.35%, 10=14.30%, 20=0.22%, 50=0.04%, 100=0.11%
lat (usec): 250=0.02%, 500=0.02%, 750=0.20%, 1000=0.27%
lat (msec): 2=0.24%, 4=2.27%, 10=0.35%, 20=0.02%, 50=0.04%
lat (msec): 500=0.02%, 750=0.02%, 1000=0.33%, 2000=0.20%
seqread.11: (groupid=1, jobs=1): err= 0: pid=8716
read : io=22500KiB, bw=761KiB/s, iops=185, runt= 30271msec
clat (usec): min=2, max=1046K, avg=5379.79, stdev=71543.84
bw (KiB/s) : min= 294, max= 1032, per=3.17%, avg=765.90, stdev=144.03
cpu : usr=0.03%, sys=0.06%, ctx=227, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5625/0, short=0/0
lat (usec): 4=90.19%, 10=3.68%, 20=1.96%, 50=0.05%, 100=0.07%
lat (usec): 250=0.04%, 750=0.20%, 1000=0.25%
lat (msec): 2=0.32%, 4=2.54%, 10=0.16%, 500=0.02%, 750=0.02%
lat (msec): 1000=0.32%, 2000=0.20%
seqread.12: (groupid=1, jobs=1): err= 0: pid=8717
read : io=22364KiB, bw=751KiB/s, iops=183, runt= 30462msec
clat (usec): min=2, max=1085K, avg=5446.59, stdev=72228.87
bw (KiB/s) : min= 394, max= 1508, per=3.17%, avg=764.40, stdev=192.36
cpu : usr=0.02%, sys=0.09%, ctx=229, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5591/0, short=0/0
lat (usec): 4=87.87%, 10=6.01%, 20=1.77%, 50=0.11%, 100=0.16%
lat (usec): 500=0.02%, 750=0.27%, 1000=0.25%
lat (msec): 2=0.23%, 4=2.50%, 10=0.21%, 20=0.02%, 50=0.02%
lat (msec): 500=0.02%, 750=0.02%, 1000=0.25%, 2000=0.27%
seqread.13: (groupid=1, jobs=1): err= 0: pid=8718
read : io=23028KiB, bw=776KiB/s, iops=189, runt= 30365msec
clat (usec): min=2, max=1047K, avg=5272.63, stdev=70761.66
bw (KiB/s) : min= 494, max= 1440, per=3.23%, avg=778.60, stdev=175.37
cpu : usr=0.02%, sys=0.10%, ctx=231, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5757/0, short=0/0
lat (usec): 4=85.91%, 10=7.36%, 20=2.52%, 50=0.12%, 100=0.07%
lat (usec): 250=0.02%, 500=0.02%, 750=0.21%, 1000=0.24%
lat (msec): 2=0.31%, 4=2.45%, 10=0.21%, 50=0.02%, 500=0.02%
lat (msec): 750=0.02%, 1000=0.28%, 2000=0.23%
seqread.14: (groupid=1, jobs=1): err= 0: pid=8719
read : io=20724KiB, bw=706KiB/s, iops=172, runt= 30019msec
clat (usec): min=2, max=1059K, avg=5792.25, stdev=74863.74
bw (KiB/s) : min= 275, max= 887, per=2.94%, avg=709.38, stdev=157.89
cpu : usr=0.02%, sys=0.08%, ctx=206, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5181/0, short=0/0
lat (usec): 4=79.23%, 10=16.48%, 20=0.25%, 50=0.04%, 100=0.04%
lat (usec): 500=0.04%, 750=0.19%, 1000=0.25%
lat (msec): 2=0.29%, 4=2.20%, 10=0.31%, 20=0.10%, 500=0.02%
lat (msec): 1000=0.27%, 2000=0.29%
seqread.15: (groupid=1, jobs=1): err= 0: pid=8720
read : io=22756KiB, bw=766KiB/s, iops=187, runt= 30384msec
clat (usec): min=2, max=1079K, avg=5339.16, stdev=71243.11
bw (KiB/s) : min= 468, max= 1144, per=3.21%, avg=775.17, stdev=116.95
cpu : usr=0.00%, sys=0.07%, ctx=225, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5689/0, short=0/0
lat (usec): 4=88.08%, 10=5.40%, 20=2.41%, 50=0.12%, 100=0.02%
lat (usec): 250=0.02%, 500=0.04%, 750=0.16%, 1000=0.30%
lat (msec): 2=0.28%, 4=2.46%, 10=0.16%, 20=0.02%, 500=0.02%
lat (msec): 750=0.02%, 1000=0.28%, 2000=0.23%
seqread.16: (groupid=1, jobs=1): err= 0: pid=8721
read : io=22004KiB, bw=736KiB/s, iops=179, runt= 30598msec
clat (usec): min=2, max=1051K, avg=5560.58, stdev=72885.81
bw (KiB/s) : min= 150, max= 1258, per=3.06%, avg=738.19, stdev=185.16
cpu : usr=0.00%, sys=0.10%, ctx=221, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5501/0, short=0/0
lat (usec): 4=84.49%, 10=10.11%, 20=1.25%, 50=0.05%, 100=0.09%
lat (usec): 250=0.02%, 750=0.22%, 1000=0.33%
lat (msec): 2=0.16%, 4=2.51%, 10=0.15%, 20=0.04%, 50=0.02%
lat (msec): 500=0.02%, 750=0.02%, 1000=0.29%, 2000=0.24%
seqread.17: (groupid=1, jobs=1): err= 0: pid=8722
read : io=23028KiB, bw=772KiB/s, iops=188, runt= 30543msec
clat (usec): min=2, max=1115K, avg=5303.73, stdev=71244.50
bw (KiB/s) : min= 525, max= 1311, per=3.24%, avg=782.27, stdev=140.21
cpu : usr=0.02%, sys=0.08%, ctx=231, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5757/0, short=0/0
lat (usec): 4=91.02%, 10=2.81%, 20=1.95%, 50=0.16%, 100=0.07%
lat (usec): 500=0.02%, 750=0.23%, 1000=0.36%
lat (msec): 2=0.17%, 4=2.50%, 10=0.17%, 500=0.02%, 750=0.02%
lat (msec): 1000=0.31%, 2000=0.19%
seqread.18: (groupid=1, jobs=1): err= 0: pid=8723
read : io=22500KiB, bw=759KiB/s, iops=185, runt= 30344msec
clat (usec): min=2, max=1043K, avg=5392.82, stdev=71451.49
bw (KiB/s) : min= 7, max= 1065, per=3.10%, avg=748.87, stdev=199.37
cpu : usr=0.02%, sys=0.08%, ctx=220, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5625/0, short=0/0
lat (usec): 4=76.27%, 10=19.43%, 20=0.36%, 50=0.02%, 100=0.04%
lat (usec): 500=0.02%, 750=0.12%, 1000=0.34%
lat (msec): 2=0.32%, 4=2.29%, 10=0.23%, 20=0.02%, 750=0.04%
lat (msec): 1000=0.30%, 2000=0.21%
seqread.19: (groupid=1, jobs=1): err= 0: pid=8724
read : io=22484KiB, bw=756KiB/s, iops=184, runt= 30447msec
clat (usec): min=2, max=1047K, avg=5414.98, stdev=71727.01
bw (KiB/s) : min= 7, max= 1256, per=3.11%, avg=750.68, stdev=193.92
cpu : usr=0.02%, sys=0.08%, ctx=227, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5621/0, short=0/0
lat (usec): 4=90.38%, 10=3.61%, 20=1.85%, 50=0.05%, 100=0.05%
lat (usec): 250=0.04%, 500=0.02%, 750=0.27%, 1000=0.25%
lat (msec): 2=0.28%, 4=2.44%, 10=0.21%, 750=0.04%, 1000=0.30%
lat (msec): 2000=0.21%
seqread.20: (groupid=1, jobs=1): err= 0: pid=8725
read : io=20724KiB, bw=705KiB/s, iops=172, runt= 30087msec
clat (usec): min=2, max=1094K, avg=5805.51, stdev=74838.45
bw (KiB/s) : min= 7, max= 885, per=2.88%, avg=696.23, stdev=181.81
cpu : usr=0.02%, sys=0.10%, ctx=207, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5181/0, short=0/0
lat (usec): 4=79.17%, 10=16.43%, 20=0.29%, 50=0.02%, 100=0.10%
lat (usec): 250=0.02%, 500=0.02%, 750=0.31%, 1000=0.35%
lat (msec): 2=0.23%, 4=2.14%, 10=0.29%, 20=0.04%, 100=0.02%
lat (msec): 750=0.02%, 1000=0.35%, 2000=0.21%
seqread.21: (groupid=1, jobs=1): err= 0: pid=8726
read : io=20980KiB, bw=715KiB/s, iops=174, runt= 30031msec
clat (usec): min=2, max=1059K, avg=5714.26, stdev=74168.04
bw (KiB/s) : min= 6, max= 898, per=2.93%, avg=706.63, stdev=175.53
cpu : usr=0.02%, sys=0.08%, ctx=214, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5245/0, short=0/0
lat (usec): 4=88.81%, 10=5.03%, 20=1.85%, 50=0.11%, 100=0.13%
lat (usec): 250=0.02%, 500=0.02%, 750=0.42%, 1000=0.25%
lat (msec): 2=0.25%, 4=2.33%, 10=0.17%, 20=0.02%, 50=0.02%
lat (msec): 750=0.02%, 1000=0.32%, 2000=0.23%
seqread.22: (groupid=1, jobs=1): err= 0: pid=8727
read : io=20980KiB, bw=714KiB/s, iops=174, runt= 30054msec
clat (usec): min=2, max=1060K, avg=5728.32, stdev=74234.75
bw (KiB/s) : min= 6, max= 937, per=2.93%, avg=706.93, stdev=171.56
cpu : usr=0.01%, sys=0.09%, ctx=221, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5245/0, short=0/0
lat (usec): 4=89.86%, 10=3.57%, 20=2.12%, 50=0.13%, 100=0.13%
lat (usec): 250=0.04%, 500=0.06%, 750=0.36%, 1000=0.32%
lat (msec): 2=0.21%, 4=2.33%, 10=0.27%, 20=0.02%, 50=0.02%
lat (msec): 750=0.02%, 1000=0.32%, 2000=0.23%
seqread.23: (groupid=1, jobs=1): err= 0: pid=8728
read : io=22516KiB, bw=755KiB/s, iops=184, runt= 30499msec
clat (usec): min=2, max=1058K, avg=5416.48, stdev=71750.02
bw (KiB/s) : min= 6, max= 1140, per=3.12%, avg=752.65, stdev=180.35
cpu : usr=0.04%, sys=0.07%, ctx=227, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5629/0, short=0/0
lat (usec): 4=89.66%, 10=3.91%, 20=2.31%, 50=0.05%, 100=0.11%
lat (usec): 500=0.02%, 750=0.20%, 1000=0.28%
lat (msec): 2=0.28%, 4=2.43%, 10=0.18%, 20=0.02%, 750=0.04%
lat (msec): 1000=0.36%, 2000=0.16%
seqread.24: (groupid=1, jobs=1): err= 0: pid=8729
read : io=21660KiB, bw=732KiB/s, iops=178, runt= 30283msec
clat (usec): min=2, max=1071K, avg=5586.15, stdev=72519.69
bw (KiB/s) : min= 5, max= 1052, per=3.00%, avg=725.03, stdev=209.10
cpu : usr=0.03%, sys=0.08%, ctx=212, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5415/0, short=0/0
lat (usec): 4=77.21%, 10=18.58%, 20=0.24%, 50=0.02%, 100=0.04%
lat (usec): 250=0.04%, 750=0.31%, 1000=0.31%
lat (msec): 2=0.18%, 4=2.27%, 10=0.18%, 20=0.02%, 50=0.02%
lat (msec): 750=0.04%, 1000=0.35%, 2000=0.18%
seqread.25: (groupid=1, jobs=1): err= 0: pid=8730
read : io=20892KiB, bw=712KiB/s, iops=173, runt= 30043msec
clat (usec): min=2, max=1070K, avg=5749.05, stdev=74371.36
bw (KiB/s) : min= 5, max= 894, per=2.91%, avg=703.43, stdev=186.13
cpu : usr=0.03%, sys=0.10%, ctx=219, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5223/0, short=0/0
lat (usec): 4=82.50%, 10=10.38%, 20=2.89%, 50=0.10%, 100=0.08%
lat (usec): 500=0.02%, 750=0.23%, 1000=0.23%
lat (msec): 2=0.40%, 4=2.37%, 10=0.21%, 50=0.02%, 750=0.02%
lat (msec): 1000=0.33%, 2000=0.23%
seqread.26: (groupid=1, jobs=1): err= 0: pid=8731
read : io=21932KiB, bw=748KiB/s, iops=182, runt= 30003msec
clat (usec): min=2, max=1049K, avg=5470.33, stdev=72319.73
bw (KiB/s) : min= 5, max= 891, per=2.99%, avg=721.20, stdev=169.79
cpu : usr=0.01%, sys=0.09%, ctx=223, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5483/0, short=0/0
lat (usec): 4=80.01%, 10=15.70%, 20=0.16%, 100=0.07%, 250=0.02%
lat (usec): 750=0.26%, 1000=0.31%
lat (msec): 2=0.18%, 4=2.43%, 10=0.27%, 20=0.04%, 1000=0.40%
lat (msec): 2000=0.15%
seqread.27: (groupid=1, jobs=1): err= 0: pid=8732
read : io=20772KiB, bw=706KiB/s, iops=172, runt= 30127msec
clat (usec): min=2, max=1070K, avg=5799.68, stdev=74812.49
bw (KiB/s) : min= 5, max= 1019, per=2.90%, avg=700.63, stdev=199.13
cpu : usr=0.02%, sys=0.08%, ctx=212, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5193/0, short=0/0
lat (usec): 4=91.45%, 10=2.43%, 20=1.98%, 50=0.04%, 100=0.10%
lat (usec): 250=0.02%, 750=0.21%, 1000=0.21%
lat (msec): 2=0.31%, 4=2.50%, 10=0.13%, 20=0.02%, 50=0.02%
lat (msec): 750=0.02%, 1000=0.29%, 2000=0.27%
seqread.28: (groupid=1, jobs=1): err= 0: pid=8733
read : io=21044KiB, bw=714KiB/s, iops=174, runt= 30141msec
clat (usec): min=2, max=1044K, avg=5727.42, stdev=74286.86
bw (KiB/s) : min= 5, max= 901, per=2.94%, avg=709.67, stdev=184.85
cpu : usr=0.01%, sys=0.07%, ctx=216, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5261/0, short=0/0
lat (usec): 4=80.44%, 10=15.28%, 20=0.19%, 100=0.02%, 250=0.02%
lat (usec): 750=0.25%, 1000=0.42%
lat (msec): 2=0.13%, 4=2.41%, 10=0.25%, 20=0.02%, 1000=0.32%
lat (msec): 2000=0.25%
seqread.29: (groupid=1, jobs=1): err= 0: pid=8734
read : io=21812KiB, bw=741KiB/s, iops=181, runt= 30107msec
clat (usec): min=2, max=1078K, avg=5519.46, stdev=72877.90
bw (KiB/s) : min= 5, max= 900, per=3.05%, avg=737.00, stdev=162.01
cpu : usr=0.02%, sys=0.10%, ctx=221, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5453/0, short=0/0
lat (usec): 4=91.29%, 10=2.35%, 20=2.09%, 50=0.15%, 100=0.11%
lat (usec): 750=0.33%, 1000=0.31%
lat (msec): 2=0.26%, 4=2.31%, 10=0.24%, 20=0.02%, 1000=0.37%
lat (msec): 2000=0.18%
seqread.30: (groupid=1, jobs=1): err= 0: pid=8735
read : io=21812KiB, bw=740KiB/s, iops=180, runt= 30174msec
clat (usec): min=2, max=1060K, avg=5531.75, stdev=73067.76
bw (KiB/s) : min= 4, max= 1027, per=3.05%, avg=736.03, stdev=188.68
cpu : usr=0.01%, sys=0.09%, ctx=216, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5453/0, short=0/0
lat (usec): 4=81.35%, 10=14.38%, 20=0.26%, 50=0.02%, 100=0.04%
lat (usec): 250=0.02%, 750=0.22%, 1000=0.28%
lat (msec): 2=0.28%, 4=2.42%, 10=0.18%, 20=0.02%, 1000=0.29%
lat (msec): 2000=0.26%
seqread.31: (groupid=1, jobs=1): err= 0: pid=8736
read : io=20260KiB, bw=689KiB/s, iops=168, runt= 30071msec
clat (usec): min=2, max=1076K, avg=5935.35, stdev=75588.75
bw (KiB/s) : min= 4, max= 915, per=2.82%, avg=681.47, stdev=240.98
cpu : usr=0.01%, sys=0.07%, ctx=213, majf=0, minf=18
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5065/0, short=0/0
lat (usec): 4=91.45%, 10=2.11%, 20=2.15%, 50=0.04%, 100=0.08%
lat (usec): 250=0.04%, 750=0.45%, 1000=0.32%
lat (msec): 2=0.14%, 4=2.33%, 10=0.26%, 20=0.02%, 50=0.02%
lat (msec): 1000=0.38%, 2000=0.22%
seqread.32: (groupid=1, jobs=1): err= 0: pid=8737
read : io=20652KiB, bw=700KiB/s, iops=170, runt= 30210msec
clat (usec): min=2, max=1044K, avg=5849.67, stdev=75241.09
bw (KiB/s) : min= 4, max= 976, per=2.87%, avg=692.93, stdev=228.48
cpu : usr=0.03%, sys=0.07%, ctx=217, majf=0, minf=17
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued r/w: total=5163/0, short=0/0
lat (usec): 4=90.78%, 10=3.12%, 20=1.92%, 50=0.17%, 100=0.06%
lat (usec): 250=0.02%, 750=0.15%, 1000=0.27%
lat (msec): 2=0.27%, 4=2.50%, 10=0.14%, 50=0.02%, 1000=0.27%
lat (msec): 2000=0.31%
Run status group 0 (all jobs):
READ: io=14444KiB, aggrb=488KiB/s, minb=12KiB/s, maxb=17KiB/s, mint=30003msec, maxt=30298msec
Run status group 1 (all jobs):
READ: io=721324KiB, aggrb=24140KiB/s, minb=689KiB/s, maxb=795KiB/s, mint=30003msec, maxt=30598msec
Disk stats (read/write):
sdc: ios=11745/7, merge=125/9, ticks=4375842/19438, in_queue=4403535, util=99.55%
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 11:07 [RFC] cfq: adapt slice to number of processes doing I/O Corrado Zoccolo
@ 2009-09-03 13:01 ` Jeff Moyer
2009-09-03 13:07 ` Jens Axboe
` (2 more replies)
0 siblings, 3 replies; 12+ messages in thread
From: Jeff Moyer @ 2009-09-03 13:01 UTC (permalink / raw)
To: Corrado Zoccolo; +Cc: Linux-Kernel, Jens Axboe
Corrado Zoccolo <czoccolo@gmail.com> writes:
> When the number of processes performing I/O concurrently increases, a
> fixed time slice per process will cause large latencies.
> In the patch, if there are more than 3 processes performing concurrent
> I/O, we scale the time slice down proportionally.
> To safeguard sequential bandwidth, we impose a minimum time slice,
> computed from cfq_slice_idle (the idea is that cfq_slice_idle
> approximates the cost for a seek).
>
> I performed two tests, on a rotational disk:
> * 32 concurrent processes performing random reads
> ** the bandwidth is improved from 466KB/s to 477KB/s
> ** the maximum latency is reduced from 7.667s to 1.728s
> * 32 concurrent processes performing sequential reads
> ** the bandwidth is reduced from 28093KB/s to 24393KB/s
> ** the maximum latency is reduced from 3.781s to 1.115s
>
> I expect numbers to be even better on SSDs, where the penalty to
> disrupt sequential read is much less.
Interesting approach. I'm not sure what the benefits will be on SSDs,
as the idling logic is disabled for them (when nonrot is set and they
support ncq). See cfq_arm_slice_timer.
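[Note: the check Jeff refers to looks roughly like the following in the
cfq-iosched.c of this period. This is a condensed sketch of
cfq_arm_slice_timer(), not the complete function:

	static void cfq_arm_slice_timer(struct cfq_data *cfqd)
	{
		struct cfq_queue *cfqq = cfqd->active_queue;
		unsigned long sl;
		/* ... */
		/*
		 * SSD without seek penalty, with command queueing:
		 * idling is skipped entirely, so on such devices the
		 * proposed slice scaling would be the only knob left.
		 */
		if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
			return;
		/* ... otherwise arm the idle timer for rotational media */
		sl = cfqd->cfq_slice_idle;
		mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	}
]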
> Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
>
> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
> index fd7080e..cff4ca8 100644
> --- a/block/cfq-iosched.c
> +++ b/block/cfq-iosched.c
> @@ -306,7 +306,15 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct
> cfq_queue *cfqq)
> static inline void
> cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
> {
> - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
> + unsigned low_slice = cfqd->cfq_slice_idle * (1 + cfq_cfqq_sync(cfqq));
> + unsigned interested_queues = cfq_class_rt(cfqq) ?
> cfqd->busy_rt_queues : cfqd->busy_queues;
Either my mailer displayed this wrong, or yours wraps lines.
> + unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
> + if (interested_queues > 3) {
> + slice *= 3;
How did you come to this magic number of 3, both for the number of
competing tasks and the multiplier for the slice time? Did you
experiment with this number at all?
> + slice /= interested_queues;
Of course you realize this could disable the idling logic completely,
right? I'll run this patch through some tests and let you know how it
goes.
Thanks!
-Jeff
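[Note: a quick standalone sketch of the arithmetic behind both concerns.
It assumes HZ=1000 and the stock tunables of the time (cfq_prio_to_slice()
yielding a 100ms slice at the default priority, cfq_slice_idle of 8ms);
the values are illustrative, not measured:

	#include <stdio.h>

	#define SLICE_SYNC	100	/* assumed cfq_prio_to_slice() result, ms */
	#define SLICE_IDLE	8	/* assumed cfq_slice_idle, ms */

	int main(void)
	{
		unsigned queues;

		for (queues = 1; queues <= 32; queues *= 2) {
			unsigned slice = SLICE_SYNC;
			unsigned low_slice = SLICE_IDLE * 2;	/* sync-queue floor */

			if (queues > 3) {	/* the patch's scaling */
				slice *= 3;
				slice /= queues;
			}
			if (slice < low_slice)	/* the patch's max() */
				slice = low_slice;
			printf("%2u queues -> %3u ms slice\n", queues, slice);
		}
		return 0;
	}

With these inputs the slice comes out to 100, 100, 75, 37, 18 and 16 ms for
1, 2, 4, 8, 16 and 32 queues: from 16 queues onward the scaled slice is
within roughly 2x of the 8ms idle window, and at 32 queues it sits on the
2*cfq_slice_idle floor, which is where the concern about the idle logic
being squeezed out comes from.]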
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 13:01 ` Jeff Moyer
@ 2009-09-03 13:07 ` Jens Axboe
2009-09-03 16:36 ` Corrado Zoccolo
2009-09-03 15:38 ` Jeff Moyer
2009-09-03 16:26 ` Corrado Zoccolo
2 siblings, 1 reply; 12+ messages in thread
From: Jens Axboe @ 2009-09-03 13:07 UTC (permalink / raw)
To: Jeff Moyer; +Cc: Corrado Zoccolo, Linux-Kernel
On Thu, Sep 03 2009, Jeff Moyer wrote:
> Corrado Zoccolo <czoccolo@gmail.com> writes:
>
> > When the number of processes performing I/O concurrently increases, a
> > fixed time slice per process will cause large latencies.
> > In the patch, if there are more than 3 processes performing concurrent
> > I/O, we scale the time slice down proportionally.
> > To safeguard sequential bandwidth, we impose a minimum time slice,
> > computed from cfq_slice_idle (the idea is that cfq_slice_idle
> > approximates the cost for a seek).
> >
> > I performed two tests, on a rotational disk:
> > * 32 concurrent processes performing random reads
> > ** the bandwidth is improved from 466KB/s to 477KB/s
> > ** the maximum latency is reduced from 7.667s to 1.728s
> > * 32 concurrent processes performing sequential reads
> > ** the bandwidth is reduced from 28093KB/s to 24393KB/s
> > ** the maximum latency is reduced from 3.781s to 1.115s
> >
> > I expect numbers to be even better on SSDs, where the penalty to
> > disrupt sequential read is much less.
>
> Interesting approach. I'm not sure what the benefits will be on SSDs,
> as the idling logic is disabled for them (when nonrot is set and they
> support ncq). See cfq_arm_slice_timer.
Also, the problem with scaling the slice a lot is that throughput has a
tendency to fall off a cliff at some point. Have you tried benchmarking
buffered writes with reads?
--
Jens Axboe
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 13:01 ` Jeff Moyer
2009-09-03 13:07 ` Jens Axboe
@ 2009-09-03 15:38 ` Jeff Moyer
2009-09-03 16:47 ` Corrado Zoccolo
2009-09-03 16:26 ` Corrado Zoccolo
2 siblings, 1 reply; 12+ messages in thread
From: Jeff Moyer @ 2009-09-03 15:38 UTC (permalink / raw)
To: Corrado Zoccolo; +Cc: Linux-Kernel, Jens Axboe
Jeff Moyer <jmoyer@redhat.com> writes:
> Corrado Zoccolo <czoccolo@gmail.com> writes:
>
>> When the number of processes performing I/O concurrently increases, a
>> fixed time slice per process will cause large latencies.
>> In the patch, if there are more than 3 processes performing concurrent
>> I/O, we scale the time slice down proportionally.
>> To safeguard sequential bandwidth, we impose a minimum time slice,
>> computed from cfq_slice_idle (the idea is that cfq_slice_idle
>> approximates the cost for a seek).
>>
>> I performed two tests, on a rotational disk:
>> * 32 concurrent processes performing random reads
>> ** the bandwidth is improved from 466KB/s to 477KB/s
>> ** the maximum latency is reduced from 7.667s to 1.728s
>> * 32 concurrent processes performing sequential reads
>> ** the bandwidth is reduced from 28093KB/s to 24393KB/s
>> ** the maximum latency is reduced from 3.781s to 1.115s
>>
>> I expect numbers to be even better on SSDs, where the penalty to
>> disrupt sequential read is much less.
>
> Interesting approach. I'm not sure what the benefits will be on SSDs,
> as the idling logic is disabled for them (when nonrot is set and they
> support ncq). See cfq_arm_slice_timer.
>
>> Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
>>
>> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
>> index fd7080e..cff4ca8 100644
>> --- a/block/cfq-iosched.c
>> +++ b/block/cfq-iosched.c
>> @@ -306,7 +306,15 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct
>> cfq_queue *cfqq)
>> static inline void
>> cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
>> {
>> - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
>> + unsigned low_slice = cfqd->cfq_slice_idle * (1 + cfq_cfqq_sync(cfqq));
>> + unsigned interested_queues = cfq_class_rt(cfqq) ?
>> cfqd->busy_rt_queues : cfqd->busy_queues;
>
> Either my mailer displayed this wrong, or yours wraps lines.
>
>> + unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
>> + if (interested_queues > 3) {
>> + slice *= 3;
>
> How did you come to this magic number of 3, both for the number of
> competing tasks and the multiplier for the slice time? Did you
> experiment with this number at all?
>
>> + slice /= interested_queues;
>
> Of course you realize this could disable the idling logic completely,
> right? I'll run this patch through some tests and let you know how it
> goes.
I missed that you updated the slice end based on a max of slice and
low_slice. Sorry about that.
This patch does not fare well when judging fairness between processes.
I have several fio jobs that generate read workloads, and I try to
figure out whether the I/O scheduler is providing fairness based on the
I/O priorities of the processes. With your patch applied, we get the
following results:
total priority: 880
total data transferred: 1045920
class prio ideal xferred %diff
be 0 213938 352500 64
be 1 190167 193012 1
be 2 166396 123380 -26
be 3 142625 86260 -40
be 4 118854 62964 -48
be 5 95083 40180 -58
be 6 71312 74484 4
be 7 47541 113140 137
Class and prio should be self-explanatory. ideal is my cooked-up
estimate of the number of bytes the given priority should have
transferred, based on the total data transferred and all processes
competing for the disk, weighted by priority. xferred is the actual
amount of data transferred, and %diff is the difference between those
last two columns.
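[ A plausible reconstruction of that computation, assuming the weight
is each process's CFQ slice length in ms (slice_sync = 100ms, so
prio 0 -> 180 ... prio 7 -> 40; the 880 and 800 totals above are
consistent with this, but the actual script is Jeff's):

	/* Hypothetical sketch: weight each queue by its CFQ slice
	 * length and split the observed total proportionally. */
	static int weight(int prio)
	{
		return 100 + 20 * (4 - prio);	/* slice_sync * (9 - prio) / 5 */
	}

	static unsigned long ideal_bytes(unsigned long total, int prio,
					 int total_weight)
	{
		return total * weight(prio) / total_weight;
	}

	/* ideal_bytes(1045920, 0, 880) == 213938, matching the table. */
]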
Notice that best effort priority 7 managed to transfer more data than be
prio 3. That's bad. Now, let's look at 8 processes all at the same
priority level:
total priority: 800
total data transferred: 1071036
class prio ideal xferred %diff
be 4 133879 222452 66
be 4 133879 243188 81
be 4 133879 187380 39
be 4 133879 42512 -69
be 4 133879 39156 -71
be 4 133879 47604 -65
be 4 133879 37364 -73
be 4 133879 251380 87
Hmm. That doesn't look good.
For comparison, here is the output from the vanilla kernel for those two
runs:
total priority: 880
total data transferred: 954272
class prio ideal xferred %diff
be 0 195192 229108 17
be 1 173504 202740 16
be 2 151816 156660 3
be 3 130128 152052 16
be 4 108440 91636 -16
be 5 86752 64244 -26
be 6 65064 34292 -48
be 7 43376 23540 -46
total priority: 800
total data transferred: 887264
class prio ideal xferred %diff
be 4 110908 124404 12
be 4 110908 123380 11
be 4 110908 118004 6
be 4 110908 113396 2
be 4 110908 107252 -4
be 4 110908 98356 -12
be 4 110908 96244 -14
be 4 110908 106228 -5
It's worth noting that the overall throughput went up in the patched
kernel for this second case. However, if we care at all about the
notion of I/O priorities, I think your patch needs more work.
Cheers,
Jeff
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 13:01 ` Jeff Moyer
2009-09-03 13:07 ` Jens Axboe
2009-09-03 15:38 ` Jeff Moyer
@ 2009-09-03 16:26 ` Corrado Zoccolo
2009-09-03 18:29 ` Jeff Moyer
2 siblings, 1 reply; 12+ messages in thread
From: Corrado Zoccolo @ 2009-09-03 16:26 UTC (permalink / raw)
To: Jeff Moyer; +Cc: Linux-Kernel, Jens Axboe
Hi Jeff,
On Thu, Sep 3, 2009 at 3:01 PM, Jeff Moyer<jmoyer@redhat.com> wrote:
> Corrado Zoccolo <czoccolo@gmail.com> writes:
>
>> When the number of processes performing I/O concurrently increases, a
>> fixed time slice per process will cause large latencies.
>> In the patch, if there are more than 3 processes performing concurrent
>> I/O, we scale the time slice down proportionally.
>> To safeguard sequential bandwidth, we impose a minimum time slice,
>> computed from cfq_slice_idle (the idea is that cfq_slice_idle
>> approximates the cost for a seek).
>>
>> I performed two tests, on a rotational disk:
>> * 32 concurrent processes performing random reads
>> ** the bandwidth is improved from 466KB/s to 477KB/s
>> ** the maximum latency is reduced from 7.667s to 1.728s
>> * 32 concurrent processes performing sequential reads
>> ** the bandwidth is reduced from 28093KB/s to 24393KB/s
>> ** the maximum latency is reduced from 3.781s to 1.115s
>>
>> I expect numbers to be even better on SSDs, where the penalty to
>> disrupt sequential read is much less.
>
> Interesting approach. I'm not sure what the benefits will be on SSDs,
> as the idling logic is disabled for them (when nonrot is set and they
> support ncq). See cfq_arm_slice_timer.
>
Yes, I know. Unfortunately, not all SSD devices have ncq.
Moreover, idling is disabled only for seeky processes, and the current
threshold to identify seeky processes is so high that it rarely kicks
in.
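[ For context, the seekiness test being referred to looks roughly like
this in the 2.6.31-era code (from memory; the exact names and the value
of the constant may differ):

	/* A queue's context only counts as seeky once its mean seek
	 * distance exceeds 8192 sectors (4MB), a bar most workloads
	 * never clear. */
	#define CIC_SEEK_THR	(8 * 1024)
	#define CIC_SEEKY(cic)	((cic)->seek_mean > CIC_SEEK_THR)
]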
>> Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
>>
>> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
>> index fd7080e..cff4ca8 100644
>> --- a/block/cfq-iosched.c
>> +++ b/block/cfq-iosched.c
>> @@ -306,7 +306,15 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct
>> cfq_queue *cfqq)
>> static inline void
>> cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
>> {
>> - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
>> + unsigned low_slice = cfqd->cfq_slice_idle * (1 + cfq_cfqq_sync(cfqq));
>> + unsigned interested_queues = cfq_class_rt(cfqq) ?
>> cfqd->busy_rt_queues : cfqd->busy_queues;
>
> Either my mailer displayed this wrong, or yours wraps lines.
>
>> + unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
>> + if (interested_queues > 3) {
>> + slice *= 3;
>
> How did you come to this magic number of 3, both for the number of
> competing tasks and the multiplier for the slice time? Did you
> experiment with this number at all?
>
The number is quickly explained. The base slice is 100ms, and on the
mailing list it was mentioned that latency becomes annoying for a
user when it goes above 300ms.
This means that, for up to 3 processes, the current slices are fine,
and beyond that we have to scale.
This is good, because we don't change the behaviour unless there are
many competing processes.
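[ To make the arithmetic concrete, an illustrative model of the patch's
rule, assuming the default 100ms base slice and 8ms slice_idle (hence a
16ms minimum for sync queues); the figures are illustrative, not from
the thread:

	static unsigned scaled_slice(unsigned base, unsigned low,
				     unsigned nqueues)
	{
		unsigned slice = base;

		if (nqueues > 3) {	/* the patch's threshold */
			slice *= 3;
			slice /= nqueues;
		}
		return slice > low ? slice : low; /* max(slice, low_slice) */
	}

	/* scaled_slice(100, 16, 4) == 75, (100, 16, 8) == 37,
	 * (100, 16, 32) == 16: a full round over 32 sync readers takes
	 * ~0.5s instead of ~3.2s, in line with the latency figures. */
]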
>> + slice /= interested_queues;
>
> Of course you realize this could disable the idling logic completely,
> right? I'll run this patch through some tests and let you know how it
> goes.
>
You already found that this is not the case.
Thanks for the interest
Corrado
> Thanks!
>
> -Jeff
>
--
__________________________________________________________________________
dott. Corrado Zoccolo mailto:czoccolo@gmail.com
PhD - Department of Computer Science - University of Pisa, Italy
--------------------------------------------------------------------------
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 13:07 ` Jens Axboe
@ 2009-09-03 16:36 ` Corrado Zoccolo
2009-09-05 16:16 ` Jens Axboe
0 siblings, 1 reply; 12+ messages in thread
From: Corrado Zoccolo @ 2009-09-03 16:36 UTC (permalink / raw)
To: Jens Axboe; +Cc: Jeff Moyer, Linux-Kernel
Hi Jens,
On Thu, Sep 3, 2009 at 3:07 PM, Jens Axboe<jens.axboe@oracle.com> wrote:
> On Thu, Sep 03 2009, Jeff Moyer wrote:
>> Corrado Zoccolo <czoccolo@gmail.com> writes:
>>
>> > When the number of processes performing I/O concurrently increases, a
>> > fixed time slice per process will cause large latencies.
>> > In the patch, if there are more than 3 processes performing concurrent
>> > I/O, we scale the time slice down proportionally.
>> > To safeguard sequential bandwidth, we impose a minimum time slice,
>> > computed from cfq_slice_idle (the idea is that cfq_slice_idle
>> > approximates the cost for a seek).
>> >
>> > I performed two tests, on a rotational disk:
>> > * 32 concurrent processes performing random reads
>> > ** the bandwidth is improved from 466KB/s to 477KB/s
>> > ** the maximum latency is reduced from 7.667s to 1.728s
>> > * 32 concurrent processes performing sequential reads
>> > ** the bandwidth is reduced from 28093KB/s to 24393KB/s
>> > ** the maximum latency is reduced from 3.781s to 1.115s
>> >
>> > I expect numbers to be even better on SSDs, where the penalty to
>> > disrupt sequential read is much less.
>>
>> Interesting approach. I'm not sure what the benefits will be on SSDs,
>> as the idling logic is disabled for them (when nonrot is set and they
>> support ncq). See cfq_arm_slice_timer.
>
> Also, the problem with scaling the slice a lot is that throughput has a
> tendency to fall off a cliff at some point.
This is the reason that I have a minimum slice. It is already reached
for 32 processes as in my example, so the throughput drop is at most
20%.
Currently it is computed as 2*slice_idle for sync, and 1*slice_idle
for async queues.
I think this is what causes the leveling of data transferred
regardless of priority. I'll cook up a formula to scale the minimum
slice according to priority as well, to fix this issue.
> Have you tried benchmarking
> buffered writes with reads?
Yes. I used that workload for benchmarks while tuning the patch.
Adding async writes doesn't change the results, mostly because cfq
preempts async queues when sync queues have new requests, and with
many readers, there are always plenty of incoming reads. Writes have
almost no chance to happen.
Corrado
>
> --
> Jens Axboe
>
>
--
__________________________________________________________________________
dott. Corrado Zoccolo mailto:czoccolo@gmail.com
PhD - Department of Computer Science - University of Pisa, Italy
--------------------------------------------------------------------------
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 15:38 ` Jeff Moyer
@ 2009-09-03 16:47 ` Corrado Zoccolo
2009-09-03 17:16 ` Jeff Moyer
0 siblings, 1 reply; 12+ messages in thread
From: Corrado Zoccolo @ 2009-09-03 16:47 UTC (permalink / raw)
To: Jeff Moyer; +Cc: Linux-Kernel, Jens Axboe
Hi Jeff,
can you share the benchmark?
I think I have to fix the min slice to take priority into account,
too, so that priorities are respected when there are many processes.
For the fairness at a single priority level, my tests show that
fairness is improved with the patches (comparing minimum and maximum
bandwidth for a set of 32 processes):
Original:
Run status group 0 (all jobs):
READ: io=14192KiB, aggrb=480KiB/s, minb=7KiB/s, maxb=20KiB/s,
mint=30001msec, maxt=30258msec
Run status group 1 (all jobs):
READ: io=829292KiB, aggrb=27816KiB/s, minb=723KiB/s,
maxb=1004KiB/s, mint=30004msec, maxt=30529msec
Adaptive:
Run status group 0 (all jobs):
READ: io=14444KiB, aggrb=488KiB/s, minb=12KiB/s, maxb=17KiB/s,
mint=30003msec, maxt=30298msec
Run status group 1 (all jobs):
READ: io=721324KiB, aggrb=24140KiB/s, minb=689KiB/s, maxb=795KiB/s,
mint=30003msec, maxt=30598msec
Are you using random think times? This could explain the discrepancy.
Corrado
On Thu, Sep 3, 2009 at 5:38 PM, Jeff Moyer<jmoyer@redhat.com> wrote:
> Jeff Moyer <jmoyer@redhat.com> writes:
>
>> Corrado Zoccolo <czoccolo@gmail.com> writes:
>>
>>> When the number of processes performing I/O concurrently increases, a
>>> fixed time slice per process will cause large latencies.
>>> In the patch, if there are more than 3 processes performing concurrent
>>> I/O, we scale the time slice down proportionally.
>>> To safeguard sequential bandwidth, we impose a minimum time slice,
>>> computed from cfq_slice_idle (the idea is that cfq_slice_idle
>>> approximates the cost for a seek).
>>>
>>> I performed two tests, on a rotational disk:
>>> * 32 concurrent processes performing random reads
>>> ** the bandwidth is improved from 466KB/s to 477KB/s
>>> ** the maximum latency is reduced from 7.667s to 1.728s
>>> * 32 concurrent processes performing sequential reads
>>> ** the bandwidth is reduced from 28093KB/s to 24393KB/s
>>> ** the maximum latency is reduced from 3.781s to 1.115s
>>>
>>> I expect numbers to be even better on SSDs, where the penalty to
>>> disrupt sequential read is much less.
>>
>> Interesting approach. I'm not sure what the benefits will be on SSDs,
>> as the idling logic is disabled for them (when nonrot is set and they
>> support ncq). See cfq_arm_slice_timer.
>>
>>> Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
>>>
>>> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
>>> index fd7080e..cff4ca8 100644
>>> --- a/block/cfq-iosched.c
>>> +++ b/block/cfq-iosched.c
>>> @@ -306,7 +306,15 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct
>>> cfq_queue *cfqq)
>>> static inline void
>>> cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
>>> {
>>> - cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
>>> + unsigned low_slice = cfqd->cfq_slice_idle * (1 + cfq_cfqq_sync(cfqq));
>>> + unsigned interested_queues = cfq_class_rt(cfqq) ?
>>> cfqd->busy_rt_queues : cfqd->busy_queues;
>>
>> Either my mailer displayed this wrong, or yours wraps lines.
>>
>>> + unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
>>> + if (interested_queues > 3) {
>>> + slice *= 3;
>>
>> How did you come to this magic number of 3, both for the number of
>> competing tasks and the multiplier for the slice time? Did you
>> experiment with this number at all?
>>
>>> + slice /= interested_queues;
>>
>> Of course you realize this could disable the idling logic completely,
>> right? I'll run this patch through some tests and let you know how it
>> goes.
>
> I missed that you updated the slice end based on a max of slice and
> low_slice. Sorry about that.
>
> This patch does not fare well when judging fairness between processes.
> I have several fio jobs that generate read workloads, and I try to
> figure out whether the I/O scheduler is providing fairness based on the
> I/O priorities of the processes. With your patch applied, we get the
> following results:
>
> total priority: 880
> total data transferred: 1045920
> class prio ideal xferred %diff
> be 0 213938 352500 64
> be 1 190167 193012 1
> be 2 166396 123380 -26
> be 3 142625 86260 -40
> be 4 118854 62964 -48
> be 5 95083 40180 -58
> be 6 71312 74484 4
> be 7 47541 113140 137
>
> Class and prio should be self-explanatory. ideal is my cooked up
> version of the ideal number of bytes the given priority should have
> transferred based on the total data transferred and all processes
> weighted by priority competing for the disk. xferred is the actual
> amount of data transferred, and %diff is the difference between those
> last two columns.
>
> Notice that best effort priority 7 managed to transfer more data than be
> prio 3. That's bad. Now, let's look at 8 processes all at the same
> priority level:
>
> total priority: 800
> total data transferred: 1071036
> class prio ideal xferred %diff
> be 4 133879 222452 66
> be 4 133879 243188 81
> be 4 133879 187380 39
> be 4 133879 42512 -69
> be 4 133879 39156 -71
> be 4 133879 47604 -65
> be 4 133879 37364 -73
> be 4 133879 251380 87
>
> Hmm. That doesn't look good.
>
> For comparison, here is the output from the vanilla kernel for those two
> runs:
>
> total priority: 880
> total data transferred: 954272
> class prio ideal xferred %diff
> be 0 195192 229108 17
> be 1 173504 202740 16
> be 2 151816 156660 3
> be 3 130128 152052 16
> be 4 108440 91636 -16
> be 5 86752 64244 -26
> be 6 65064 34292 -48
> be 7 43376 23540 -46
>
> total priority: 800
> total data transferred: 887264
> class prio ideal xferred %diff
> be 4 110908 124404 12
> be 4 110908 123380 11
> be 4 110908 118004 6
> be 4 110908 113396 2
> be 4 110908 107252 -4
> be 4 110908 98356 -12
> be 4 110908 96244 -14
> be 4 110908 106228 -5
>
> It's worth noting that the overall throughput went up in the patched
> kernel for this second case. However, if we care at all about the
> notion of I/O priorities, I think your patch needs more work.
>
> Cheers,
> Jeff
>
--
__________________________________________________________________________
dott. Corrado Zoccolo mailto:czoccolo@gmail.com
PhD - Department of Computer Science - University of Pisa, Italy
--------------------------------------------------------------------------
The self-confidence of a warrior is not the self-confidence of the average
man. The average man seeks certainty in the eyes of the onlooker and calls
that self-confidence. The warrior seeks impeccability in his own eyes and
calls that humbleness.
Tales of Power - C. Castaneda
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 16:47 ` Corrado Zoccolo
@ 2009-09-03 17:16 ` Jeff Moyer
2009-09-04 7:22 ` Corrado Zoccolo
0 siblings, 1 reply; 12+ messages in thread
From: Jeff Moyer @ 2009-09-03 17:16 UTC (permalink / raw)
To: Corrado Zoccolo; +Cc: Linux-Kernel, Jens Axboe
Corrado Zoccolo <czoccolo@gmail.com> writes:
> Hi Jeff,
> can you share the benchmark?
Of course, how miserly of me!
http://people.redhat.com/jmoyer/cfq-regression-tests-0.0.1.tar.gz
> I think I have to fix the min slice to consider priority, too, to
> respect the priorities when there are many processes.
>
> For the fairness at a single priority level, my tests show that
> fairness is improved with the patches (comparing minimum and maximum
> bandwidth for a set of 32 processes):
>
> Original:
> Run status group 0 (all jobs):
> READ: io=14192KiB, aggrb=480KiB/s, minb=7KiB/s, maxb=20KiB/s,
> mint=30001msec, maxt=30258msec
>
> Run status group 1 (all jobs):
> READ: io=829292KiB, aggrb=27816KiB/s, minb=723KiB/s,
> maxb=1004KiB/s, mint=30004msec, maxt=30529msec
>
> Adaptive:
> Run status group 0 (all jobs):
> READ: io=14444KiB, aggrb=488KiB/s, minb=12KiB/s, maxb=17KiB/s,
> mint=30003msec, maxt=30298msec
>
> Run status group 1 (all jobs):
> READ: io=721324KiB, aggrb=24140KiB/s, minb=689KiB/s, maxb=795KiB/s,
> mint=30003msec, maxt=30598msec
>
> Are you using random think times? This could explain the discrepancy.
No, it's just a sync read benchmark. It's the be4-x-8.fio job file in
the tarball mentioned above. Note that the run-time is only 10 seconds,
so maybe that's not enough to get accurate data? If you try increasing
it, be careful that you don't read in the entire file and wrap back
around; since this is a buffered read test, that would skew the results.
Cheers,
Jeff
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 16:26 ` Corrado Zoccolo
@ 2009-09-03 18:29 ` Jeff Moyer
0 siblings, 0 replies; 12+ messages in thread
From: Jeff Moyer @ 2009-09-03 18:29 UTC (permalink / raw)
To: Corrado Zoccolo; +Cc: Linux-Kernel, Jens Axboe
Corrado Zoccolo <czoccolo@gmail.com> writes:
> Hi Jeff,
>
>> How did you come to this magic number of 3, both for the number of
>> competing tasks and the multiplier for the slice time? Did you
>> experiment with this number at all?
>
> The number is quickly explained. The base slice is 100ms, and on the
> mailing list it was mentioned that the latency becomes annoing for an
> user when it is above 300ms. This means that, up to 3 processes, the
> current thresholds are good, and for more we have to scale. This is
> good, because we don't change the behaviour unless we have many
> competing processes.
OK, then in your next patch, could you make this more clear? Maybe
define a LATENCY_MAX and derive the number of processes from that,
instead of assuming that the base slice will always and forever be
100ms?
Cheers,
Jeff
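[ A sketch of the shape Jeff seems to be asking for; the names and
values are hypothetical, though the follow-up patch later in the
thread takes essentially this route via a preferred_latency tunable:

	/* Derive the scaling threshold from a latency target instead
	 * of hardcoding 3 against an assumed 100ms base slice. */
	#define CFQ_LATENCY_MAX	(3 * HZ / 10)	/* ~300ms target */

	unsigned process_threshold = CFQ_LATENCY_MAX / cfqd->cfq_slice[1];
]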
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 17:16 ` Jeff Moyer
@ 2009-09-04 7:22 ` Corrado Zoccolo
0 siblings, 0 replies; 12+ messages in thread
From: Corrado Zoccolo @ 2009-09-04 7:22 UTC (permalink / raw)
To: Jeff Moyer; +Cc: Linux-Kernel, Jens Axboe
Hi Jeff,
On Thu, Sep 3, 2009 at 7:16 PM, Jeff Moyer<jmoyer@redhat.com> wrote:
> Corrado Zoccolo <czoccolo@gmail.com> writes:
>
>> Hi Jeff,
>> can you share the benchmark?
>
> Of course, how miserly of me!
>
> http://people.redhat.com/jmoyer/cfq-regression-tests-0.0.1.tar.gz
>
>> I think I have to fix the min slice to consider priority, too, to
>> respect the priorities when there are many processes.
>>
>> For the fairness at a single priority level, my tests show that
>> fairness is improved with the patches (comparing minimum and maximum
>> bandwidth for a set of 32 processes):
>>
>> Original:
>> Run status group 0 (all jobs):
>> READ: io=14192KiB, aggrb=480KiB/s, minb=7KiB/s, maxb=20KiB/s,
>> mint=30001msec, maxt=30258msec
>>
>> Run status group 1 (all jobs):
>> READ: io=829292KiB, aggrb=27816KiB/s, minb=723KiB/s,
>> maxb=1004KiB/s, mint=30004msec, maxt=30529msec
>>
>> Adaptive:
>> Run status group 0 (all jobs):
>> READ: io=14444KiB, aggrb=488KiB/s, minb=12KiB/s, maxb=17KiB/s,
>> mint=30003msec, maxt=30298msec
>>
>> Run status group 1 (all jobs):
>> READ: io=721324KiB, aggrb=24140KiB/s, minb=689KiB/s, maxb=795KiB/s,
>> mint=30003msec, maxt=30598msec
>>
>> Are you using random think times? This could explain the discrepancy.
>
> No, it's just a sync read benchmark. It's the be4-x-8.fio job file in
> the tarball mentioned above. Note that the run-time is only 10 seconds,
> so maybe that's not enough to get accurate data? If you try increasing
> it, be careful that you don't read in the entire file and wrap back
> around, as this is a buffered read test, that will skew the results.
>
I could reproduce the skew on my setup, but with a different pattern
from yours. I think the reason, in my case, is that since the files are
very large (and the fs partially filled), they are allocated in regions
of the disk with different read speeds.
Even cutting file size by half, in order to fit the benchmark on my disk, I get:
[root@et2 cfq-regression-tests]# for i in `seq 1 8`; do sync; echo 3 >
/proc/sys/vm/drop_caches; dd if=/media/lacie/testfile$i of=/dev/null
bs=1M count=30; done
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 0,978737 s, 32,1 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 0,971206 s, 32,4 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,04638 s, 30,1 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,18266 s, 26,6 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,32781 s, 23,7 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,33056 s, 23,6 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,71092 s, 18,4 MB/s
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 1,56802 s, 20,1 MB/s
Original cfq gives:
/home/corrado/cfq-regression-tests/cfq_orig/be4-x-8.fio
/home/corrado/cfq-regression-tests
total priority: 800
total data transferred: 179072
class prio ideal xferred %diff
be 4 22384 28148 25
be 4 22384 29428 31
be 4 22384 27380 22
be 4 22384 23524 5
be 4 22384 20964 -7
be 4 22384 22004 -2
be 4 22384 12020 -47
be 4 22384 15604 -31
The patched cfq is a bit more skewed, but follows the same pattern:
/home/corrado/cfq-regression-tests/cfq_adapt/be4-x-8.fio
/home/corrado/cfq-regression-tests
total priority: 800
total data transferred: 204088
class prio ideal xferred %diff
be 4 25511 34020 33
be 4 25511 34020 33
be 4 25511 32484 27
be 4 25511 28388 11
be 4 25511 24628 -4
be 4 25511 24940 -3
be 4 25511 10276 -60
be 4 25511 15332 -40
/home/corrado/cfq-regression-tests
I think it is more skewed because, with a smaller slice, the seek time
has a larger impact on the amount of transferred data at slower
transfer rates, as shown below:
The time consumed per slice is given by (here rate is the time needed
to transfer one unit of data):
(1) slice = seek + amount * rate
if we multiply slice by alpha < 1, we can transfer a different amount
of data (amount1):
(2) alpha * slice = seek + amount1 * rate
substituting slice from (1) into (2), to express amount1 as a function
of amount:
alpha * (seek + amount * rate) = seek + amount1 * rate
amount1 = alpha * amount - (1 - alpha) * seek / rate
where we see that decreasing rate increases the impact of the seek
time.
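[ Plugging illustrative numbers into the formula above (8ms seek,
100ms slice, alpha = 0.3, i.e. a 30ms slice; figures chosen only for
illustration):

	transfer time before:	100 - 8 = 92ms
	transfer time after:	 30 - 8 = 22ms

22/92 ~= 0.24 of the original data rather than the 0.30 that pure
slice scaling would suggest: the fixed seek cost eats a larger share
of a shorter slice. ]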
Your data, though, seems very different from mine; in fact, vanilla
doesn't show the same difference.
Can you try running vanilla with smaller base slices, to see if the
same effect can be seen there?
echo 15 > /sys/block/sdb/queue/iosched/slice_async
echo 37 > /sys/block/sdb/queue/iosched/slice_sync
If this doesn't exhibit the same pattern, then maybe the problem is
that the number of busy queues observed by the processes is
consistently different, resulting in unequal time slices.
In that case, I'll add some averaging.
I also added a "sync; echo 3 > /proc/sys/vm/drop_caches" in ioprio.sh
before each test. It is not strictly necessary, because you already
unmount the test FS, but it helps reduce the pressure on the VM, so
that doing I/O on our files doesn't also cause dependent I/O on other
files to free up space.
Thanks,
Corrado
> Cheers,
> Jeff
>
--
__________________________________________________________________________
dott. Corrado Zoccolo mailto:czoccolo@gmail.com
PhD - Department of Computer Science - University of Pisa, Italy
--------------------------------------------------------------------------
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-03 16:36 ` Corrado Zoccolo
@ 2009-09-05 16:16 ` Jens Axboe
2009-09-07 14:57 ` Corrado Zoccolo
0 siblings, 1 reply; 12+ messages in thread
From: Jens Axboe @ 2009-09-05 16:16 UTC (permalink / raw)
To: Corrado Zoccolo; +Cc: Jeff Moyer, Linux-Kernel
On Thu, Sep 03 2009, Corrado Zoccolo wrote:
> Hi Jens,
> On Thu, Sep 3, 2009 at 3:07 PM, Jens Axboe<jens.axboe@oracle.com> wrote:
> > On Thu, Sep 03 2009, Jeff Moyer wrote:
> >> Corrado Zoccolo <czoccolo@gmail.com> writes:
> >>
> >> > When the number of processes performing I/O concurrently increases, a
> >> > fixed time slice per process will cause large latencies.
> >> > In the patch, if there are more than 3 processes performing concurrent
> >> > I/O, we scale the time slice down proportionally.
> >> > To safeguard sequential bandwidth, we impose a minimum time slice,
> >> > computed from cfq_slice_idle (the idea is that cfq_slice_idle
> >> > approximates the cost for a seek).
> >> >
> >> > I performed two tests, on a rotational disk:
> >> > * 32 concurrent processes performing random reads
> >> > ** the bandwidth is improved from 466KB/s to 477KB/s
> >> > ** the maximum latency is reduced from 7.667s to 1.728s
> >> > * 32 concurrent processes performing sequential reads
> >> > ** the bandwidth is reduced from 28093KB/s to 24393KB/s
> >> > ** the maximum latency is reduced from 3.781s to 1.115s
> >> >
> >> > I expect numbers to be even better on SSDs, where the penalty to
> >> > disrupt sequential read is much less.
> >>
> >> Interesting approach. I'm not sure what the benefits will be on SSDs,
> >> as the idling logic is disabled for them (when nonrot is set and they
> >> support ncq). See cfq_arm_slice_timer.
> >
> > Also, the problem with scaling the slice a lot is that throughput has a
> > tendency to fall off a cliff at some point.
>
> This is the reason that I have a minimum slice. It is already reached
> for 32 processes as in my example, so the throughput drop is at most
> 20%.
> Currently it is computed as 2*slice_idle for sync, and 1*slice_idle
> for async queues.
> I think this causes the leveling of data transferred regardless of
> priorities. I'll cook up a formula to better scale also the minimum
> slice according to priority, to fix this issue.
That's for your case; it may be different for other hardware. But I
think the approach is sane to some degree; it'll require more work,
though. One
problem is that the count of busy queues will fluctuate a lot for sync
IO, so you'll have fairness issues. The number of potentially interested
processes needs to be a rolling average of some sort, not just looking
at ->busy_queues.
> > Have you tried benchmarking
> > buffered writes with reads?
>
> Yes. I used that workload for benchmarks while tuning the patch.
> Adding async writes doesn't change the results, mostly because cfq
> preempts async queues when sync queues have new requests, and with
> many readers, there are always plenty of incoming reads. Writes almost
> have no chance to happen.
OK, it should not, if the slice start logic is working. Just wanted to
make sure :-)
--
Jens Axboe
* Re: [RFC] cfq: adapt slice to number of processes doing I/O
2009-09-05 16:16 ` Jens Axboe
@ 2009-09-07 14:57 ` Corrado Zoccolo
0 siblings, 0 replies; 12+ messages in thread
From: Corrado Zoccolo @ 2009-09-07 14:57 UTC (permalink / raw)
To: Jens Axboe; +Cc: Jeff Moyer, Linux-Kernel
Hi,
On Sat, Sep 05 2009 18:16:35, Jens Axboe wrote:
> On Thu, Sep 03 2009, Corrado Zoccolo wrote:
> > Hi Jens,
> >
> > [snip]
> >
> > This is the reason that I have a minimum slice. It is already reached
> > for 32 processes as in my example, so the throughput drop is at most
> > 20%.
> > Currently it is computed as 2*slice_idle for sync, and 1*slice_idle
> > for async queues.
> > I think this causes the leveling of data transferred regardless of
> > priorities. I'll cook up a formula to better scale also the minimum
> > slice according to priority, to fix this issue.
>
> For your case, it may be different for other hardware. But I think the
> approach is sane to some degree, it'll require more work though. One
> problem is that the count of busy queues will fluctuate a lot for sync
> IO, so you'll have fairness issues. The number of potentially interested
> processes needs to be a rolling average of some sort, not just looking
> at ->busy_queues.
Here is the new patch, with the improved formula for the minimum time
slice, and with a rolling average for the number of processes doing I/O.
The average is computed in such a way that it quickly follows sudden
increases (to keep latency low) and decreases slowly (to preserve
fairness in spite of rapid drops in the instantaneous value), as
illustrated below.
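[ An illustrative restatement of that averaging rule (divisor d = 4,
as in the patch below; a standalone model, not kernel code):

	/* The larger of (avg, busy) gets weight (d-1)/d, the smaller
	 * gets 1/d, with rounding. */
	static unsigned update_avg(unsigned avg, unsigned busy, unsigned d)
	{
		unsigned lo = avg < busy ? avg : busy;
		unsigned hi = avg < busy ? busy : avg;

		return ((d - 1) * hi + lo + d / 2) / d;
	}

	/* With d == 4: going from 2 to 16 busy queues, the average jumps
	 * to 13 in a single step; going from 16 back to 2 it decays
	 * 13, 10, 8, 7, 6, ... so bursts raise the estimate quickly
	 * while decreases are smoothed. */
]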
I added 2 tunables for testing:
* the preferred latency, used to compute the threshold on the number of
processes (which was hardcoded in the previous version);
* the divisor in the averaging formula, to alter the weights (the
weights are computed as 1/d and (d-1)/d).
They can be removed if/when we find the optimal values.
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
---
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fd7080e..b200b13 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -27,6 +27,8 @@ static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
+static int cfq_preferred_latency = HZ * 3/10; /* 300 ms */
+static int cfq_queue_hist_divisor = 4;
/*
* offset from end of service tree
@@ -134,11 +136,13 @@ struct cfq_data {
struct rb_root prio_trees[CFQ_PRIO_LISTS];
unsigned int busy_queues;
+ unsigned int busy_queues_avg;
/*
* Used to track any pending rt requests so we can pre-empt current
* non-RT cfqq in service when this value is non-zero.
*/
unsigned int busy_rt_queues;
+ unsigned int busy_rt_queues_avg;
int rq_in_driver;
int sync_flight;
@@ -178,6 +182,8 @@ struct cfq_data {
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
+ unsigned int cfq_preferred_latency;
+ unsigned int cfq_queue_hist_divisor;
struct list_head cic_list;
@@ -303,10 +309,37 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
+static inline unsigned
+cfq_get_interested_queues(struct cfq_data *cfqd, bool rt) {
+ unsigned min_q, max_q;
+ unsigned mult = cfqd->cfq_queue_hist_divisor -1;
+ unsigned round = cfqd->cfq_queue_hist_divisor / 2;
+ if (rt) {
+ min_q = min(cfqd->busy_rt_queues_avg, cfqd->busy_rt_queues);
+ max_q = max(cfqd->busy_rt_queues_avg, cfqd->busy_rt_queues);
+ cfqd->busy_rt_queues_avg = (mult * max_q + min_q + round) / cfqd->cfq_queue_hist_divisor;
+ return cfqd->busy_rt_queues_avg;
+ } else {
+ min_q = min(cfqd->busy_queues_avg, cfqd->busy_queues);
+ max_q = max(cfqd->busy_queues_avg, cfqd->busy_queues);
+ cfqd->busy_queues_avg = (mult * max_q + min_q + round) / cfqd->cfq_queue_hist_divisor;
+ return cfqd->busy_queues_avg;
+ }
+}
+
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+ unsigned process_threshold = cfqd->cfq_preferred_latency / cfqd->cfq_slice[1];
+ unsigned interested_queues = cfq_get_interested_queues(cfqd, cfq_class_rt(cfqq));
+ unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+
+ if (interested_queues > process_threshold) {
+ unsigned low_slice = min(slice, 2 * slice * cfqd->cfq_slice_idle / cfqd->cfq_slice[1]);
+ slice = max(slice * process_threshold / interested_queues, low_slice);
+ }
+
+ cfqq->slice_end = jiffies + slice;
cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}
@@ -2494,6 +2527,8 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_preferred_latency = cfq_preferred_latency;
+ cfqd->cfq_queue_hist_divisor = cfq_queue_hist_divisor;
cfqd->hw_tag = 1;
return cfqd;
@@ -2563,6 +2598,8 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_preferred_latency_show, cfqd->cfq_preferred_latency, 1);
+SHOW_FUNCTION(cfq_queue_hist_divisor_show, cfqd->cfq_queue_hist_divisor, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2594,6 +2631,10 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
+
+STORE_FUNCTION(cfq_preferred_latency_store, &cfqd->cfq_preferred_latency, 1, 1000, 1);
+STORE_FUNCTION(cfq_queue_hist_divisor_store, &cfqd->cfq_queue_hist_divisor, 1, 100, 0);
+
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
@@ -2609,6 +2650,8 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
+ CFQ_ATTR(preferred_latency),
+ CFQ_ATTR(queue_hist_divisor),
__ATTR_NULL
};
Thread overview: 12+ messages
2009-09-03 11:07 [RFC] cfq: adapt slice to number of processes doing I/O Corrado Zoccolo
2009-09-03 13:01 ` Jeff Moyer
2009-09-03 13:07 ` Jens Axboe
2009-09-03 16:36 ` Corrado Zoccolo
2009-09-05 16:16 ` Jens Axboe
2009-09-07 14:57 ` Corrado Zoccolo
2009-09-03 15:38 ` Jeff Moyer
2009-09-03 16:47 ` Corrado Zoccolo
2009-09-03 17:16 ` Jeff Moyer
2009-09-04 7:22 ` Corrado Zoccolo
2009-09-03 16:26 ` Corrado Zoccolo
2009-09-03 18:29 ` Jeff Moyer