Hi:
I have a Ceph cluster with 19 SSDs as a cache tier. Replication is 3, and there are 100+ OSDs as
backend storage.
I ran some performance tests with fio, following these steps:
rbd create liuliangtest -p vms --size 20G
rbd map -p vms liuliangtest
mkfs.ext4 /dev/rbd0
mount /dev/rbd0 /mnt
The test commands and results are as follows. My question is about the different ioengines:
all other options are identical, yet the IOPS with libaio is 7971 while psync reaches only 299.
Is this correct? Why? (A rough sketch of what each ioengine does per I/O is included after the
fio output below.)
[root@iaas01 mnt]# fio --bs=4k --ioengine=libaio --direct=1 --rw=randwrite --runtime=60 --group_reporting --name=fio-write --size=1G --filename=/mnt/test.db -numjobs=32 --iodepth=1
fio-write: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=1
...
fio-3.1
Starting 32 processes
Jobs: 32 (f=32): [w(32)][100.0%][r=0KiB/s,w=30.2MiB/s][r=0,w=7731 IOPS][eta 00m:00s]
fio-write: (groupid=0, jobs=32): err= 0: pid=3357231: Tue Dec 17 22:06:11 2019
write: IOPS=7971, BW=31.1MiB/s (32.7MB/s)(1869MiB/60012msec)
slat (usec): min=5, max=5723, avg=17.87, stdev=39.82
clat (usec): min=1149, max=428306, avg=3991.69, stdev=9606.04
lat (usec): min=1161, max=428320, avg=4009.88, stdev=9606.36
clat percentiles (usec):
| 1.00th=[ 1500], 5.00th=[ 1663], 10.00th=[ 1778], 20.00th=[ 1942],
| 30.00th=[ 2089], 40.00th=[ 2245], 50.00th=[ 2409], 60.00th=[ 2671],
| 70.00th=[ 3032], 80.00th=[ 3851], 90.00th=[ 6259], 95.00th=[ 9503],
| 99.00th=[ 23462], 99.50th=[ 39060], 99.90th=[160433], 99.95th=[185598],
| 99.99th=[256902]
bw ( KiB/s): min= 200, max= 1595, per=3.13%, avg=997.40, stdev=318.73, samples=3840
iops : min= 50, max= 398, avg=249.17, stdev=79.62, samples=3840
lat (msec) : 2=23.70%, 4=57.33%, 10=14.40%, 20=3.32%, 50=0.88%
lat (msec) : 100=0.13%, 250=0.25%, 500=0.01%
cpu : usr=0.19%, sys=0.57%, ctx=480565, majf=0, minf=986
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued rwt: total=0,478401,0, short=0,0,0, dropped=0,0,0
latency : target=0, window=0, percentile=100.00%, depth=1
Run status group 0 (all jobs):
WRITE: bw=31.1MiB/s (32.7MB/s), 31.1MiB/s-31.1MiB/s (32.7MB/s-32.7MB/s), io=1869MiB (1960MB), run=60012-60012msec
Disk stats (read/write):
rbd0: ios=0/477511, merge=0/11, ticks=0/1902034, in_queue=1903400, util=99.90%
[root@iaas01 mnt]# fio --bs=4k --ioengine=psync --direct=1 --rw=randwrite --runtime=60 --group_reporting --name=fio-write --size=1G --filename=/mnt/test.db -numjobs=32 --iodepth=1
fio-write: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=psync, iodepth=1
...
fio-3.1
Starting 32 processes
Jobs: 32 (f=32): [w(32)][100.0%][r=0KiB/s,w=1001KiB/s][r=0,w=250 IOPS][eta 00m:00s]
fio-write: (groupid=0, jobs=32): err= 0: pid=3361544: Tue Dec 17 22:08:55 2019
write: IOPS=299, BW=1200KiB/s (1229kB/s)(70.5MiB/60168msec)
clat (usec): min=1583, max=426473, avg=106536.74, stdev=58040.24
lat (usec): min=1583, max=426474, avg=106537.41, stdev=58040.14
clat percentiles (usec):
| 1.00th=[ 1926], 5.00th=[ 2278], 10.00th=[ 3097], 20.00th=[ 86508],
| 30.00th=[ 93848], 40.00th=[ 99091], 50.00th=[104334], 60.00th=[110625],
| 70.00th=[117965], 80.00th=[130548], 90.00th=[158335], 95.00th=[208667],
| 99.00th=[295699], 99.50th=[350225], 99.90th=[404751], 99.95th=[413139],
| 99.99th=[425722]
bw ( KiB/s): min= 8, max= 112, per=3.13%, avg=37.54, stdev=11.62, samples=3840
iops : min= 2, max= 28, avg= 9.38, stdev= 2.90, samples=3840
lat (msec) : 2=1.66%, 4=9.32%, 10=1.32%, 20=0.34%, 50=0.04%
lat (msec) : 100=28.89%, 250=55.22%, 500=3.21%
cpu : usr=0.01%, sys=0.06%, ctx=36190, majf=0, minf=1016
IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
issued rwt: total=0,18050,0, short=0,0,0, dropped=0,0,0
latency : target=0, window=0, percentile=100.00%, depth=1
Run status group 0 (all jobs):
WRITE: bw=1200KiB/s (1229kB/s), 1200KiB/s-1200KiB/s (1229kB/s-1229kB/s), io=70.5MiB (73.9MB), run=60168-60168msec
Disk stats (read/write):
rbd0: ios=0/18050, merge=0/11, ticks=0/59434, in_queue=59422, util=98.68%
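
For reference, here is a minimal C sketch (not fio's actual code, just an illustration of the
syscall-level difference between the two ioengines) of roughly what each job does per I/O under
these options, assuming the file is opened with O_DIRECT and a 4 KiB aligned buffer. psync issues
one blocking pwrite() per I/O, while libaio goes through io_submit()/io_getevents():

/* Minimal sketch, not fio's implementation; error handling omitted.
 * Compile with: gcc sketch.c -laio */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdlib.h>
#include <unistd.h>

#define BS 4096

/* ioengine=psync: one blocking pwrite() per I/O; the job thread waits
 * for the full RBD round trip before it can submit the next write. */
static void psync_write(int fd, void *buf, off_t off)
{
    pwrite(fd, buf, BS, off);
}

/* ioengine=libaio: submit with io_submit() and reap with io_getevents();
 * with iodepth=1 each job still waits for one completion before the
 * next submit, but the submission/completion path is different. */
static void libaio_write(io_context_t ctx, int fd, void *buf, off_t off)
{
    struct iocb cb, *cbs[1] = { &cb };
    struct io_event ev;

    io_prep_pwrite(&cb, fd, buf, BS, off);
    io_submit(ctx, 1, cbs);
    io_getevents(ctx, 1, 1, &ev, NULL);
}

int main(void)
{
    io_context_t ctx = 0;
    void *buf;
    int fd = open("/mnt/test.db", O_WRONLY | O_DIRECT);

    posix_memalign(&buf, BS, BS);   /* O_DIRECT needs an aligned buffer */
    io_setup(1, &ctx);

    psync_write(fd, buf, 0);         /* what each psync job does per I/O */
    libaio_write(ctx, fd, buf, BS);  /* what each libaio job does per I/O */

    io_destroy(ctx);
    close(fd);
    free(buf);
    return 0;
}

In both runs numjobs=32 and iodepth=1, so each engine should have at most 32 writes in flight at
any time; the sketch is only meant to show where the two per-I/O code paths diverge.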