Hi! I've made a setup for a 2-node cluster using drbd8.
The hardware is two servers, each with an 8-core CPU, connected by a 10GbE cluster link. # hdparm -tT /dev/md0p2 /dev/md0p2: Timing cached reads: 11750 MB in 1.99 seconds = 5894.71 MB/sec Timing buffered disk reads: 806 MB in 3.04 seconds = 265.02 MB/sec The raw hard disk device reaches >250MB/sec. The network for DRBD is 10GbE. Copying a huge VM file via NFS to another partition on the md0 device reaches about 200 MB/sec. The syncing result is: cat /proc/drbd version: 8.4.11 (api:1/proto:86-101) srcversion: C27D50EE6C67ED861348AA6 0: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r----- ns:3127296 nr:0 dw:0 dr:3127296 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:d oos:7222256 [=====>..............] sync'ed: 30.3% (7052/10104)M finish: 0:03:11 speed: 37,784 (37,676) K/sec I see only 37 MB/sec instead of the expected 150–250 MB/sec. This is less than 19% of the expected performance of 200 MB/sec. I have this setup: ---x----x---x-----X--- [root@cl1 drbd.d]# more cluster.res resource laf { disk { c-plan-ahead 15; c-fill-target 24; c-min-rate 150M; c-max-rate 720M; disk-barrier no; disk-flushes no; al-extents 3389; } net { protocol C; # max-epoch-size 20000; sndbuf-size 36k; sndbuf-size 1024k; rcvbuf-size 2048k; unplug-watermark 24; } on l1i { device /dev/drbd0; disk /dev/md0p2; address 192.168.254.11:7778; meta-disk /dev/sda4; } on l2i { device /dev/drbd0; disk /dev/md0p2; address 192.168.254.21:7778; meta-disk /dev/sda4; } } ---x----x---x-----X--- [root@l2i drbd.d]# more global_common.conf global { usage-count no; } common { protocol C; handlers { pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f"; pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f"; local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f"; split-brain 
"/usr/lib/drbd/notify-split-brain.sh root"; } startup { become-primary-on both; wfc-timeout 60; degr-wfc-timeout 60; } disk { on-io-error detach; disk-barrier no; disk-flushes no; al-extents 3389; no-disk-flushes ; no-disk-barrier ; no-md-flushes ; c-plan-ahead 128; c-fill-target 256M; c-min-rate 250M; c-max-rate 275M; } net { allow-two-primaries; after-sb-0pri discard-zero-changes; after-sb-1pri discard-secondary; after-sb-2pri disconnect; sndbuf-size 1024k; rcvbuf-size 2048k; max-epoch-size 20000; max-buffers 131072; unplug-watermark 24; } syncer { rate 1120M; } } ---x----x---x-----X--- Any ideas why drbd8 is so painfully slow? Any hints? TIA, with kind regards, Jürgen Sauer
<<attachment: juergen_sauer.vcf>>
signature.asc
Description: OpenPGP digital signature
_______________________________________________ drbd-user mailing list drbd-user@lists.linbit.com http://lists.linbit.com/mailman/listinfo/drbd-user