Hi,
I'm currently testing DRBD and I'm seeing read/write performance problems.
On the local RAID 5 array with BBU I can read/write at around 800 MB/s.
If I use /dev/drbd100 it drops to 200 MB/s.
Here is the resource config:
resource drbd.test {
    protocol C;

    volume 0 {
        device    /dev/drbd100;
        disk      /dev/vgKVM.OS/drbdtest;
        meta-disk internal;
    }

    net {
        shared-secret "drbd.drbdtest";
    }

    on KVMe001 {
        address 172.16.0.1:7100;
    }

    on KVMe002 {
        address 172.16.0.2:7100;
    }
}
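For reference, this is how I check what the kernel module is actually using on this node (just a sketch, all standard drbd-utils commands, output omitted):
# drbdadm dump drbd.test      <- configuration as drbdadm parses it
# drbdsetup show              <- disk/net options currently active in the kernel
# cat /proc/drbd              <- connection and disk state of all minors
The rest of the settings come from my global_common.conf: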
# DRBD is the result of over a decade of development by LINBIT.
# In case you need professional services for DRBD or have
# feature requests visit http://www.linbit.com
global {
    usage-count no;

    # Decide what kind of udev symlinks you want for "implicit" volumes
    # (those without explicit volume <vnr> {} block, implied vnr=0):
    #   /dev/drbd/by-resource/<resource>/<vnr>   (explicit volumes)
    #   /dev/drbd/by-resource/<resource>         (default for implicit)
    udev-always-use-vnr; # treat implicit the same as explicit volumes

    # minor-count dialog-refresh disable-ip-verification
    # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600;
}
common {
    handlers {
        #fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        #after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
    }

    startup {
        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
        wfc-timeout 30;
        degr-wfc-timeout 120;
        outdated-wfc-timeout 30;
    }

    syncer {
        rate 300M;
        verify-alg md5;
    }

    options {
        cpu-mask 2;
    }

    disk {
        on-io-error detach;
        disk-flushes no;
        disk-barrier no;
        c-plan-ahead 0;
        c-fill-target 24M;
        c-min-rate 80M;
        c-max-rate 720M;
        al-extents 6007;
        fencing resource-only;
    }

    net {
        # protocol timeout max-epoch-size max-buffers
        # connect-int ping-int sndbuf-size rcvbuf-size ko-count
        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
        # after-sb-1pri after-sb-2pri always-asbp rr-conflict
        # ping-timeout data-integrity-alg tcp-cork on-congestion
        # congestion-fill congestion-extents csums-alg verify-alg
        # use-rle
        allow-two-primaries no;
        after-sb-0pri discard-zero-changes;
        after-sb-1pri discard-secondary;
        after-sb-2pri disconnect;
        rr-conflict disconnect;
        max-buffers 8000;
        max-epoch-size 8000;
        sndbuf-size 2M;
        rcvbuf-size 2M;
        unplug-watermark 16;
    }
}
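When I change options in global_common.conf I push them to the running resource like this (sketch; both are standard drbdadm invocations, no need to stop the resource):
# drbdadm adjust drbd.test    <- re-apply disk/net options for this resource
# drbdadm adjust all          <- or for every configured resource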
The resource isn't connected at the moment because I need the other server for other tests,
but in the end both nodes will be connected via 10GbE.
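Before connecting them again I plan to sanity-check the 10GbE link first, roughly like this (assumes iperf3 is installed on both nodes; IPs as in the resource file above):
# iperf3 -s                       (on KVMe002)
# iperf3 -c 172.16.0.2 -t 30      (on KVMe001)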
Here are some outputs from dd:
Bare metal
# dd if=/dev/vgKVM.OS/test of=/dev/null bs=1M count=10240
10240+0 records in
10240+0 records out
10737418240 bytes (11 GB, 10 GiB) copied, 13.6046 s, 789 MB/s
and with the DRBD device:
# dd if=/dev/drbd100 of=/dev/null bs=1M count=10240
10240+0 records in
10240+0 records out
10737418240 bytes (11 GB, 10 GiB) copied, 47.678 s, 225 MB/s
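Since dd on the block devices goes through the page cache and readahead, I also want to repeat the test with direct I/O to rule that out (a sketch; same devices as above, the fio line only if fio is installed, reads only so nothing gets overwritten):
# dd if=/dev/vgKVM.OS/test of=/dev/null bs=1M count=10240 iflag=direct
# dd if=/dev/drbd100 of=/dev/null bs=1M count=10240 iflag=direct
# fio --name=seqread --filename=/dev/drbd100 --rw=read --bs=1M --direct=1 --ioengine=libaio --iodepth=32 --size=10G --readonly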
Thanks Alex
