I have the following setup:

Two hosts acting as GlusterFS servers, and two shared LUNs on a SAN that can be mounted on either node at the same time (currently they are formatted as XFS).

I would like both LUNs to be served by the same GlusterFS server at any given time, but with the ability to fail over automatically to the second GlusterFS server in case of failure.

Currently I have set up both servers to export both LUNs, joined the two exports with the distribute translator, and on the client I access the two servers through the HA translator.

You can see everything in the config files below. Now, when I mount the GlusterFS volume I can see it, but I get an input/output error, obviously due to some mistake in the configuration.

Server1 file:

volume posix1
  type storage/posix
  option directory /data1/
end-volume

volume posix2
  type storage/posix
  option directory /data2/
end-volume

volume locks1
    type features/locks
    subvolumes posix1
end-volume

volume locks2
    type features/locks
    subvolumes posix2
end-volume

volume brick1
    type performance/io-threads
    option thread-count 8
    subvolumes locks1
end-volume

volume brick2
    type performance/io-threads
    option thread-count 8
    subvolumes locks2
end-volume

volume distribute
    type cluster/distribute
    subvolumes brick1 brick2
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option auth.addr.brick2.allow *
    option auth.addr.distribute.allow *
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1 brick2 distribute
end-volume

Server2 file:

volume posix1
  type storage/posix
  option directory /data1/
end-volume

volume posix2
  type storage/posix
  option directory /data2/
end-volume

volume locks1
    type features/locks
    subvolumes posix1
end-volume

volume locks2
    type features/locks
    subvolumes posix2
end-volume

volume brick1
    type performance/io-threads
    option thread-count 8
    subvolumes locks1
end-volume

volume brick2
    type performance/io-threads
    option thread-count 8
    subvolumes locks2
end-volume

volume distribute
    type cluster/distribute
    subvolumes brick1 brick2
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option auth.addr.brick2.allow *
    option auth.addr.distribute.allow *
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1 brick2 distribute
end-volume
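
Both server volfiles are started the same way on their respective nodes, roughly like this (assuming they live under /etc/glusterfs; adjust the path to your layout):

# start the server daemon against the hand-written volfile (path is just an example)
glusterfsd -f /etc/glusterfs/glusterfsd.vol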

Client file:

volume glusterfs01-1
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.10.11
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume distribute
end-volume

volume glusterfs02-1
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.10.12
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume distribute
end-volume

volume ha
    type testing/cluster/ha
    subvolumes glusterfs01-1 glusterfs02-1
    option preferred-subvolume glusterfs01-1
end-volume

volume readahead
    type performance/read-ahead
    option page-count 4
    subvolumes ha
end-volume

volume iocache
    type performance/io-cache
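    # cache-size is meant to come out to roughly one fifth of total RAM:
    # MemTotal in /proc/meminfo is in kB, so dividing by 5120 (= 1024 * 5) gives MB/5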
    option cache-size `echo $(( $(grep 'MemTotal' /proc/meminfo | sed 's/[^0-9]//g') / 5120 ))`MB
    option cache-timeout 1
    subvolumes readahead
end-volume

volume quickread
    type performance/quick-read
    option cache-timeout 1
    option max-file-size 64kB
    subvolumes iocache
end-volume

volume writebehind
    type performance/write-behind
    option cache-size 4MB
    subvolumes quickread
end-volume
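
For reference, I mount the client volfile roughly like this (the volfile path and mount point are just examples):

# mount the volume using the client volfile above
glusterfs -f /etc/glusterfs/glusterfs.vol /mnt/glusterfs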

Regards,
Nikola
