Hi,

I'm trying to create a server vol file that exports 4 local disks via the distribute translator.
All the examples I can find use the distribute translator on the client side.
The thing is, I want to be sure the client can't use the disks in a wrong way,
e.g. by misconfiguring a primary disk as a replica or similar.
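For example, nothing would stop a client-side vol file from doing something like this (hypothetical volume names), silently treating two independent disks as mirrors:

volume bad_replicate
 type cluster/replicate
 # WRONG: these are two different disks holding different files, not copies of each other
 subvolumes serverDisk1 serverDisk2
end-volume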

So my primary goal is to export the 4 disks as just one brick, with all files distributed (not striped)
across those disks.
Later I will add more disks, but I don't want the client vol file to have to change!

Using the following vol files I get an error:
r...@hostname:# ls /media/storage/Media
ls: cannot access /media/storage/Media: Input/output error

The volumes are started like this:
server:
/usr/local/sbin/glusterfs --log-level=DEBUG --disable-direct-io-mode -l /media/74fd89bf-b8c7-4f3d-9092-826613fe1e85/glusterfsd/glusterfs.server.log -f /media/74fd89bf-b8c7-4f3d-9092-826613fe1e85/glusterfsd/server.vol 2>&1

client:
mount -t glusterfs /media/74fd89bf-b8c7-4f3d-9092-826613fe1e85/glusterfsd/client.vol -o log-file=/media/74fd89bf-b8c7-4f3d-9092-826613fe1e85/glusterfsd/glusterfs.client.log,volfile-name=Media,log-level=DEBUG /media/storage/Media

The server doesn't log anything after startup,
and the client logs from the 'ls' are:

[2010-03-14 17:04:04] D [client-protocol.c:4908:client_lookup_cbk] serverMedia: LOOKUP 0/ (/): inode number changed from {5445936622006697985,1} to {0,1}
[2010-03-14 17:04:04] W [fuse-bridge.c:722:fuse_attr_cbk] glusterfs-fuse: 2: LOOKUP() / => -1 (Success)

Any idea how I can export the server data under a single subvolume name, distributing files over the disks, and keep the possibility to expand the storage by adding new disks, maybe including replication, in the next
months?
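For reference, this is roughly what I have in mind for later, just a sketch (the new disk's UUID is a placeholder): add a fifth posix/locks pair and append it to the distribute subvolumes, while client.vol stays untouched:

volume posix_<new-uuid>
 type storage/posix
 option directory /media/<new-uuid>/storage/Media
end-volume
volume locks_<new-uuid>
 type features/locks
 subvolumes posix_<new-uuid>
end-volume

volume distributeMedia
 type cluster/distribute
 # the existing four locks volumes plus the new one
 subvolumes locks_68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd locks_74fd89bf-b8c7-4f3d-9092-826613fe1e85 locks_ed14c1d5-0560-46a5-a8cd-b67bbb77386a locks_f2db5144-ee3f-4047-b845-2e08c6bc4a2e locks_<new-uuid>
end-volume

And for replication I would expect to pair disks under cluster/replicate below the distribute layer, again just a sketch with placeholder names:

volume replicate1
 type cluster/replicate
 # a pair of disks holding identical copies
 subvolumes locks_diskA locks_diskB
end-volume
volume distributeMedia
 type cluster/distribute
 subvolumes replicate1 replicate2
end-volume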

Thanks all, and
kind regards




#################server.vol#############################
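# one storage/posix + features/locks pair per local disk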
volume posix_68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd
 type storage/posix
 option directory /media/68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd/storage/Media
end-volume
volume locks_68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd
 type features/locks
 subvolumes posix_68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd
end-volume


volume posix_74fd89bf-b8c7-4f3d-9092-826613fe1e85
 type storage/posix
 option directory /media/74fd89bf-b8c7-4f3d-9092-826613fe1e85/storage/Media
end-volume
volume locks_74fd89bf-b8c7-4f3d-9092-826613fe1e85
 type features/locks
 subvolumes posix_74fd89bf-b8c7-4f3d-9092-826613fe1e85
end-volume


volume posix_ed14c1d5-0560-46a5-a8cd-b67bbb77386a
 type storage/posix
 option directory /media/ed14c1d5-0560-46a5-a8cd-b67bbb77386a/storage/Media
end-volume
volume locks_ed14c1d5-0560-46a5-a8cd-b67bbb77386a
 type features/locks
 subvolumes posix_ed14c1d5-0560-46a5-a8cd-b67bbb77386a
end-volume


volume posix_f2db5144-ee3f-4047-b845-2e08c6bc4a2e
 type storage/posix
 option directory /media/f2db5144-ee3f-4047-b845-2e08c6bc4a2e/storage/Media
end-volume
volume locks_f2db5144-ee3f-4047-b845-2e08c6bc4a2e
 type features/locks
 subvolumes posix_f2db5144-ee3f-4047-b845-2e08c6bc4a2e
end-volume


volume distributeMedia
 type cluster/distribute
 subvolumes locks_68d5f83c-bbae-40c7-9a3d-e2a52c3c0ecd locks_74fd89bf-b8c7-4f3d-9092-826613fe1e85 locks_ed14c1d5-0560-46a5-a8cd-b67bbb77386a locks_f2db5144-ee3f-4047-b845-2e08c6bc4a2e
end-volume

volume Media
 type performance/io-threads
 option thread-count 16
 subvolumes distributeMedia
end-volume

volume storageServer
 type protocol/server
 option transport-type tcp
 option transport.socket.listen-port 6996
 option auth.addr.Media.allow *
 subvolumes Media
end-volume

#######################client.vol#############################
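# remote-subvolume must match the 'Media' volume name exported by protocol/server in server.vol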
volume serverMedia
 type protocol/client
 option transport-type tcp
 option remote-host 127.0.0.1
 option remote-port 6996
 option remote-subvolume Media
end-volume

volume writebehindMedia
 type performance/write-behind
 option cache-size 4MB
 subvolumes serverMedia
end-volume

volume readaheadMedia
 type performance/read-ahead
 option page-count 4
 subvolumes writebehindMedia
end-volume

volume Media
 type performance/io-cache
 option cache-size 512MB
 subvolumes readaheadMedia
end-volume
