To All,

Looks like I got nicked by Occam's Razor when I "simplified" my cluster config file... :-)  A "less simplified" version is below.
My question still stands, however: what should "cluster.conf" look like if you're trying to deploy a "highly available" NFS configuration? And, again, by "highly available" I mean that NFS clients never get the dreaded "stale NFS file handle" message unless the entire cluster has failed.
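(For context, the idea is that clients mount the floating service IPs rather than any node name, so a relocation should be invisible to them -- roughly something like this on a client, where the IP and export path come from the config below and /mnt/volume01 is just an example mount point:

  mount -t nfs 192.168.1.1:/lvm/volume01 /mnt/volume01

The fixed fsid= on each <fs> resource is what's supposed to keep the file handle identical when the export moves to another node.)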
-RZ
p.s. A better, but still simplified, cluster.conf for EL5.
<?xml version="1.0"?>
<cluster alias="ha-nfs-el5" config_version="357" name="ha-nfs-el5">
<fence_daemon clean_start="0" post_fail_delay="0" post_join_delay="3"/>
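<!-- Each node has two fence methods: iLO power fencing first, with the SAN switch port as a fallback. -->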
<clusternodes>
<clusternode name="node01.arlut.utexas.edu" nodeid="1"
votes="1">
<fence>
<method name="1">
<device name="node01-ilo"/>
</method>
<method name="2">
<device name="sanbox01" port="0"/>
</method>
</fence>
</clusternode>
<clusternode name="node02.arlut.utexas.edu" nodeid="2"
votes="1">
<fence>
<method name="1">
<device name="node02-ilo"/>
</method>
<method name="2">
<device name="sanbox02" port="0"/>
</method>
</fence>
</clusternode>
<clusternode name="node03.arlut.utexas.edu" nodeid="3"
votes="1">
<fence>
<method name="1">
<device name="node03-ilo"/>
</method>
<method name="2">
<device name="sanbox03" port="0"/>
</method>
</fence>
</clusternode>
</clusternodes>
<cman/>
<fencedevices>
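<!-- SAN switch (fence_sanbox2) and iLO (fence_ilo) devices; login credentials here are placeholders. -->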
<fencedevice agent="fence_sanbox2" ipaddr="sanbox01.arlut.utexas.edu" login="admin"
name="sanbox01" passwd="password"/>
<fencedevice agent="fence_sanbox2" ipaddr="sanbox02.arlut.utexas.edu" login="admin"
name="sanbox02" passwd="password"/>
<fencedevice agent="fence_sanbox2" ipaddr="sanbox03.arlut.utexas.edu" login="admin"
name="sanbox03" passwd="password"/>
<fencedevice agent="fence_ilo" hostname="node01-ilo" login="Administrator"
name="node01-ilo" passwd="DUMMY"/>
<fencedevice agent="fence_ilo" hostname="node02-ilo" login="Administrator"
name="node02-ilo" passwd="DUMMY"/>
<fencedevice agent="fence_ilo" hostname="node03-ilo" login="Administrator"
name="node03-ilo" passwd="DUMMY"/>
</fencedevices>
<rm>
<failoverdomains>
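<!-- Three ordered, restricted failover domains with rotated priorities, so each NFS service prefers a different node. -->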
<failoverdomain name="nfs1-domain" nofailback="1" ordered="1"
restricted="1">
<failoverdomainnode name="node01.arlut.utexas.edu"
priority="1"/>
<failoverdomainnode name="node02.arlut.utexas.edu"
priority="2"/>
<failoverdomainnode name="node03.arlut.utexas.edu"
priority="3"/>
</failoverdomain>
<failoverdomain name="nfs2-domain" nofailback="1" ordered="1"
restricted="1">
<failoverdomainnode name="node01.arlut.utexas.edu"
priority="3"/>
<failoverdomainnode name="node02.arlut.utexas.edu"
priority="1"/>
<failoverdomainnode name="node03.arlut.utexas.edu"
priority="2"/>
</failoverdomain>
<failoverdomain name="nfs3-domain" nofailback="1" ordered="1"
restricted="1">
<failoverdomainnode name="node01.arlut.utexas.edu"
priority="2"/>
<failoverdomainnode name="node02.arlut.utexas.edu"
priority="3"/>
<failoverdomainnode name="node03.arlut.utexas.edu"
priority="1"/>
</failoverdomain>
</failoverdomains>
<resources>
<ip address="192.168.1.1" monitor_link="1"/>
<ip address="192.168.1.2" monitor_link="1"/>
<ip address="192.168.1.3" monitor_link="1"/>
<fs device="/dev/cvg00/volume01" force_fsck="0" force_unmount="1" fsid="49388"
fstype="ext3" mountpoint="/lvm/volume01" name="volume01" self_fence="0"/>
<fs device="/dev/cvg00/volume02" force_fsck="0" force_unmount="1" fsid="58665"
fstype="ext3" mountpoint="/lvm/volume01" name="volume01" self_fence="0"/>
<fs device="/dev/cvg00/volume03" force_fsck="0" force_unmount="1" fsid="61028"
fstype="ext3" mountpoint="/lvm/volume01" name="volume01" self_fence="0"/>
<nfsclient allow_recover="1" name="local-subnet"
options="rw,insecure" target="192.168.1.0/24"/>
</resources>
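<!-- One service per volume: floating IP -> filesystem -> export -> client ACL; nfslock="1" so NFS lock state can be reclaimed after failover. -->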
<service autostart="1" domain="nfs1-domain" exclusive="0" name="nfs1"
nfslock="1" recovery="relocate">
<ip ref="192.168.1.1">
<fs __independent_subtree="1" ref="volume01">
<nfsexport name="nfs-volume01">
<nfsclient name=" "
ref="local-subnet"/>
</nfsexport>
</fs>
</ip>
</service>
<service autostart="1" domain="nfs2-domain" exclusive="0" name="nfs2"
nfslock="1" recovery="relocate">
<ip ref="192.168.1.2">
<fs __independent_subtree="1" ref="volume02">
<nfsexport name="nfs-volume02">
<nfsclient name=" "
ref="local-subnet"/>
</nfsexport>
</fs>
</ip>
</service>
<service autostart="1" domain="nfs3-domain" exclusive="0" name="nfs3"
nfslock="1" recovery="relocate">
<ip ref="192.168.1.3">
<fs __independent_subtree="1" ref="volume03">
<nfsexport name="nfs-volume03">
<nfsclient name=" "
ref="local-subnet"/>
</nfsexport>
</fs>
</ip>
</service>
</rm>
</cluster>
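For anyone poking at this, a relocation can be exercised with the stock rgmanager tools -- just a sketch, using the service and node names from the config above:

  # where are the three NFS services running right now?
  clustat

  # push nfs1 over to node02 and confirm a client mount survives the move
  clusvcadm -r nfs1 -m node02.arlut.utexas.edu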
--
Randy Zagar                      Sr. Unix Systems Administrator
E-mail: [email protected]         Applied Research Laboratories
Phone: 512 835-3131              Univ. of Texas at Austin
