Hello.
I built a 2-member cluster with qdisk.
I defined 3 heuristics, each with a score of 1:
one pinging the public network router and two pinging the cross interconnect.
It seems that only the public heuristic works.
Q1:
1. When I test the cross interconnect, nothing happens.
2. When I test the public heuristic, I get "insufficient score 0/1" and the
system is rebooted, which suggests that
the cluster doesn't recognize my 2 other heuristics.
Any hints about this?
Q2:
Can I give the qdisk itself a score of 3?
Thanks in Advance !!!!!!!!!!!!!
Sklemer.
ubierp2 /etc/cluster # cat cluster.conf
<?xml version="1.0"?>
<cluster alias="bicluster" config_version="70" name="bicluster">
<quorumd device="/dev/mapper/EmcQdisk" interval="1" label="qdisk"
min_score="1" tko="23" votes="1">
<heuristic interval="2" program="ping -c1 -t2 140.4.0.1"
tko="3" score="1"/>
<heuristic interval="2" program="ping -c1 -t2 10.0.0.1" tko="3"
score="1"/>
<heuristic interval="2" program="ping -c1 -t2 10.0.0.2" tko="3"
score="1"/>
</quorumd>
<fence_daemon clean_start="0" post_fail_delay="0" post_join_delay="3"/>
<clusternodes>
<clusternode name="ubierp1" nodeid="1" votes="1">
<fence>
<method name="1">
<device module_name=""
name="ubierp1-drac5"/>
</method>
</fence>
</clusternode>
<clusternode name="ubierp2" nodeid="2" votes="1">
<fence>
<method name="1">
<device module_name=""
name="ubierp2-drac5"/>
</method>
</fence>
</clusternode>
</clusternodes>
<cman expected_votes="3" two_node="0"/>
<fencedevices>
<fencedevice agent="fence_drac5" cmd_prompt="admin1-"
ipaddr="140.4.20.136" login="root" name="ubierp1-drac5" passwd="eds123"/>
<fencedevice agent="fence_drac5" cmd_prompt="admin1-"
ipaddr="140.4.20.137" login="root" name="ubierp2-drac5" passwd="eds123"/>
</fencedevices>
<rm>
<failoverdomains>
<failoverdomain name="etldom" nofailback="1"
ordered="1" restricted="0">
<failoverdomainnode name="ubierp1"
priority="1"/>
<failoverdomainnode name="ubierp2"
priority="2"/>
</failoverdomain>
<failoverdomain name="dwhdom" nofailback="1"
ordered="1" restricted="0">
<failoverdomainnode name="ubierp2"
priority="1"/>
<failoverdomainnode name="ubierp1"
priority="2"/>
</failoverdomain>
</failoverdomains>
<resources>
<ip address="140.4.20.132" monitor_link="1"/>
<ip address="140.4.20.135" monitor_link="1"/>
<lvm lv_name="" name="etl-vg" vg_name="vgetl"/>
<lvm lv_name="" name="dwh-vg" vg_name="vgdwh"/>
<fs device="/dev/mapper/vgetl-etlinfo" force_fsck="0"
force_unmount="1" fsid="34796" fstype="ext3" mountpoint="/etl/info"
name="etlinfo" options="" self_fence="0"/>
<fs device="/dev/mapper/vgetl-etldata" force_fsck="0"
force_unmount="1" fsid="17880" fstype="ext3" mountpoint="/etl/data"
name="etldata" options="" self_fence="0"/>
<fs device="/dev/mapper/vgdwh-dwhoradat" force_fsck="0"
force_unmount="1" fsid="19572" fstype="ext3" mountpoint="/dwh/data"
name="dwhoradat" options="" self_fence="0"/>
<fs device="/dev/mapper/vgdwh-dwhias" force_fsck="0"
force_unmount="1" fsid="34067" fstype="ext3" mountpoint="/dwh/ias"
name="dwhias" options="" self_fence="0"/>
<fs device="/dev/mapper/vgdwh-dwhbiee" force_fsck="0"
force_unmount="1" fsid="29551" fstype="ext3" mountpoint="/dwh/obiee"
name="dwhbiee" options="" self_fence="0"/>
</resources>
<service autostart="1" domain="dwhdom" exclusive="1"
name="dwhsrv" recovery="relocate">
<ip ref="140.4.20.135"/>
<lvm ref="dwh-vg"/>
<fs ref="dwhoradat"/>
<fs ref="dwhias"/>
<fs ref="dwhbiee"/>
</service>
<service autostart="1" domain="etldom" exclusive="1"
name="etlsrv" recovery="relocate">
<ip ref="140.4.20.132"/>
<lvm ref="etl-vg"/>
<fs ref="etlinfo"/>
<fs ref="etldata"/>
</service>
</rm>
</cluster>
--
Linux-cluster mailing list
Linux-cluster@redhat.com
https://www.redhat.com/mailman/listinfo/linux-cluster