[In reply to] 



>On Feb 6, 2008 Maxim Veksler wrote: 
>
>You haven't attached the full cib.xml you have, in general you should 
>define "default-resource-stickiness" to prevent your resources jump 
>without "true" failures. 

Here is the ha.cf file and the entire CIB file.

I have set the default-resource-stickiness to "INFINITY", but after the 
primary fails (reboot or shutting down HA to simulate a failure), when it comes 
back into service, my resources switch back to the original primary server.

I must be missing something somewhere.

Michael

---------------ha.cf

crm on
auto_failback off
logfacility     local0
keepalive 2
deadtime 10
mcast eth0 239.0.0.1 694 1 0
node nfs_server1.prodea.local.lab nfs_server2.prodea.local.lab

---------------cib.xml
 <cib generated="true" admin_epoch="0" have_quorum="true" ignore_dtd="false" 
num_peers="2" cib_feature_revision="2.0" epoch="150" num_updates="1" 
cib-last-written="Wed Feb  6 15:42:18 2008" ccm_transition="2" 
dc_uuid="1d040f02-a506-4c46-b661-319c5e024e10">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <attributes>
           <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" 
value="2.1.3-node: 552305612591183b1628baa5bc6e903e0f1e26a3"/>
           <nvpair id="cib-bootstrap-options-last-lrm-refresh" 
name="last-lrm-refresh" value="1202334057"/>
           <nvpair id="cib-bootstrap-options-default-resource-stickiness" 
name="default-resource-stickiness" value="INFINITY"/>
         </attributes>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="20f292a2-876b-4b71-a3c1-5802d4af9b2d" 
uname="nfs_server2.prodea.local.lab" type="normal">
         <instance_attributes id="nodes-20f292a2-876b-4b71-a3c1-5802d4af9b2d">
           <attributes>
             <nvpair id="standby-20f292a2-876b-4b71-a3c1-5802d4af9b2d" 
name="standby" value="off"/>
           </attributes>
         </instance_attributes>
       </node>
       <node id="1d040f02-a506-4c46-b661-319c5e024e10" 
uname="nfs_server1.prodea.local.lab" type="normal">
         <instance_attributes id="nodes-1d040f02-a506-4c46-b661-319c5e024e10">
           <attributes>
             <nvpair id="standby-1d040f02-a506-4c46-b661-319c5e024e10" 
name="standby" value="off"/>
           </attributes>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <master_slave id="ms-drbd0">
         <meta_attributes id="ma-ms-drbd0">
           <attributes>
             <nvpair id="ma-ms-drbd0-1" name="clone_max" value="2"/>
             <nvpair id="ma-ms-drbd0-2" name="clone_node_max" value="1"/>
             <nvpair id="ma-ms-drbd0-3" name="master_max" value="1"/>
             <nvpair id="ma-ms-drbd0-4" name="master_node_max" value="1"/>
             <nvpair id="ma-ms-drbd0-5" name="notify" value="yes"/>
             <nvpair id="ma-ms-drbd0-6" name="globally_unique" value="false"/>
             <nvpair id="ma-ms-drbd0-7" name="target_role" value="default"/>
           </attributes>
         </meta_attributes>
         <primitive id="drbd0" class="ocf" provider="heartbeat" type="drbd">
           <instance_attributes id="ia-drbd0">
             <attributes>
               <nvpair id="ia-drbd0-1" name="drbd_resource" value="drbd0"/>
             </attributes>
           </instance_attributes>
         </primitive>
       </master_slave>
       <primitive class="ocf" provider="heartbeat" type="Filesystem" id="fs0">
         <meta_attributes id="ma-fs0">
           <attributes>
             <nvpair name="target_role" id="ma-fs0-1" value="started"/>
           </attributes>
         </meta_attributes>
         <instance_attributes id="ia-fs0">
           <attributes>
             <nvpair id="ia-fs0-1" name="fstype" value="ext3"/>
             <nvpair id="ia-fs0-2" name="directory" value="/data/"/>
             <nvpair id="ia-fs0-3" name="device" value="/dev/drbd0"/>
           </attributes>
         </instance_attributes>
       </primitive>
       <primitive class="ocf" type="IPaddr" provider="heartbeat" 
id="ClusterIpAddress">
         <meta_attributes id="ClusterIpAddress_meta_attrs">
           <attributes>
             <nvpair name="target_role" 
id="ClusterIpAddress_metaattr_target_role" value="started"/>
           </attributes>
         </meta_attributes>
         <instance_attributes id="ClusterIpAddress_instance_attrs">
           <attributes>
             <nvpair id="032248b1-7331-4900-b30b-122ba8c0a037" name="ip" 
value="172.24.1.167"/>
             <nvpair id="556f2518-70b5-4a21-9219-d3874271f5f9" name="nic" 
value="eth0"/>
           </attributes>
         </instance_attributes>
       </primitive>
       <primitive class="ocf" type="nfs" provider="heartbeat" id="NFS">
         <meta_attributes id="NFS_meta_attrs">
           <attributes>
             <nvpair name="target_role" id="NFS_metaattr_target_role" 
value="started"/>
           </attributes>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints>
       <rsc_order id="drbd0_before_fs0" from="fs0" action="start" to="ms-drbd0" 
to_action="promote"/>
       <rsc_order id="IpStart" from="ClusterIpAddress" type="after" to="fs0"/>
       <rsc_order id="drbd0_before_nfs" from="NFS" action="start" to="ms-drbd0" 
to_action="promote"/>
       <rsc_colocation id="fs0_on_drbd0" to="ms-drbd0" to_role="master" 
from="fs0" score="infinity"/>
       <rsc_colocation score="INFINITY" id="ClusterIpOnFS0" 
from="ClusterIpAddress" to="fs0"/>
       <rsc_colocation id="nfs_on_drbd0" to="ms-drbd0" to_role="master" 
from="NFS" score="infinity"/>
     </constraints>
   </configuration>
 </cib>


      
____________________________________________________________________________________
Be a better friend, newshound, and 
know-it-all with Yahoo! Mobile.  Try it now.  
http://mobile.yahoo.com/;_ylt=Ahu06i62sR8HDtDypao8Wcj9tAcJ 
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to