Hi,

In an active/active configuration I often use hb_standby and hb_takeover in
Heartbeat R1 to move resources in the cluster. It was very easy to "standby"
a node and to "takeover" resources on the initial node.

But with Heartbeat R2, I use crm_standby to put a node into standby mode,
which works fine, but I can't find any command to take over resources. When
all resources are hosted by one node, due to standby or anything else, how
can we tell a resource to go back to its initial node?

I tried "crm_resource -M ...", but this command creates a rule that
prevents the resource from running on its original node until
"crm_resource -U ..." is issued. That is not what I want!

So my question is: in a situation where a resource is running on a node
that is not its initial node, how can we tell this resource to go back to
its initial node without creating rules in the CIB that prevent it from
running on its current node (that is, its backup node)?
 <cib>
   <configuration>
     <crm_config>
       <cluster_property_set id="cps_cluster">
         <attributes>
           <nvpair id="symmetric-cluster" name="symmetric-cluster" value="true"/>
           <!-- NOTE(review): -INFINITY default stickiness means a resource has an
                infinitely strong preference to LEAVE its current node whenever the
                placement scores allow it; resources will auto-fail-back, but they
                may also bounce on any score change. Confirm this is intended
                (0 gives neutral "follow the scores" behavior). -->
           <nvpair id="default-resource-stickiness" name="default-resource-stickiness" value="-INFINITY"/>
           <nvpair id="transition-idle-timeout" name="transition-idle-timeout" value="15s"/>
           <!-- NOTE(review): STONITH disabled — data integrity on split-brain is
                not guaranteed; confirm this is acceptable for this cluster. -->
           <nvpair id="stonith-enabled" name="stonith-enabled" value="false"/>
           <nvpair id="stonith-action" name="stonith-action" value="reboot"/>
           <!-- no-quorum-policy "ignore" is typical for a two-node cluster. -->
           <nvpair id="no-quorum-policy" name="no-quorum-policy" value="ignore"/>
           <nvpair id="is-managed-default" name="is-managed-default" value="true"/>
           <nvpair id="stop-orphan-resources" name="stop-orphan-resources" value="true"/>
           <nvpair id="stop-orphan-actions" name="stop-orphan-actions" value="true"/>
         </attributes>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <!-- Group "collector": service IP + Collector daemon, started in order
            and kept on the same node (collocated). -->
       <group multiple_active="stop_start" ordered="true" collocated="true" id="collector">
         <primitive id="ip_collector" class="ocf" type="IPaddr" provider="heartbeat">
           <operations>
             <op id="11" name="monitor" interval="5s" timeout="5s" on_fail="stop"/>
             <op id="21" name="start" timeout="30s" on_fail="stop"/>
             <op id="31" name="stop" timeout="30s" prereq="nothing" on_fail="ignore"/>
           </operations>
           <instance_attributes id="inst_ip_collector">
             <attributes>
               <nvpair id="ip_collector_role" name="target_role" value="Started"/>
               <nvpair id="ip100" name="ip" value="10.24.230.100"/>
               <nvpair id="net100" name="netmask" value="255.255.254.0"/>
               <nvpair id="nic100" name="nic" value="eth0"/>
             </attributes>
           </instance_attributes>
         </primitive>
         <primitive class="ocf" type="Collector" provider="heartbeat" id="bin_collector">
           <operations>
             <op id="110" name="monitor" interval="5s" timeout="5s" on_fail="stop"/>
             <op id="210" name="start" timeout="30s" on_fail="stop"/>
             <op id="310" name="stop" timeout="30s" prereq="nothing" on_fail="ignore"/>
           </operations>
           <instance_attributes id="inst_bin_collector">
             <attributes>
               <nvpair name="target_role" id="bin_collector_role" value="Started"/>
               <nvpair id="param00001" name="multiprocess" value="4"/>
               <nvpair id="param00002" name="log" value="true"/>
             </attributes>
           </instance_attributes>
         </primitive>
       </group>
       <!-- Group "database": MySQL service IP only. -->
       <group id="database" multiple_active="stop_start" ordered="true">
         <primitive id="ip_mysql" class="ocf" type="IPaddr" provider="heartbeat">
           <operations>
             <op id="12" name="monitor" interval="5s" timeout="5s" on_fail="stop"/>
             <op id="22" name="start" timeout="30s" on_fail="stop"/>
             <op id="32" name="stop" timeout="30s" prereq="nothing" on_fail="ignore"/>
           </operations>
           <instance_attributes id="inst_ip_mysql">
             <attributes>
               <nvpair id="ip_mysql_role" name="target_role" value="Started"/>
               <nvpair id="ip101" name="ip" value="10.24.230.101"/>
               <nvpair id="net101" name="netmask" value="255.255.254.0"/>
               <nvpair id="nic101" name="nic" value="eth0"/>
             </attributes>
           </instance_attributes>
         </primitive>
       </group>
       <!-- Group "nfsserver": NFS service IP only. -->
       <group id="nfsserver" multiple_active="stop_start" ordered="true">
         <primitive id="ip_nfs" class="ocf" type="IPaddr" provider="heartbeat">
           <operations>
             <op id="13" name="monitor" interval="5s" timeout="5s" on_fail="stop"/>
             <op id="23" name="start" timeout="30s" on_fail="stop"/>
             <op id="33" name="stop" timeout="30s" prereq="nothing" on_fail="ignore"/>
           </operations>
           <instance_attributes id="inst_ip_nfs">
             <attributes>
               <nvpair id="ip_nfs_role" name="target_role" value="Started"/>
               <nvpair id="ip102" name="ip" value="10.24.230.102"/>
               <nvpair id="net102" name="netmask" value="255.255.254.0"/>
               <nvpair id="nic102" name="nic" value="eth0"/>
             </attributes>
           </instance_attributes>
         </primitive>
       </group>
       <!-- Clone "pingd": one connectivity-monitor instance per node
            (clone_max=2, clone_node_max=1) publishing the "pingd" node
            attribute, dampened by 5s, weighted by 100 per reachable host. -->
       <clone id="pingd">
         <instance_attributes id="pingd_attrs">
           <attributes>
             <nvpair id="pingd_role" name="target_role" value="Started"/>
             <nvpair id="pingd-clone_node_max" name="clone_node_max" value="1"/>
             <nvpair id="pingd-clone_max" name="clone_max" value="2"/>
             <nvpair id="pingd-dampen" name="dampen" value="5s"/>
             <nvpair id="pingd-multiplier" name="multiplier" value="100"/>
             <nvpair id="pingd-name" name="name" value="pingd"/>
           </attributes>
         </instance_attributes>
         <primitive id="pingd-child" class="ocf" type="pingd" provider="heartbeat">
           <operations>
             <op id="pingd-child-monitor" name="monitor" interval="20s" timeout="40s" prereq="nothing"/>
             <op id="pingd-child-start" name="start" prereq="nothing"/>
           </operations>
         </primitive>
       </clone>
     </resources>
     <constraints>
       <!-- Preferred ("initial") nodes: collector and nfsserver on node 2,
            database on node 1. Scores of 100 are preferences, not mandates. -->
       <rsc_location id="location_collector" rsc="collector">
         <rule id="pref_location_collector_node2" score="100">
           <expression id="pref_location_collector_01" attribute="#uname" operation="eq" value="othaii02s"/>
         </rule>
       </rsc_location>
       <rsc_location id="location_nfsserver" rsc="nfsserver">
         <rule id="pref_location_nfsserver_node2" score="100">
           <expression id="pref_location_nfsserver_01" attribute="#uname" operation="eq" value="othaii02s"/>
         </rule>
       </rsc_location>
       <rsc_location id="location_database" rsc="database">
         <rule id="pref_location_database_node1" score="100">
           <expression id="pref_location_database_01" attribute="#uname" operation="eq" value="othaii01s"/>
         </rule>
       </rsc_location>
       <!-- FIX: boolean_op="or" is required here. The default combinator for
            multiple expressions in a rule is "and", and a node's #uname can
            never equal both "othaii01s" and "othaii02s" at once, so without
            it this rule never matched and the INFINITY preference was dead. -->
       <rsc_location id="location_pingd" rsc="pingd">
         <rule id="pref_location_pingd" score="INFINITY" boolean_op="or">
           <expression id="pref_location_pingd_01" attribute="#uname" operation="eq" value="othaii01s"/>
           <expression id="pref_location_pingd_02" attribute="#uname" operation="eq" value="othaii02s"/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
   <status/>
 </cib>

Attachment: ha.cf
Description: Binary data

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to