Hi,

It seems you are facing the same problem I ran into before.
I don't think you should use 2.1.3.
Please refer to these links:
http://www.gossamer-threads.com/lists/linuxha/users/47008
http://developerbugs.linux-foundation.org/show_bug.cgi?id=1859

The latest packages include the fix for this bug:
http://download.opensuse.org/repositories/server:/ha-clustering/
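
If you are not sure which version you have installed, you can check
with your package manager, for example on an RPM-based system (such
as the openSUSE packages above):

    # show the installed Heartbeat version
    rpm -q heartbeat

Anything from the repository above should be newer than 2.1.3.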

Thanks,
Junko

> I've run into a rather peculiar problem with a small, straightforward
> setup. My configuration consists of a group that contains two drbd
> resources, then LVM on top of them, followed by a filesystem mount, a
> virtual IP address and finally the service I want to serve, vsftpd.
> For vsftpd I've created an OCF-compliant script derived from the
> Pure-FTPd script. It does the job pretty well; monitoring is done by
> retrieving a file from the FTP server with wget (so that it also
> notices application hangs and the like). Besides the group, there is
> also a pingd clone to monitor network connectivity and move resources
> to the node that can reach the gateway. pingd works as expected;
> however, a failure in the vsftpd monitor is where the problem starts.
> If the cluster is freshly started, the failure *will* cause a
> failover. But on subsequent failures after the move, the failcount of
> the vsftpd resource is never increased above 1, even though the logs
> say the cluster actually intends to increase it. Below are my current
> cib.xml contents and a snippet from the log file. I tried leaving the
> FTP port blocked (that's how I was doing the testing) for a while,
> and the resource just kept restarting on the same node over and over
> again; still no failcount increase for the resource. I checked the
> failcount like this:
> 
>     [EMAIL PROTECTED] crm]# crm_failcount -V -r resource_ftp_vsFTPd -G
>      name=fail-count-resource_ftp_vsFTPd value=1
> 
> and after every subsequent failure the value was unchanged. Heartbeat
> version: 2.1.3.
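>
> As far as I can tell, the per-node value can also be queried or
> cleared explicitly with something like this (-U picks the node, -G
> reads the value, -D deletes the attribute):
>
>     # read the current failcount on one node
>     crm_failcount -U leviathan -r resource_ftp_vsFTPd -G
>     # reset it
>     crm_failcount -U leviathan -r resource_ftp_vsFTPd -D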
> 
> *The cib.xml file:*
> 
>     <cib admin_epoch="0" have_quorum="true" ignore_dtd="false"
>     num_peers="2" cib_feature_revision="2.0" generated="true"
>     epoch="108" num_updates="1" cib-last-written="Fri Jun 27 08:01:34
>     2008" ccm_transition="2"
> dc_uuid="d53610da-e239-4811-921b-cfad3c93ad99">
>        <configuration>
>          <crm_config>
>            <cluster_property_set id="cib-bootstrap-options">
>              <attributes>
>                <nvpair id="cib-bootstrap-options-dc-version"
>     name="dc-version" value="2.1.3-node:
>     552305612591183b1628baa5bc6e903e0f1e26a3"/>
>                <nvpair id="cib-bootstrap-options-last-lrm-refresh"
>     name="last-lrm-refresh" value="1214495001"/>
>                <nvpair name="default-resource-stickiness"
>     id="cib-bootstrap-options-default-resource-stickiness" value="0"/>
>                <nvpair name="default-resource-failure-stickiness"
>     id="cib-bootstrap-options-default-resource-failure-stickiness"
>     value="-100"/>
>              </attributes>
>            </cluster_property_set>
>          </crm_config>
>          <nodes>
>            <node id="d53610da-e239-4811-921b-cfad3c93ad99"
>     uname="leviathan" type="normal"/>
>            <node id="c91e7159-dc6f-412b-920a-6d463785c756"
>     uname="erebus" type="normal">
>              <instance_attributes
>     id="nodes-c91e7159-dc6f-412b-920a-6d463785c756">
>                <attributes>
>                  <nvpair
>     id="standby-c91e7159-dc6f-412b-920a-6d463785c756" name="standby"
>     value="off"/>
>                </attributes>
>              </instance_attributes>
>            </node>
>          </nodes>
>          <resources>
>            <group id="group_ftp">
>              <meta_attributes id="group_ftp_meta_attrs">
>                <attributes>
>                  <nvpair id="group_ftp_metaattr_ordered" name="ordered"
>     value="true"/>
>                  <nvpair id="group_ftp_metaattr_collocated"
>     name="collocated" value="true"/>
>                  <nvpair id="group_ftp_metaattr_target_role"
>     name="target_role" value="started"/>
>                  <nvpair
>     id="group_ftp_metaattr_resource_failure_stickiness"
>     name="resource_failure_stickiness" value="-300"/>
>                </attributes>
>              </meta_attributes>
>              <primitive id="resource_ftp_drbd0" class="heartbeat"
>     type="drbddisk" provider="heartbeat">
>                <instance_attributes
> id="resource_ftp_drbd0_instance_attrs">
>                  <attributes>
>                    <nvpair id="4e45d56e-c3f5-4e94-b059-13f5e68a3510"
>     name="1" value="drbd0"/>
>                  </attributes>
>                </instance_attributes>
>                <meta_attributes id="resource_ftp_drbd0_meta_attrs">
>                  <attributes/>
>                </meta_attributes>
>              </primitive>
>              <primitive id="resource_ftp_drbd1" class="heartbeat"
>     type="drbddisk" provider="heartbeat">
>                <instance_attributes
> id="resource_ftp_drbd1_instance_attrs">
>                  <attributes>
>                    <nvpair id="7aba9408-44c7-4875-b6d6-884ef55178cb"
>     name="1" value="drbd1"/>
>                  </attributes>
>                </instance_attributes>
>                <meta_attributes id="resource_ftp_drbd1_meta_attrs">
>                  <attributes/>
>                </meta_attributes>
>              </primitive>
>              <primitive id="resource_ftp_lvm" class="ocf" type="LVM"
>     provider="heartbeat">
>                <instance_attributes id="resource_ftp_lvm_instance_attrs">
>                  <attributes>
>                    <nvpair id="97f8e505-2749-4902-a1a1-1914f59a1f2d"
>     name="volgrpname" value="ftp"/>
>                  </attributes>
>                </instance_attributes>
>                <meta_attributes id="resource_ftp_lvm_meta_attrs">
>                  <attributes/>
>                </meta_attributes>
>              </primitive>
>              <primitive id="resource_ftp_filesystem" class="ocf"
>     type="Filesystem" provider="heartbeat">
>                <instance_attributes
>     id="resource_ftp_filesystem_instance_attrs">
>                  <attributes>
>                    <nvpair id="d6e70cb0-e80f-44c7-9259-3e41b9009d64"
>     name="device" value="/dev/ftp/ftp"/>
>                    <nvpair id="bc9e43a6-2af5-4fa7-8c06-e18d8d9e2a4a"
>     name="directory" value="/var/ftp"/>
>                    <nvpair id="912c9a01-f2fb-4940-b4c8-ab254aef416f"
>     name="fstype" value="ext3"/>
>                  </attributes>
>                </instance_attributes>
>                <meta_attributes id="resource_ftp_filesystem_meta_attrs">
>                  <attributes/>
>                </meta_attributes>
>              </primitive>
>              <primitive id="resource_ftp_IPaddr" class="ocf"
>     type="IPaddr" provider="heartbeat">
>                <instance_attributes
> id="resource_ftp_IPaddr_instance_attrs">
>                  <attributes>
>                    <nvpair id="807da10b-685a-4140-b776-8e515b347139"
>     name="ip" value="192.168.180.226"/>
>                    <nvpair id="b59ceed5-0e26-44f2-991b-fad218864f80"
>     name="nic" value="eth0"/>
>                    <nvpair id="5447b796-d512-4906-baa8-92e383996eb3"
>     name="cidr_netmask" value="24"/>
>                    <nvpair id="7b70936b-ad0f-4c7d-9117-b51fc0ee8750"
>     name="broadcast" value="192.168.180.255"/>
>                  </attributes>
>                </instance_attributes>
>                <meta_attributes id="resource_ftp_IPaddr_meta_attrs">
>                  <attributes>
>                    <nvpair id="resource_ftp_IPaddr_metaattr_target_role"
>     name="target_role" value="started"/>
>                  </attributes>
>                </meta_attributes>
>                <operations>
>                  <op id="50566ef0-e39a-4cba-8b7a-43eaa174d837"
>     name="monitor" interval="5s" timeout="20s" start_delay="1s"
>     disabled="false" role="Started" on_fail="restart"/>
>                </operations>
>              </primitive>
>              <primitive id="resource_ftp_vsFTPd" class="ocf"
>     type="vsftpd" provider="heartbeat">
>                <operations>
>                  <op id="b71441a8-1388-43e4-990b-f4e0a97c5880"
>     name="monitor" description="FTP file retrieval test" interval="30s"
>     timeout="20s" start_delay="1s" disabled="false" role="Started"
>     on_fail="restart"/>
>                </operations>
>                <meta_attributes id="resource_ftp_vsFTPd_meta_attrs">
>                  <attributes/>
>                </meta_attributes>
>              </primitive>
>            </group>
>            <clone id="IPMON">
>              <instance_attributes id="IPMON_instance_attrs">
>                <attributes>
>                  <nvpair id="IPMON_clone_max" name="clone_max" value="2"/>
>                  <nvpair id="IPMON_clone_node_max" name="clone_node_max"
>     value="1"/>
>                  <nvpair id="IPMON_target_role" name="target_role"
>     value="started"/>
>                </attributes>
>              </instance_attributes>
>              <primitive id="GWMON" class="ocf" type="pingd"
>     provider="heartbeat">
>                <instance_attributes id="GWMON_instance_attrs">
>                  <attributes>
>                    <nvpair id="GWMON_01" name="target_role"
>     value="started"/>
>                    <nvpair id="GWMON_02" name="user" value="root"/>
>                    <nvpair id="GWMON_03" name="dampen" value="5s"/>
>                    <nvpair id="GWMON_04" name="multiplier" value="1000"/>
>                    <nvpair id="GWMON_05" name="pidfile"
>     value="/var/run/heartbeat/ping.pid"/>
>                  </attributes>
>                </instance_attributes>
>                <operations>
>                  <op id="GWMON_OP_01" name="monitor" interval="5s"
>     timeout="5s"/>
>                </operations>
>              </primitive>
>            </clone>
>          </resources>
>          <constraints>
>            <rsc_location rsc="group_ftp" id="location_ftp_connected">
>              <rule id="location_ftp_connected_rule"
> score_attribute="pingd">
>                <expression id="location_ftp_expr_defined"
>     attribute="pingd" operation="defined"/>
>              </rule>
>              <rule id="location_ftp-rule-1" score="6000">
>                <expression id="rscloc-webserver-expr-1"
>     attribute="#uname" operation="eq" value="leviathan"/>
>              </rule>
>              <rule id="location_ftp-rule-2" score="6000">
>                <expression id="rscloc-webserver-expr-2"
>     attribute="#uname" operation="eq" value="erebus"/>
>              </rule>
>            </rsc_location>
>          </constraints>
>        </configuration>
>      </cib>
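>
> If I understand the scoring right, each node should end up with
> roughly:
>
>     6000 (location rule) + 1000 (pingd: 1 ping node * multiplier
>     1000) - 300 * failcount (failure stickiness)
>
> With a failcount of 1 (presumably 1 on each node after the initial
> failover) that gives 6700 on both, which matches the "2 nodes with
> equal score (6700)" lines in the log below; and since the failcount
> never goes above 1, the tie is never broken and the resource just
> keeps restarting in place.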
> 
> *and something from the log file:*
> 
> Jun 27 08:15:40 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=888, rc=7) complete
> Jun 27 08:15:40 leviathan tengine: [17604]: info: process_graph_event:
> Detected action resource_ftp_vsFTPd_monitor_30000 from a different
> transition: 106 vs. 108
> Jun 27 08:15:40 leviathan tengine: [17604]: info: update_abort_priority:
> Abort priority upgraded to 1000000
> Jun 27 08:15:40 leviathan tengine: [17604]: WARN: update_failcount:
> Updating failcount for resource_ftp_vsFTPd on
> d53610da-e239-4811-921b-cfad3c93ad99 after failed monitor: rc=7
> Jun 27 08:15:40 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC
> cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:40 leviathan crmd: [17593]: info: do_state_transition: All
> 2 cluster nodes are eligible to run resources.
> Jun 27 08:15:40 leviathan pengine: [17605]: info:
> determine_online_status: Node erebus is online
> Jun 27 08:15:40 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on erebus: -300
> Jun 27 08:15:40 leviathan pengine: [17605]: info:
> determine_online_status: Node leviathan is online
> Jun 27 08:15:40 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on leviathan: -300
> Jun 27 08:15:40 leviathan pengine: [17605]: WARN: unpack_rsc_op:
> Processing failed op resource_ftp_vsFTPd_monitor_30000 on leviathan: Error
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: group_print:
> Resource Group: group_ftp
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd0        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd1        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_lvm  (heartbeat::ocf:LVM):   Started leviathan
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_filesystem   (heartbeat::ocf:Filesystem):    Started
> leviathan
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_IPaddr       (heartbeat::ocf:IPaddr):        Started
> leviathan
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_vsFTPd       (heartbeat::ocf:vsftpd):        Started
> leviathan FAILED
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: clone_print: Clone
> Set: IPMON
> Jun 27 08:15:40 leviathan pengine: [17605]: notice: native_print:
> GWMON:0   (heartbeat::ocf:pingd): Started erebus
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: native_print:
> GWMON:1   (heartbeat::ocf:pingd): Started leviathan
> Jun 27 08:15:41 leviathan pengine: [17605]: info: native_assign_node: 2
> nodes with equal score (6700) for running the listed resources (chose
> leviathan):
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd0     (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd1     (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_lvm       (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_filesystem        (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_IPaddr    (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange:
> Recover resource resource_ftp_vsFTPd  (leviathan)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: StopRsc:
> leviathan        Stop resource_ftp_vsFTPd
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: StartRsc:
> leviathan        Start resource_ftp_vsFTPd
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: RecurringOp:
> leviathan         resource_ftp_vsFTPd_monitor_30000
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:0        (erebus)
> Jun 27 08:15:41 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:1        (leviathan)
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [
> input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:41 leviathan tengine: [17604]: info: unpack_graph: Unpacked
> transition 109: 14 actions in 14 synapses
> Jun 27 08:15:41 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 23 fired and confirmed
> Jun 27 08:15:41 leviathan pengine: [17605]: info: process_pe_message:
> Transition 109: PEngine Input stored in:
> /var/lib/heartbeat/pengine/pe-input-453.bz2
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 5: resource_ftp_vsFTPd_stop_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_stop_0
> key=5:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd: stop
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=888, rc=-2) Cancelled
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) Shutting down vsftpd:
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) [
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)   OK
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) ]
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_stop_0 (call=889, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_stop_0 (5) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 24 fired and confirmed
> Jun 27 08:15:41 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 9 fired and confirmed
> Jun 27 08:15:41 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 21 fired and confirmed
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 11: resource_ftp_drbd0_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd0_start_0
> key=11:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd0: start
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd0_start_0 (call=890, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd0_start_0 (11) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 13: resource_ftp_drbd1_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd1_start_0
> key=13:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd1: start
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd1_start_0 (call=891, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd1_start_0 (13) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 15: resource_ftp_lvm_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_lvm_start_0
> key=15:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_lvm: start
> Jun 27 08:15:41 leviathan LVM[27240]: INFO: Activating volume group ftp
> Jun 27 08:15:41 leviathan LVM[27240]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open
> Reading all physical volumes. This may take a while... Found volume
> group "ftp" using metadata type lvm2
> Jun 27 08:15:41 leviathan LVM[27240]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open 1
> logical volume(s) in volume group "ftp" now active
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 4 left open File
> descriptor 5 left open File descriptor 6 left open File descriptor 7
> left open File descriptor 8 left open File descriptor 9 left open
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)     Using volume group(s) on command line
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)     Finding volume group "ftp"
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_lvm_start_0 (call=892, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_lvm_start_0 (15) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 17: resource_ftp_filesystem_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_filesystem_start_0
> key=17:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info:
> rsc:resource_ftp_filesystem: start
> Jun 27 08:15:41 leviathan Filesystem[27274]: INFO: Running start for
> /dev/ftp/ftp on /var/ftp
> Jun 27 08:15:41 leviathan Filesystem[27274]: INFO: Filesystem /var/ftp
> is already mounted.
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_filesystem_start_0 (call=893, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_filesystem_start_0 (17) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 19: resource_ftp_IPaddr_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_start_0
> key=19:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_IPaddr:
> start
> Jun 27 08:15:41 leviathan IPaddr[27317]: INFO: Using calculated netmask
> for 192.168.180.226: 255.255.255.0
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_start_0 (call=894, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_start_0 (19) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 2: resource_ftp_IPaddr_monitor_5000 on leviathan
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 20: resource_ftp_vsFTPd_start_0 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_monitor_5000
> key=2:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_start_0
> key=20:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd:
> start
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=886, rc=-2) Cancelled
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) Starting vsftpd for vsftpd:
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) [
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)   OK
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) ]
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:41 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:41 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_start_0 (call=896, rc=0) complete
> Jun 27 08:15:41 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_start_0 (20) confirmed on leviathan (rc=0)
> Jun 27 08:15:41 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 22 fired and confirmed
> Jun 27 08:15:41 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 4: resource_ftp_vsFTPd_monitor_30000 on leviathan
> Jun 27 08:15:41 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_monitor_30000
> key=4:109:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:42 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=895, rc=0) complete
> Jun 27 08:15:42 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_monitor_5000 (2) confirmed on leviathan (rc=0)
> Jun 27 08:15:45 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=897, rc=7) complete
> Jun 27 08:15:45 leviathan tengine: [17604]: info: status_from_rc:
> Re-mapping op status to LRM_OP_ERROR for rc=7
> Jun 27 08:15:45 leviathan tengine: [17604]: WARN: status_from_rc: Action
> monitor on leviathan failed (target: <null> vs. rc: 7): Error
> Jun 27 08:15:45 leviathan tengine: [17604]: WARN: update_failcount:
> Updating failcount for resource_ftp_vsFTPd on
> d53610da-e239-4811-921b-cfad3c93ad99 after failed monitor: rc=7
> Jun 27 08:15:45 leviathan tengine: [17604]: info: update_abort_priority:
> Abort priority upgraded to 1
> Jun 27 08:15:45 leviathan tengine: [17604]: info: update_abort_priority:
> Abort action 0 superceeded by 2
> Jun 27 08:15:45 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_monitor_30000 (4) confirmed on leviathan (rc=4)
> Jun 27 08:15:45 leviathan tengine: [17604]: info: run_graph: Transition
> 109: (Complete=14, Pending=0, Fired=0, Skipped=0, Incomplete=0)
> Jun 27 08:15:45 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [
> input=I_PE_CALC cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:45 leviathan crmd: [17593]: info: do_state_transition: All
> 2 cluster nodes are eligible to run resources.
> Jun 27 08:15:45 leviathan pengine: [17605]: info:
> determine_online_status: Node erebus is online
> Jun 27 08:15:45 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on erebus: -300
> Jun 27 08:15:45 leviathan pengine: [17605]: info:
> determine_online_status: Node leviathan is online
> Jun 27 08:15:45 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on leviathan: -300
> Jun 27 08:15:45 leviathan pengine: [17605]: WARN: unpack_rsc_op:
> Processing failed op resource_ftp_vsFTPd_monitor_30000 on leviathan: Error
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: group_print:
> Resource Group: group_ftp
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd0        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd1        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_lvm  (heartbeat::ocf:LVM):   Started leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_filesystem   (heartbeat::ocf:Filesystem):    Started
> leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_IPaddr       (heartbeat::ocf:IPaddr):        Started
> leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_vsFTPd       (heartbeat::ocf:vsftpd):        Started
> leviathan FAILED
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: clone_print: Clone
> Set: IPMON
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> GWMON:0   (heartbeat::ocf:pingd): Started erebus
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: native_print:
> GWMON:1   (heartbeat::ocf:pingd): Started leviathan
> Jun 27 08:15:45 leviathan pengine: [17605]: info: native_assign_node: 2
> nodes with equal score (6700) for running the listed resources (chose
> leviathan):
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd0     (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd1     (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_lvm       (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_filesystem        (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_IPaddr    (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange:
> Recover resource resource_ftp_vsFTPd  (leviathan)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: StopRsc:
> leviathan        Stop resource_ftp_vsFTPd
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: StartRsc:
> leviathan        Start resource_ftp_vsFTPd
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: RecurringOp:
> leviathan         resource_ftp_vsFTPd_monitor_30000
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:0        (erebus)
> Jun 27 08:15:45 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:1        (leviathan)
> Jun 27 08:15:45 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [
> input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:45 leviathan tengine: [17604]: info: unpack_graph: Unpacked
> transition 110: 14 actions in 14 synapses
> Jun 27 08:15:45 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 23 fired and confirmed
> Jun 27 08:15:45 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 5: resource_ftp_vsFTPd_stop_0 on leviathan
> Jun 27 08:15:46 leviathan pengine: [17605]: info: process_pe_message:
> Transition 110: PEngine Input stored in:
> /var/lib/heartbeat/pengine/pe-input-454.bz2
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_stop_0
> key=5:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd: stop
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=897, rc=-2) Cancelled
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) Shutting down vsftpd:
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) [
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)   OK  ]
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_stop_0 (call=898, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_stop_0 (5) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 24 fired and confirmed
> Jun 27 08:15:46 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 9 fired and confirmed
> Jun 27 08:15:46 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 21 fired and confirmed
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 11: resource_ftp_drbd0_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd0_start_0
> key=11:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd0: start
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd0_start_0 (call=899, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd0_start_0 (11) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 13: resource_ftp_drbd1_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd1_start_0
> key=13:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd1: start
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd1_start_0 (call=900, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd1_start_0 (13) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 15: resource_ftp_lvm_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_lvm_start_0
> key=15:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_lvm: start
> Jun 27 08:15:46 leviathan LVM[27474]: INFO: Activating volume group ftp
> Jun 27 08:15:46 leviathan LVM[27474]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open
> Reading all physical volumes. This may take a while... Found volume
> group "ftp" using metadata type lvm2
> Jun 27 08:15:46 leviathan LVM[27474]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open 1
> logical volume(s) in volume group "ftp" now active
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 4 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 5 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 6 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 7 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 8 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 9 left open
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)   Using volume group(s) on command
> line     Finding volume group "ftp"
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_lvm_start_0 (call=901, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_lvm_start_0 (15) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 17: resource_ftp_filesystem_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_filesystem_start_0
> key=17:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info:
> rsc:resource_ftp_filesystem: start
> Jun 27 08:15:46 leviathan Filesystem[27508]: INFO: Running start for
> /dev/ftp/ftp on /var/ftp
> Jun 27 08:15:46 leviathan Filesystem[27508]: INFO: Filesystem /var/ftp
> is already mounted.
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_filesystem_start_0 (call=902, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_filesystem_start_0 (17) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 19: resource_ftp_IPaddr_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_start_0
> key=19:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_IPaddr:
> start
> Jun 27 08:15:46 leviathan IPaddr[27551]: INFO: Using calculated netmask
> for 192.168.180.226: 255.255.255.0
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_start_0 (call=903, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_start_0 (19) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 2: resource_ftp_IPaddr_monitor_5000 on leviathan
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 20: resource_ftp_vsFTPd_start_0 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_monitor_5000
> key=2:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_start_0
> key=20:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd:
> start
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=895, rc=-2) Cancelled
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) Starting vsftpd for vsftpd:
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) [
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)   OK
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) ]
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:46 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:46 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_start_0 (call=905, rc=0) complete
> Jun 27 08:15:46 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_start_0 (20) confirmed on leviathan (rc=0)
> Jun 27 08:15:46 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 22 fired and confirmed
> Jun 27 08:15:46 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 4: resource_ftp_vsFTPd_monitor_30000 on leviathan
> Jun 27 08:15:46 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_monitor_30000
> key=4:110:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:47 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=904, rc=0) complete
> Jun 27 08:15:47 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_monitor_5000 (2) confirmed on leviathan (rc=0)
> Jun 27 08:15:50 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=906, rc=7) complete
> Jun 27 08:15:50 leviathan tengine: [17604]: info: status_from_rc:
> Re-mapping op status to LRM_OP_ERROR for rc=7
> Jun 27 08:15:50 leviathan tengine: [17604]: WARN: status_from_rc: Action
> monitor on leviathan failed (target: <null> vs. rc: 7): Error
> Jun 27 08:15:50 leviathan tengine: [17604]: WARN: update_failcount:
> Updating failcount for resource_ftp_vsFTPd on
> d53610da-e239-4811-921b-cfad3c93ad99 after failed monitor: rc=7
> Jun 27 08:15:50 leviathan tengine: [17604]: info: update_abort_priority:
> Abort priority upgraded to 1
> Jun 27 08:15:50 leviathan tengine: [17604]: info: update_abort_priority:
> Abort action 0 superceeded by 2
> Jun 27 08:15:50 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_monitor_30000 (4) confirmed on leviathan (rc=4)
> Jun 27 08:15:50 leviathan tengine: [17604]: info: run_graph: Transition
> 110: (Complete=14, Pending=0, Fired=0, Skipped=0, Incomplete=0)
> Jun 27 08:15:50 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [
> input=I_PE_CALC cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:50 leviathan crmd: [17593]: info: do_state_transition: All
> 2 cluster nodes are eligible to run resources.
> Jun 27 08:15:50 leviathan pengine: [17605]: info:
> determine_online_status: Node erebus is online
> Jun 27 08:15:50 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on erebus: -300
> Jun 27 08:15:50 leviathan pengine: [17605]: info:
> determine_online_status: Node leviathan is online
> Jun 27 08:15:50 leviathan pengine: [17605]: info:
> common_apply_stickiness: Setting failure stickiness for
> resource_ftp_vsFTPd on leviathan: -300
> Jun 27 08:15:50 leviathan pengine: [17605]: WARN: unpack_rsc_op:
> Processing failed op resource_ftp_vsFTPd_monitor_30000 on leviathan: Error
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: group_print:
> Resource Group: group_ftp
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd0        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_drbd1        (heartbeat:drbddisk):   Started leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_lvm  (heartbeat::ocf:LVM):   Started leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_filesystem   (heartbeat::ocf:Filesystem):    Started
> leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_IPaddr       (heartbeat::ocf:IPaddr):        Started
> leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> resource_ftp_vsFTPd       (heartbeat::ocf:vsftpd):        Started
> leviathan FAILED
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: clone_print: Clone
> Set: IPMON
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> GWMON:0   (heartbeat::ocf:pingd): Started erebus
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: native_print:
> GWMON:1   (heartbeat::ocf:pingd): Started leviathan
> Jun 27 08:15:50 leviathan pengine: [17605]: info: native_assign_node: 2
> nodes with equal score (6700) for running the listed resources (chose
> leviathan):
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd0     (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_drbd1     (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_lvm       (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_filesystem        (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource resource_ftp_IPaddr    (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange:
> Recover resource resource_ftp_vsFTPd  (leviathan)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: StopRsc:
> leviathan        Stop resource_ftp_vsFTPd
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: StartRsc:
> leviathan        Start resource_ftp_vsFTPd
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: RecurringOp:
> leviathan         resource_ftp_vsFTPd_monitor_30000
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:0        (erebus)
> Jun 27 08:15:50 leviathan pengine: [17605]: notice: NoRoleChange: Leave
> resource GWMON:1        (leviathan)
> Jun 27 08:15:50 leviathan crmd: [17593]: info: do_state_transition:
> State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [
> input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=route_message ]
> Jun 27 08:15:50 leviathan tengine: [17604]: info: unpack_graph: Unpacked
> transition 111: 14 actions in 14 synapses
> Jun 27 08:15:50 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 23 fired and confirmed
> Jun 27 08:15:50 leviathan pengine: [17605]: info: process_pe_message:
> Transition 111: PEngine Input stored in:
> /var/lib/heartbeat/pengine/pe-input-455.bz2
> Jun 27 08:15:50 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 5: resource_ftp_vsFTPd_stop_0 on leviathan
> Jun 27 08:15:50 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_stop_0
> key=5:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:50 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd: stop
> Jun 27 08:15:50 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=906, rc=-2) Cancelled
> Jun 27 08:15:50 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) Shutting down vsftpd:
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) [
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)   OK
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout) ]
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:stop:stdout)
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_stop_0 (call=907, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_stop_0 (5) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 24 fired and confirmed
> Jun 27 08:15:51 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 9 fired and confirmed
> Jun 27 08:15:51 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 21 fired and confirmed
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 11: resource_ftp_drbd0_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd0_start_0
> key=11:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd0: start
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd0_start_0 (call=908, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd0_start_0 (11) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 13: resource_ftp_drbd1_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_drbd1_start_0
> key=13:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: rsc:resource_ftp_drbd1: start
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_drbd1_start_0 (call=909, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_drbd1_start_0 (13) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 15: resource_ftp_lvm_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_lvm_start_0
> key=15:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: rsc:resource_ftp_lvm: start
> Jun 27 08:15:51 leviathan LVM[27708]: INFO: Activating volume group ftp
> Jun 27 08:15:51 leviathan LVM[27708]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open
> Reading all physical volumes. This may take a while... Found volume
> group "ftp" using metadata type lvm2
> Jun 27 08:15:51 leviathan LVM[27708]: INFO: File descriptor 4 left open
> File descriptor 5 left open File descriptor 6 left open File descriptor
> 7 left open File descriptor 8 left open File descriptor 9 left open 1
> logical volume(s) in volume group "ftp" now active
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr) File descriptor 4 left open File
> descriptor 5 left open File descriptor 6 left open File descriptor 7
> left open File descriptor 8 left open File descriptor 9 left open
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)     Using volume group(s) on command line
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_lvm:start:stderr)     Finding volume group "ftp"
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_lvm_start_0 (call=910, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_lvm_start_0 (15) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 17: resource_ftp_filesystem_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_filesystem_start_0
> key=17:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info:
> rsc:resource_ftp_filesystem: start
> Jun 27 08:15:51 leviathan Filesystem[27742]: INFO: Running start for
> /dev/ftp/ftp on /var/ftp
> Jun 27 08:15:51 leviathan Filesystem[27742]: INFO: Filesystem /var/ftp
> is already mounted.
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_filesystem_start_0 (call=911, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_filesystem_start_0 (17) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 19: resource_ftp_IPaddr_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_start_0
> key=19:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: rsc:resource_ftp_IPaddr:
> start
> Jun 27 08:15:51 leviathan IPaddr[27785]: INFO: Using calculated netmask
> for 192.168.180.226: 255.255.255.0
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_start_0 (call=912, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_start_0 (19) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 2: resource_ftp_IPaddr_monitor_5000 on leviathan
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 20: resource_ftp_vsFTPd_start_0 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_IPaddr_monitor_5000
> key=2:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_start_0
> key=20:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: rsc:resource_ftp_vsFTPd:
> start
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=904, rc=-2) Cancelled
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) Starting vsftpd for vsftpd:
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) [
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)   OK
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout) ]
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:51 leviathan lrmd: [17590]: info: RA output:
> (resource_ftp_vsFTPd:start:stdout)
> Jun 27 08:15:51 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_start_0 (call=914, rc=0) complete
> Jun 27 08:15:51 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_vsFTPd_start_0 (20) confirmed on leviathan (rc=0)
> Jun 27 08:15:51 leviathan tengine: [17604]: info: te_pseudo_action:
> Pseudo action 22 fired and confirmed
> Jun 27 08:15:51 leviathan tengine: [17604]: info: send_rsc_command:
> Initiating action 4: resource_ftp_vsFTPd_monitor_30000 on leviathan
> Jun 27 08:15:51 leviathan crmd: [17593]: info: do_lrm_rsc_op: Performing
> op=resource_ftp_vsFTPd_monitor_30000
> key=4:111:14ae0028-3403-494e-a276-9eb43c8afe4c)
> Jun 27 08:15:52 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_IPaddr_monitor_5000 (call=913, rc=0) complete
> Jun 27 08:15:52 leviathan tengine: [17604]: info: match_graph_event:
> Action resource_ftp_IPaddr_monitor_5000 (2) confirmed on leviathan (rc=0)
> Jun 27 08:15:55 leviathan crmd: [17593]: info: process_lrm_event: LRM
> operation resource_ftp_vsFTPd_monitor_30000 (call=915, rc=7) complete
> Jun 27 08:15:55 leviathan tengine: [17604]: info: status_from_rc:
> Re-mapping op status to LRM_OP_ERROR for rc=7
> Jun 27 08:15:55 leviathan tengine: [17604]: WARN: status_from_rc: Action
> monitor on leviathan failed (target: <null> vs. rc: 7): Error
> Jun 27 08:15:55 leviathan tengine: [17604]: WARN: update_failcount:
> Updating failcount for resource_ftp_vsFTPd on
> d53610da-e239-4811-921b-cfad3c93ad99 after failed monitor: rc=7
> 
> *ha.cf file:*
> 
>     logfacility local0
>     keepalive 2
>     deadtime 30
>     warntime 10
>     initdead 60
>     udpport 694
>     ucast eth1 10.0.0.20
>     auto_failback off
>     node leviathan
>     node erebus
>     apiauth ipfail uid=hacluster
>     apiauth ccm uid=hacluster
>     apiauth cms uid=hacluster
>     apiauth ping gid=haclient uid=root
>     apiauth default gid=haclient
>     crm yes
>     ping 192.168.180.253
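>
> For what it's worth, the "ping 192.168.180.253" directive above is
> the host the pingd clone measures; with one ping node and a
> multiplier of 1000 the pingd attribute should be 1000 while the
> gateway is reachable. If in doubt, it can be checked in the status
> section with something like:
>
>     # look for the pingd attribute on each node
>     cibadmin -Q -o status | grep pingd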
> 
> 
> At this point I'm pretty stuck... basically I just want the cluster
> to try restarting the resource a couple of times and, if that doesn't
> succeed, move it to the other node.
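>
> One thing I was considering, based on what I've read about the
> scoring: give the resources a positive stickiness and let the
> negative failure stickiness eat it up, so a resource only moves once
> failcount * |failure stickiness| outweighs the stickiness. With
> resource_failure_stickiness left at -300, something like this
> (untested; the exact arithmetic for groups may differ, and it assumes
> the other node's failcount is zero) should keep the group in place
> for about three failures before moving it, once the failcount
> actually increments:
>
>     # raise the default stickiness so failures have to add up
>     # before a move (modifies the existing nvpair by its id)
>     cibadmin -o crm_config -M -X \
>       '<nvpair id="cib-bootstrap-options-default-resource-stickiness"
>                name="default-resource-stickiness" value="1000"/>'
>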
> Any ideas?
> 
> Thanks,
> 
> Andrei Neagoe.
> _______________________________________________
> Linux-HA mailing list
> Linux-HA@lists.linux-ha.org
> http://lists.linux-ha.org/mailman/listinfo/linux-ha
> See also: http://linux-ha.org/ReportingProblems
