<div dir="ltr">here is the config ....<br><br><br>&lt;cib epoch=&quot;20&quot; num_updates=&quot;0&quot; admin_epoch=&quot;0&quot; validate-with=&quot;pacemaker-1.2&quot; cib-last-written=&quot;Wed Mar  9 00:56:57 2016&quot; update-origin=&quot;server02&quot; update-client=&quot;cibadmin&quot; update-user=&quot;hacluster&quot; crm_feature_set=&quot;3.0.8&quot; have-quorum=&quot;1&quot; dc-uuid=&quot;server01&quot;&gt;<br>  &lt;configuration&gt;<br>    &lt;crm_config&gt;<br>      &lt;cluster_property_set id=&quot;cib-bootstrap-options&quot;&gt;<br>        &lt;nvpair name=&quot;stonith-enabled&quot; value=&quot;true&quot; id=&quot;cib-bootstrap-options-stonith-enabled&quot;/&gt;<br>        &lt;nvpair name=&quot;no-quorum-policy&quot; value=&quot;ignore&quot; id=&quot;cib-bootstrap-options-no-quorum-policy&quot;/&gt;<br>        &lt;nvpair id=&quot;cib-bootstrap-options-dc-version&quot; name=&quot;dc-version&quot; value=&quot;1.1.11-3ca8c3b&quot;/&gt;<br>        &lt;nvpair id=&quot;cib-bootstrap-options-cluster-infrastructure&quot; name=&quot;cluster-infrastructure&quot; value=&quot;classic openais (with plugin)&quot;/&gt;<br>        &lt;nvpair id=&quot;cib-bootstrap-options-expected-quorum-votes&quot; name=&quot;expected-quorum-votes&quot; value=&quot;2&quot;/&gt;<br>        &lt;nvpair name=&quot;stonith-action&quot; value=&quot;reboot&quot; id=&quot;cib-bootstrap-options-stonith-action&quot;/&gt;<br>        &lt;nvpair name=&quot;stonith-timeout&quot; value=&quot;150s&quot; id=&quot;cib-bootstrap-options-stonith-timeout&quot;/&gt;<br>      &lt;/cluster_property_set&gt;<br>    &lt;/crm_config&gt;<br>    &lt;nodes&gt;<br>      &lt;node id=&quot;server02&quot; uname=&quot;server02&quot;/&gt;<br>      &lt;node id=&quot;server01&quot; uname=&quot;server01&quot;/&gt;<br>    &lt;/nodes&gt;<br>    &lt;resources&gt;<br>      &lt;primitive id=&quot;STONITH-server01&quot; class=&quot;stonith&quot; type=&quot;external/ipmi&quot;&gt;<br>        &lt;operations&gt;<br>          
&lt;op name=&quot;monitor&quot; interval=&quot;0&quot; timeout=&quot;60s&quot; id=&quot;STONITH-server01-monitor-0&quot;/&gt;<br>          &lt;op name=&quot;monitor&quot; interval=&quot;300s&quot; timeout=&quot;60s&quot; on-fail=&quot;restart&quot; id=&quot;STONITH-server01-monitor-300s&quot;/&gt;<br>          &lt;op name=&quot;start&quot; interval=&quot;0&quot; timeout=&quot;60s&quot; on-fail=&quot;restart&quot; id=&quot;STONITH-server01-start-0&quot;/&gt;<br>        &lt;/operations&gt;<br>        &lt;instance_attributes id=&quot;STONITH-server01-instance_attributes&quot;&gt;<br>          &lt;nvpair name=&quot;hostname&quot; value=&quot;server01&quot; id=&quot;STONITH-server01-instance_attributes-hostname&quot;/&gt;<br>          &lt;nvpair name=&quot;ipaddr&quot; value=&quot;server01-ipmi&quot; id=&quot;STONITH-server01-instance_attributes-ipaddr&quot;/&gt;<br>          &lt;nvpair name=&quot;userid&quot; value=&quot;administrator&quot; id=&quot;STONITH-server01-instance_attributes-userid&quot;/&gt;<br>          &lt;nvpair name=&quot;passwd&quot; value=&quot;To12&quot; id=&quot;STONITH-server01-instance_attributes-passwd&quot;/&gt;<br>          &lt;nvpair name=&quot;interface&quot; value=&quot;lanplus&quot; id=&quot;STONITH-server01-instance_attributes-interface&quot;/&gt;<br>        &lt;/instance_attributes&gt;<br>      &lt;/primitive&gt;<br>      &lt;primitive id=&quot;STONITH-server02&quot; class=&quot;stonith&quot; type=&quot;external/ipmi&quot;&gt;<br>        &lt;operations&gt;<br>          &lt;op name=&quot;monitor&quot; interval=&quot;0&quot; timeout=&quot;60s&quot; id=&quot;STONITH-server02-monitor-0&quot;/&gt;<br>          &lt;op name=&quot;monitor&quot; interval=&quot;300s&quot; timeout=&quot;60s&quot; on-fail=&quot;restart&quot; id=&quot;STONITH-server02-monitor-300s&quot;/&gt;<br>          &lt;op name=&quot;start&quot; interval=&quot;0&quot; timeout=&quot;60s&quot; on-fail=&quot;restart&quot; id=&quot;STONITH-server02-start-0&quot;/&gt;<br>        
&lt;/operations&gt;<br>        &lt;instance_attributes id=&quot;STONITH-server02-instance_attributes&quot;&gt;<br>          &lt;nvpair name=&quot;hostname&quot; value=&quot;server02&quot; id=&quot;STONITH-server02-instance_attributes-hostname&quot;/&gt;<br>          &lt;nvpair name=&quot;ipaddr&quot; value=&quot;server02-ipmi&quot; id=&quot;STONITH-server02-instance_attributes-ipaddr&quot;/&gt;<br>          &lt;nvpair name=&quot;userid&quot; value=&quot;administrator&quot; id=&quot;STONITH-server02-instance_attributes-userid&quot;/&gt;<br>          &lt;nvpair name=&quot;passwd&quot; value=&quot;To12&quot; id=&quot;STONITH-server02-instance_attributes-passwd&quot;/&gt;<br>          &lt;nvpair name=&quot;interface&quot; value=&quot;lanplus&quot; id=&quot;STONITH-server02-instance_attributes-interface&quot;/&gt;<br>        &lt;/instance_attributes&gt;<br>      &lt;/primitive&gt;<br>      &lt;primitive id=&quot;VIRTUAL-IP&quot; class=&quot;ocf&quot; provider=&quot;heartbeat&quot; type=&quot;IPaddr2&quot;&gt;<br>        &lt;instance_attributes id=&quot;VIRTUAL-IP-instance_attributes&quot;&gt;<br>          &lt;nvpair name=&quot;ip&quot; value=&quot;10.0.0.44&quot; id=&quot;VIRTUAL-IP-instance_attributes-ip&quot;/&gt;<br>        &lt;/instance_attributes&gt;<br>        &lt;operations&gt;<br>          &lt;op name=&quot;monitor&quot; timeout=&quot;20s&quot; interval=&quot;10s&quot; id=&quot;VIRTUAL-IP-monitor-10s&quot;/&gt;<br>        &lt;/operations&gt;<br>        &lt;meta_attributes id=&quot;VIRTUAL-IP-meta_attributes&quot;&gt;<br>          &lt;nvpair name=&quot;is-managed&quot; value=&quot;true&quot; id=&quot;VIRTUAL-IP-meta_attributes-is-managed&quot;/&gt;<br>          &lt;nvpair name=&quot;target-role&quot; value=&quot;Started&quot; id=&quot;VIRTUAL-IP-meta_attributes-target-role&quot;/&gt;<br>        &lt;/meta_attributes&gt;<br>      &lt;/primitive&gt;<br>    &lt;/resources&gt;<br>    &lt;constraints&gt;<br>      &lt;rsc_location id=&quot;LOC_STONITH_server01&quot; 
rsc=&quot;STONITH-server01&quot; score=&quot;INFINITY&quot; node=&quot;server02&quot;/&gt;<br>      &lt;rsc_location id=&quot;LOC_STONITH_server02&quot; rsc=&quot;STONITH-server02&quot; score=&quot;INFINITY&quot; node=&quot;server01&quot;/&gt;<br>    &lt;/constraints&gt;<br>    &lt;rsc_defaults&gt;<br>      &lt;meta_attributes id=&quot;rsc-options&quot;&gt;<br>        &lt;nvpair name=&quot;migration-threshold&quot; value=&quot;5000&quot; id=&quot;rsc-options-migration-threshold&quot;/&gt;<br>        &lt;nvpair name=&quot;resource-stickiness&quot; value=&quot;1000&quot; id=&quot;rsc-options-resource-stickiness&quot;/&gt;<br>      &lt;/meta_attributes&gt;<br>    &lt;/rsc_defaults&gt;<br>    &lt;op_defaults&gt;<br>      &lt;meta_attributes id=&quot;op-options&quot;&gt;<br>        &lt;nvpair name=&quot;timeout&quot; value=&quot;600&quot; id=&quot;op-options-timeout&quot;/&gt;<br>        &lt;nvpair name=&quot;record-pending&quot; value=&quot;false&quot; id=&quot;op-options-record-pending&quot;/&gt;<br>      &lt;/meta_attributes&gt;<br>    &lt;/op_defaults&gt;<br>  &lt;/configuration&gt;<br>&lt;/cib&gt;<br><br></div><div class="gmail_extra"><br><div class="gmail_quote">On Wed, Mar 9, 2016 at 1:25 PM, emmanuel segura <span dir="ltr">&lt;<a href="mailto:emi2fast@gmail.com" target="_blank">emi2fast@gmail.com</a>&gt;</span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">I think you should give the parameters to the stonith agent, anyway<br>
show your config.<br>
<div><div class="h5"><br>
2016-03-09 5:29 GMT+01:00 vija ar &lt;<a href="mailto:vjav78@gmail.com">vjav78@gmail.com</a>&gt;:<br>
&gt; I have configured SLEHA cluster on cisco ucs boxes with ipmi configured, i<br>
&gt; have tested IPMI using ipmitool, however for ipmitool to function neatly i have<br>
&gt; to pass parameter -y i.e. &lt;hex key&gt; along with username and password,<br>
&gt;<br>
&gt; however, to configure stonith there is no parameter in pacemaker to pass<br>
&gt; the &lt;hex key&gt;, due to which stonith is failing<br>
&gt;<br>
&gt; can you please let me know if there is any way to add it or is this a bug?<br>
&gt;<br>
&gt; *******************<br>
&gt;<br>
&gt;<br>
&gt;<br>
&gt; Mar  9 00:26:28 server02 stonith: external_status: &#39;ipmi status&#39; failed with<br>
&gt; rc 1<br>
&gt; Mar  9 00:26:28 server02 stonith: external/ipmi device not accessible.<br>
&gt; Mar  9 00:26:28 server02 stonith-ng[99114]:   notice: log_operation:<br>
&gt; Operation &#39;monitor&#39; [99200] for device &#39;STONITH-server02&#39; returned: -201<br>
&gt; (Generic Pacemaker error)<br>
&gt; Mar  9 00:26:28 server02 stonith-ng[99114]:  warning: log_operation:<br>
&gt; STONITH-server02:99200 [ Performing: stonith -t external/ipmi -S ]<br>
&gt; Mar  9 00:26:28 server02 stonith-ng[99114]:  warning: log_operation:<br>
&gt; STONITH-server02:99200 [ logd is not runningfailed:  1 ]<br>
&gt; Mar  9 00:26:28 server02 crmd[99118]:    error: process_lrm_event: LRM<br>
&gt; operation STONITH-server02_start_0 (call=13, status=4, cib-update=13,<br>
&gt; confirmed=true) Error<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server02 (INFINITY)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 35: fail-count-STONITH-server02=INFINITY<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server02<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 37: last-failure-STONITH-server02=1457463388<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server02 (INFINITY)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 39: fail-count-STONITH-server02=INFINITY<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server02<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 41: last-failure-STONITH-server02=1457463388<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server02 (INFINITY)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 43: fail-count-STONITH-server02=INFINITY<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server02<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 45: last-failure-STONITH-server02=1457463388<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server02 (INFINITY)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 47: fail-count-STONITH-server02=INFINITY<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_cs_dispatch: Update<br>
&gt; relayed from server01<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server02<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server02 attrd[99116]:   notice: attrd_perform_update: Sent<br>
&gt; update 49: last-failure-STONITH-server02=1457463388<br>
&gt; Mar  9 00:26:28 server02 crmd[99118]:   notice: process_lrm_event: LRM<br>
&gt; operation STONITH-server02_stop_0 (call=14, rc=0, cib-update=14,<br>
&gt; confirmed=true) ok<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: status_from_rc: Action 9<br>
&gt; (STONITH-server02_start_0) on server02 failed (target: 0 vs. rc: 1): Error<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server02 on server02 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server02 on server02 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: status_from_rc: Action 9<br>
&gt; (STONITH-server02_start_0) on server02 failed (target: 0 vs. rc: 1): Error<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server02 on server02 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server02 on server02 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 stonith: external_status: &#39;ipmi status&#39; failed with<br>
&gt; rc 1<br>
&gt; Mar  9 00:26:28 server01 stonith: external/ipmi device not accessible.<br>
&gt; Mar  9 00:26:28 server01 stonith-ng[16805]:   notice: log_operation:<br>
&gt; Operation &#39;monitor&#39; [16891] for device &#39;STONITH-server01&#39; returned: -201<br>
&gt; (Generic Pacemaker error)<br>
&gt; Mar  9 00:26:28 server01 stonith-ng[16805]:  warning: log_operation:<br>
&gt; STONITH-server01:16891 [ Performing: stonith -t external/ipmi -S ]<br>
&gt; Mar  9 00:26:28 server01 stonith-ng[16805]:  warning: log_operation:<br>
&gt; STONITH-server01:16891 [ logd is not runningfailed:  1 ]<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:    error: process_lrm_event: LRM<br>
&gt; operation STONITH-server01_start_0 (call=13, status=4, cib-update=49,<br>
&gt; confirmed=true) Error<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: status_from_rc: Action 7<br>
&gt; (STONITH-server01_start_0) on server01 failed (target: 0 vs. rc: 1): Error<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server01 on server01 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server01 (INFINITY)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server01 on server01 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: status_from_rc: Action 7<br>
&gt; (STONITH-server01_start_0) on server01 failed (target: 0 vs. rc: 1): Error<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server01 on server01 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:  warning: update_failcount: Updating<br>
&gt; failcount for STONITH-server01 on server01 after failed start: rc=1<br>
&gt; (update=INFINITY, time=1457463388)<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 47: fail-count-STONITH-server01=INFINITY<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: run_graph: Transition 3<br>
&gt; (Complete=5, Pending=0, Fired=0, Skipped=2, Incomplete=0,<br>
&gt; Source=/var/lib/pacemaker/pengine/pe-input-70.bz2): Stopped<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server01<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 49: last-failure-STONITH-server01=1457463388<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server01 (INFINITY)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: unpack_config: On loss of<br>
&gt; CCM Quorum: Ignore<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server01 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server01 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Recover<br>
&gt; STONITH-server01    (Started server01)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Stop<br>
&gt; STONITH-server02    (server02)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: process_pe_message:<br>
&gt; Calculated Transition 4: /var/lib/pacemaker/pengine/pe-input-71.bz2<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 51: fail-count-STONITH-server01=INFINITY<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server01<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 53: last-failure-STONITH-server01=1457463388<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: fail-count-STONITH-server01 (INFINITY)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: unpack_config: On loss of<br>
&gt; CCM Quorum: Ignore<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server01 away from server01 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server01 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server01 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Stop<br>
&gt; STONITH-server01    (server01)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Stop<br>
&gt; STONITH-server02    (server02)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: process_pe_message:<br>
&gt; Calculated Transition 5: /var/lib/pacemaker/pengine/pe-input-72.bz2<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 55: fail-count-STONITH-server01=INFINITY<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_trigger_update:<br>
&gt; Sending flush op to all hosts for: last-failure-STONITH-server01<br>
&gt; (1457463388)<br>
&gt; Mar  9 00:26:28 server01 attrd[16807]:   notice: attrd_perform_update: Sent<br>
&gt; update 57: last-failure-STONITH-server01=1457463388<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: unpack_config: On loss of<br>
&gt; CCM Quorum: Ignore<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server01: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server02 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: unpack_rsc_op_failure:<br>
&gt; Processing failed op start for STONITH-server01 on server02: unknown error<br>
&gt; (1)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server01 away from server01 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server01 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server01 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:  warning: common_apply_stickiness:<br>
&gt; Forcing STONITH-server02 away from server02 after 1000000 failures (max=3)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Stop<br>
&gt; STONITH-server01    (server01)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: LogActions: Stop<br>
&gt; STONITH-server02    (server02)<br>
&gt; Mar  9 00:26:28 server01 pengine[16808]:   notice: process_pe_message:<br>
&gt; Calculated Transition 6: /var/lib/pacemaker/pengine/pe-input-73.bz2<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: do_te_invoke: Processing<br>
&gt; graph 6 (ref=pe_calc-dc-1457463388-32) derived from<br>
&gt; /var/lib/pacemaker/pengine/pe-input-73.bz2<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: te_rsc_command: Initiating<br>
&gt; action 1: stop STONITH-server01_stop_0 on server01 (local)<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: te_rsc_command: Initiating<br>
&gt; action 2: stop STONITH-server02_stop_0 on server02<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: process_lrm_event: LRM<br>
&gt; operation STONITH-server01_stop_0 (call=14, rc=0, cib-update=55,<br>
&gt; confirmed=true) ok<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: run_graph: Transition 6<br>
&gt; (Complete=3, Pending=0, Fired=0, Skipped=0, Incomplete=0,<br>
&gt; Source=/var/lib/pacemaker/pengine/pe-input-73.bz2): Complete<br>
&gt; Mar  9 00:26:28 server01 crmd[16809]:   notice: do_state_transition: State<br>
&gt; transition S_TRANSITION_ENGINE -&gt; S_IDLE [ input=I_TE_SUCCESS<br>
&gt; cause=C_FSA_INTERNAL origin=notify_crmd ]<br>
&gt;<br>
&gt;<br>
&gt;<br>
</div></div>&gt; _______________________________________________<br>
&gt; Users mailing list: <a href="mailto:Users@clusterlabs.org">Users@clusterlabs.org</a><br>
&gt; <a href="http://clusterlabs.org/mailman/listinfo/users" rel="noreferrer" target="_blank">http://clusterlabs.org/mailman/listinfo/users</a><br>
&gt;<br>
&gt; Project Home: <a href="http://www.clusterlabs.org" rel="noreferrer" target="_blank">http://www.clusterlabs.org</a><br>
&gt; Getting started: <a href="http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf" rel="noreferrer" target="_blank">http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf</a><br>
&gt; Bugs: <a href="http://bugs.clusterlabs.org" rel="noreferrer" target="_blank">http://bugs.clusterlabs.org</a><br>
&gt;<br>
<br>
<br>
<br>
--<br>
  .~.<br>
  /V\<br>
 //  \\<br>
/(   )\<br>
^`~&#39;^<br>
<br>
_______________________________________________<br>
Users mailing list: <a href="mailto:Users@clusterlabs.org">Users@clusterlabs.org</a><br>
<a href="http://clusterlabs.org/mailman/listinfo/users" rel="noreferrer" target="_blank">http://clusterlabs.org/mailman/listinfo/users</a><br>
<br>
Project Home: <a href="http://www.clusterlabs.org" rel="noreferrer" target="_blank">http://www.clusterlabs.org</a><br>
Getting started: <a href="http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf" rel="noreferrer" target="_blank">http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf</a><br>
Bugs: <a href="http://bugs.clusterlabs.org" rel="noreferrer" target="_blank">http://bugs.clusterlabs.org</a><br>
</blockquote></div><br></div>