Apr 12 14:04:32 node1 cib: [2469]: debug: cib_common_callback_worker: Setting cib_diff_notify callbacks for 3442 (fa5456f0-2078-4dbb-8a73-4e687cc23f98): off
Apr 12 14:04:32 node1 cib: [2469]: debug: cib_common_callback_worker: Setting cib_diff_notify callbacks for 3442 (fa5456f0-2078-4dbb-8a73-4e687cc23f98): on
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: recv msg: login hacluster **** 2.0
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: recv msg: regevt#012evt:cib_changed
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: recv msg: regevt#012evt:disconnected
Apr 12 14:05:04 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:05 node1 mgmtd: [2474]: debug: recv msg: cib_query#012cib
Apr 12 14:05:05 node1 mgmtd: [2474]: info: CIB query: cib
Apr 12 14:05:05 node1 mgmtd: [2474]: debug: send msg: ok#012<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="1" admin_epoch="0" epoch="246" dc-uuid="node3" num_updates="49">#012  <configuration>#012    <crm_config>#012      <cluster_property_set id="cib-bootstrap-options">#012        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.0.8-9881a7350d6182bae9e8e557cf20a3cc5dac3ee7"/>#012        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="openais"/>#012        <nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="4"/>#012        <nvpair id="nvpair-2c777244-5613-4220-95bb-b1fcb861f961" name="node-health-red" value="0"/>#012        <nvpair id="nvpair-32d1de32-4963-41c1-b590-ea4045e1640c" name="stonith-enabled" value="false"/>#012        <nvpair name="last-lrm-refresh" id="cib-bootstrap-options-last-lrm-refresh" value="1271073063"/>#012      </cluster_property_set>#012    </crm_config>#012    <nodes>#012      <node id="node1" type="normal" uname="node1">#012        <instance_attributes id="nodes-node1">#012          <nvpair id="standby-node1" name="standby" value="false"/>#012        </instance_attributes>#012      </node>#012      <node id="node2" uname="node2" type="normal"/>#012      <node id="node3" uname="node3" type="normal"/>#012      <node id="node4" uname="node4" type="normal"/>#012    </nodes>#012    <resources>#012      <clone id="dlm-clone">#012        <meta_attributes id="dlm-clone-meta_attributes">#012          <nvpair id="dlm-clone-meta_attributes-interleave" name="interleave" value="true"/>#012          <nvpair name="target-role" id="dlm-clone-meta_attributes-target-role" value="stopped"/>#012        </meta_attributes>#012        <primitive class="ocf" provider="pacemaker" type="controld" id="dlm">#012          <operations>#012            <op id="dlm-monitor-120s" interval="120s" name="monitor"/>#012          </operations>#012          <meta_attributes id="dlm-meta_attribut
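
The #012 and #011 sequences in the payload above are rsyslog's escapes for embedded newline and tab characters, so this single line is the full CIB configuration XML returned to the management client (truncated by the logger). The same dump can be pulled straight from the CIB daemon when needed; a minimal sketch, assuming the standard Pacemaker 1.0 command-line tools are present on the node:

    # Dump the complete CIB (configuration and status) as XML
    cibadmin -Q
    # Restrict the query to the resources section only
    cibadmin -Q -o resources
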
Apr 12 14:05:05 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012pacemaker-1.0.rng
Apr 12 14:05:06 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012    <element name="cib">#012      <ref name="element-cib"/>#012    </element>#012  </start>#012#012  <define name="element-cib">#012    <ref name="attribute-options"/>#012    <element name="configuration">#012      <interleave>#012#011<element name="crm_config">#012#011  <zeroOrMore>#012#011    <element name="cluster_property_set">#012#011      <externalRef href="nvset-1.0.rng"/>#012#011    </element>#012#011  </zeroOrMore>#012#011</element>#012#011<optional>#012#011  <element name="rsc_defaults">#012#011    <zeroOrMore>#012#011      <element name="meta_attributes">#012#011#011<externalRef href="nvset-1.0.rng"/>#012#011      </element>#012#011    </zeroOrMore>#012#011  </element>#012#011</optional>#012#011<optional>#012#011  <element name="op_defaults">#012#011    <zeroOrMore>#012#011      <element name="meta_attributes">#012#011#011<externalRef href="nvset-1.0.rng"/>#012#011      </element>#012#011    </zeroOrMore>#012#011  </element>#012#011</optional>#012#011<ref name="element-nodes"/>#012#011<element name="resources">#012#011  <externalRef href="resources-1.0.rng"/>#012#011</element>#012#011<element name="constraints">#012#011  <externalRef href="constraints-1.0.rng"/>#012#011</element>#012      </interleave>#012    </element>#012    <element name="status">#012      <ref name="element-status"/>#012    </element>#012  </define>#012#012  <define name="attribute-options">#012    <attribute name="validate-with">#012      <choice>#012#011<value>none</value>#012#011<value>pacemaker-0.6</value>#012#011<value>transitional-0.6</value>#012#011<value>pacemaker-0.7</value>#012#011<value>pacemaker-1.0</value>#012      </choice>#012    </attribute>#012    <optional>#012      <attribute name="crm_feature_set"><text/></attri
Apr 12 14:05:06 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012nvset-1.0.rng
Apr 12 14:05:07 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<!-- types: http://www.w3.org/TR/xmlschema-2/#dateTime -->#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012    <ref name="element-nvset"/>#012  </start>#012#012  <define name="element-nvset">#012   <choice>#012   <attribute name="id-ref"><data type="IDREF"/></attribute>#012   <group>#012    <attribute name="id"><data type="ID"/></attribute>#012    <interleave>#012      <optional>#012#011<externalRef href="rule-1.0.rng"/>#012      </optional>#012#011<zeroOrMore>#012#011  <element name="nvpair">#012#011    <attribute name="id"><data type="ID"/></attribute>#012#011    <attribute name="name"><text/></attribute>#012#011    <optional>#012#011      <attribute name="value"><text/></attribute>#012#011    </optional>#012#011  </element>#012#011</zeroOrMore>#012      <optional>#012#011<externalRef href="score.rng"/>#012      </optional>#012    </interleave>#012   </group>#012   </choice>#012  </define>#012#012</grammar>
Apr 12 14:05:07 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012rule-1.0.rng
Apr 12 14:05:08 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012#011 xmlns:ann="http://relaxng.org/ns/compatibility/annotations/1.0"#012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012      <ref name="element-rule"/>#012  </start>#012#012  <define name="element-rule">#012    <element name="rule">#012     <choice>#012     <attribute name="id-ref"><data type="IDREF"/></attribute>#012     <group>#012      <attribute name="id"><data type="ID"/></attribute>#012      <choice>#012#011<externalRef href="score.rng"/>#012#011<attribute name="score-attribute"><text/></attribute>#012      </choice>#012      <optional>#012#011<attribute name="boolean-op">#012#011  <choice>#012#011    <value>or</value>#012#011    <value>and</value>#012#011  </choice>#012#011</attribute>#012      </optional>#012      <optional>#012#011<attribute name="role"><text/></attribute>#012      </optional>#012      <oneOrMore>#012#011<choice>#012          <element name="expression">#012#011    <attribute name="id"><data type="ID"/></attribute>#012#011    <attribute name="attribute"><text/></attribute>#012#011    <attribute name="operation">#012#011      <choice>#012#011#011<value>lt</value>#012#011#011<value>gt</value>#012#011#011<value>lte</value>#012#011#011<value>gte</value>#012#011#011<value>eq</value>#012#011#011<value>ne</value>#012#011#011<value>defined</value>#012#011#011<value>not_defined</value>#012#011      </choice>#012#011    </attribute>#012#011    <optional>#012#011      <attribute name="value"><text/></attribute>#012#011    </optional>#012#011    <optional>#012#011      <attribute name="type" ann:defaultValue="string">#012#011#011<choice>#012#011#011  <value>string</value>#012#011#011  <value>number</value>#012#011#011  <value>version</value>#012#011#011</choice>#012#011      </attribute>#012#011    </optional>#012#011  </element>#012          <element name="date_expression">#012#011    <attribute name="id">
Apr 12 14:05:08 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012score.rng
Apr 12 14:05:09 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012      <ref name="attribute-score"/>#012  </start>#012#012  <define name="attribute-score">#012    <attribute name="score">#012      <choice>#012#011<data type="integer"/>#012#011<value>INFINITY</value>#012#011<value>+INFINITY</value>#012#011<value>-INFINITY</value>#012      </choice>#012    </attribute>#012  </define>#012</grammar>
Apr 12 14:05:09 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012resources-1.0.rng
Apr 12 14:05:10 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012      <ref name="element-resources"/>#012  </start>#012#012  <define name="element-resources">#012      <zeroOrMore>#012#011<choice>#012#011  <ref name="element-primitive"/>#012#011  <ref name="element-group"/>#012#011  <ref name="element-clone"/>#012#011  <ref name="element-master"/>#012#011</choice>#012      </zeroOrMore>#012  </define>#012#012  <define name="element-primitive">#012    <element name="primitive">#012      <interleave>#012#011<attribute name="id"><data type="ID"/></attribute>#012#011<choice>#012#011  <group>#012#011    <attribute name="class"><value>ocf</value></attribute>#012#011    <attribute name="provider"><text/></attribute>#012#011  </group>#012#011  <attribute name="class">#012#011    <choice>#012#011      <value>lsb</value>#012#011      <value>heartbeat</value>#012#011      <value>stonith</value>#012#011    </choice>#012#011  </attribute>#012#011</choice>#012#011<attribute name="type"><text/></attribute>#012#011<optional>#012#011  <attribute name="description"><text/></attribute>#012#011</optional>#012#011<ref name="element-resource-extra"/>#012#011<ref name="element-operations"/>#012      </interleave>#012    </element>#012  </define>#012#012  <define name="element-group">#012    <element name="group">#012      <attribute name="id"><data type="ID"/></attribute>#012      <optional>#012#011<attribute name="description"><text/></attribute>#012      </optional>#012      <interleave>#012#011<ref name="element-resource-extra"/>#012#011<oneOrMore>#012#011  <ref name="element-primitive"/>#012#011</oneOrMore>#012      </interleave>#012    </element>#012  </define>#012 #012  <define name="element-clone">#012    <element name="clone">#012      <attribute name="id"><data type="ID"/></attribute>#012      <optional>#012#011<attribute name="description"><text/></
Apr 12 14:05:10 node1 mgmtd: [2474]: debug: recv msg: crm_schema#012pacemaker-1.0#012constraints-1.0.rng
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012<?xml version="1.0" encoding="utf-8"?>#012<grammar xmlns="http://relaxng.org/ns/structure/1.0" #012         datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">#012  <start>#012      <ref name="element-constraints"/>#012  </start>#012#012  <define name="element-constraints">#012      <zeroOrMore>#012#011<choice>#012#011  <ref name="element-location"/>#012#011  <ref name="element-colocation"/>#012#011  <ref name="element-order"/>#012#011</choice>#012      </zeroOrMore>#012  </define>#012#012  <define name="element-location">#012    <element name="rsc_location">#012      <attribute name="id"><data type="ID"/></attribute>#012      <attribute name="rsc"><data type="IDREF"/></attribute>#012      <choice>#012#011<group>#012#011  <externalRef href="score.rng"/>#012#011  <attribute name="node"><text/></attribute>#012#011</group>#012#011<oneOrMore>#012#011  <externalRef href="rule-1.0.rng"/>#012#011</oneOrMore>#012      </choice>#012      <optional>#012#011<ref name="element-lifetime"/>#012      </optional>#012    </element>#012  </define>#012#012  <define name="element-resource-set">#012    <element name="resource_set">#012      <attribute name="id"><data type="ID"/></attribute>#012      <optional>#012#011<attribute name="sequential"><data type="boolean"/></attribute>#012      </optional>#012      <optional>#012#011<attribute name="action">#012#011  <ref name="attribute-actions"/>#012#011</attribute>#012      </optional>#012      <optional>#012#011<attribute name="role">#012#011  <ref name="attribute-roles"/>#012#011</attribute>#012      </optional>#012      <optional>#012#011<externalRef href="score.rng"/>#012      </optional>#012      <oneOrMore>#012#011<element name="resource_ref">#012#011  <attribute name="id"><data type="IDREF"/></attribute>#012#011</element>#012      </oneOrMore>#012    </element>#012  </define>#012#012  <define name="element-colocation">#012    <element name="rsc_colocation">#012      <attribute name="id"><data type="ID"/></attribu
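
The crm_schema requests above show the client fetching the RelaxNG grammars (pacemaker-1.0.rng, nvset-1.0.rng, rule-1.0.rng, score.rng, resources-1.0.rng, constraints-1.0.rng) matching the CIB's validate-with="pacemaker-1.0". The configuration can be checked against the same grammar by hand; a rough sketch, assuming the schemas are installed under /usr/share/pacemaker (the usual location, but the path may differ per distribution):

    # Validate the live cluster configuration with Pacemaker's own checker
    crm_verify -L -V
    # Or validate an exported CIB against the shipped RelaxNG schema
    cibadmin -Q > /tmp/cib.xml
    xmllint --relaxng /usr/share/pacemaker/pacemaker-1.0.rng /tmp/cib.xml
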
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: all_nodes
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: fail
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: crm_nodes
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012node2#012node3#012node4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: active_nodes
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012node2#012node3#012node4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: cluster_type
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012openais
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: dc
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: node_config#012node1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: node_config#012node2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node2#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: node_config#012node3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node3#012True#012False#012False#012False#012True#012True#012member
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: node_config#012node4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node4#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: all_rsc
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012dlm-clone#012o2cb-clone#012rsa-fencing-1#012rsa-fencing-2#012rsa-fencing-3#012rsa-fencing-4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm-clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm-clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012dlm:0#012dlm:1#012dlm:2#012dlm:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb-clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012o2cb-clone
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012o2cb:0#012o2cb:1#012o2cb:2#012o2cb:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:0
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node1
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-2
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-3
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-4
Apr 12 14:05:11 node1 mgmtd: [2474]: debug: send msg: ok#012node1
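
The burst of node_config, all_rsc, rsc_type, rsc_status and rsc_running_on queries at 14:05:11 is the GUI building its resource tree: all instances of dlm-clone and o2cb-clone are reported as not running, while rsa-fencing-1 through rsa-fencing-4 are running on node1, node3, node4 and node1 respectively. The equivalent one-shot view from the command line, as a sketch:

    # One-shot cluster status, including inactive resources and fail counts
    crm_mon -1 -r -f
    # Where is a single resource running?
    crm_resource -r rsa-fencing-1 -W
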
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm:0
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm:1
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm:2
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm:3
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:40 node1 mgmtd: [2474]: debug: recv msg: get_rsc_attr#012dlm#012meta#012target-role
Apr 12 14:05:40 node1 crm_resource: [3446]: info: Invoked: crm_resource --meta -r dlm -g target-role 
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_rw
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_callback
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cib_native_signon_raw: Connection to CIB successful
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'stop' for cluster option 'no-quorum-policy'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'symmetric-cluster'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '0' for cluster option 'default-resource-stickiness'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'is-managed-default'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'false' for cluster option 'maintenance-mode'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'start-failure-is-fatal'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'reboot' for cluster option 'stonith-action'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '60s' for cluster option 'stonith-timeout'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'startup-fencing'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '60s' for cluster option 'cluster-delay'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '30' for cluster option 'batch-limit'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '20s' for cluster option 'default-action-timeout'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'false' for cluster option 'stop-all-resources'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-resources'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-actions'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'false' for cluster option 'remove-after-stop'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '-1' for cluster option 'pe-error-series-max'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '-1' for cluster option 'pe-warn-series-max'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '-1' for cluster option 'pe-input-series-max'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value 'none' for cluster option 'node-health-strategy'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '0' for cluster option 'node-health-green'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cluster_option: Using default value '0' for cluster option 'node-health-yellow'
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: STONITH timeout: 60000
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: STONITH of failed nodes is disabled
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: Stop all active resources: false
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: Cluster is symmetric - resources can run anywhere by default
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: Default stickiness: 0
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_config: On loss of CCM Quorum: Stop ALL resources
Apr 12 14:05:40 node1 crm_resource: [3446]: info: unpack_config: Node scores: 'red' = 0, 'yellow' = 0, 'green' = 0
Apr 12 14:05:40 node1 crm_resource: [3446]: info: determine_online_status: Node node2 is online
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_rsc_op: rsa-fencing-4_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:40 node1 crm_resource: [3446]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-4_start_0 on node2: unknown error (1)
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_rsc_op: rsa-fencing-1_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:40 node1 crm_resource: [3446]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-1_start_0 on node2: unknown error (1)
Apr 12 14:05:40 node1 crm_resource: [3446]: info: determine_online_status: Node node3 is online
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:40 node1 crm_resource: [3446]: info: determine_online_status: Node node4 is online
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:40 node1 crm_resource: [3446]: info: determine_online_status: Node node1 is online
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: dump_resource_attr: Looking up target-role in dlm:0
Apr 12 14:05:40 node1 crm_resource: [3446]: debug: cib_native_signoff: Signing out of the CIB Service
Apr 12 14:05:41 node1 mgmtd: [2474]: debug: send msg: ok#012stopped
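
The get_rsc_attr request is answered by running crm_resource (PID 3446) against the CIB; the value comes back as "stopped" because dlm-clone carries target-role="stopped" in its meta_attributes, as visible in the CIB dump at 14:05:05. Reproduced by hand, roughly:

    # Query the target-role meta attribute (inherited from the clone)
    crm_resource --meta -r dlm -g target-role
    # Expected output in this configuration: stopped
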
Apr 12 14:05:52 node1 mgmtd: [2474]: debug: recv msg: del_rsc_attr#012dlm#012meta#012target-role
Apr 12 14:05:52 node1 crm_resource: [3448]: info: Invoked: crm_resource --meta -r dlm -d target-role 
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_rw
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_callback
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cib_native_signon_raw: Connection to CIB successful
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'stop' for cluster option 'no-quorum-policy'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'symmetric-cluster'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '0' for cluster option 'default-resource-stickiness'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'is-managed-default'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'false' for cluster option 'maintenance-mode'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'start-failure-is-fatal'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'reboot' for cluster option 'stonith-action'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '60s' for cluster option 'stonith-timeout'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'startup-fencing'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '60s' for cluster option 'cluster-delay'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '30' for cluster option 'batch-limit'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '20s' for cluster option 'default-action-timeout'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'false' for cluster option 'stop-all-resources'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-resources'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-actions'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'false' for cluster option 'remove-after-stop'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '-1' for cluster option 'pe-error-series-max'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '-1' for cluster option 'pe-warn-series-max'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '-1' for cluster option 'pe-input-series-max'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value 'none' for cluster option 'node-health-strategy'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '0' for cluster option 'node-health-green'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cluster_option: Using default value '0' for cluster option 'node-health-yellow'
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: STONITH timeout: 60000
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: STONITH of failed nodes is disabled
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: Stop all active resources: false
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: Cluster is symmetric - resources can run anywhere by default
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: Default stickiness: 0
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_config: On loss of CCM Quorum: Stop ALL resources
Apr 12 14:05:52 node1 crm_resource: [3448]: info: unpack_config: Node scores: 'red' = 0, 'yellow' = 0, 'green' = 0
Apr 12 14:05:52 node1 crm_resource: [3448]: info: determine_online_status: Node node2 is online
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_rsc_op: rsa-fencing-4_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:52 node1 crm_resource: [3448]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-4_start_0 on node2: unknown error (1)
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_rsc_op: rsa-fencing-1_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:52 node1 crm_resource: [3448]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-1_start_0 on node2: unknown error (1)
Apr 12 14:05:52 node1 crm_resource: [3448]: info: determine_online_status: Node node3 is online
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:52 node1 crm_resource: [3448]: info: determine_online_status: Node node4 is online
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:52 node1 crm_resource: [3448]: info: determine_online_status: Node node1 is online
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:52 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/configuration/resources//*[@id="dlm"]//meta_attributes//nvpair[@name="target-role"] does not exist
Apr 12 14:05:52 node1 crm_resource: [3448]: debug: cib_native_signoff: Signing out of the CIB Service
Apr 12 14:05:53 node1 mgmtd: [2474]: debug: send msg: fail
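
The del_rsc_attr call on the primitive "dlm" fails: the XPath query at 14:05:52 looks for a target-role nvpair under the element with id="dlm" and finds nothing, because the attribute is actually defined on the clone wrapper dlm-clone. The GUI therefore falls back to setting the attribute on dlm-clone in the next request. Done by hand, the delete would have to name the clone; a sketch:

    # Fails here: the primitive has no target-role of its own
    crm_resource --meta -r dlm -d target-role
    # Works: the nvpair lives in dlm-clone's meta_attributes
    crm_resource --meta -r dlm-clone -d target-role
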
Apr 12 14:05:53 node1 mgmtd: [2474]: debug: recv msg: set_rsc_attr#012dlm-clone#012meta#012target-role#012started
Apr 12 14:05:53 node1 crm_resource: [3450]: info: Invoked: crm_resource --meta -r dlm-clone -p target-role -v started 
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_rw
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: init_client_ipc_comms_nodispatch: Attempting to talk on: /var/run/crm/cib_callback
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cib_native_signon_raw: Connection to CIB successful
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'stop' for cluster option 'no-quorum-policy'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'symmetric-cluster'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '0' for cluster option 'default-resource-stickiness'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'is-managed-default'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'false' for cluster option 'maintenance-mode'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'start-failure-is-fatal'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'reboot' for cluster option 'stonith-action'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '60s' for cluster option 'stonith-timeout'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'startup-fencing'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '60s' for cluster option 'cluster-delay'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '30' for cluster option 'batch-limit'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '20s' for cluster option 'default-action-timeout'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'false' for cluster option 'stop-all-resources'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-resources'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'true' for cluster option 'stop-orphan-actions'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'false' for cluster option 'remove-after-stop'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '-1' for cluster option 'pe-error-series-max'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '-1' for cluster option 'pe-warn-series-max'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '-1' for cluster option 'pe-input-series-max'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value 'none' for cluster option 'node-health-strategy'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '0' for cluster option 'node-health-green'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cluster_option: Using default value '0' for cluster option 'node-health-yellow'
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: STONITH timeout: 60000
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: STONITH of failed nodes is disabled
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: Stop all active resources: false
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: Cluster is symmetric - resources can run anywhere by default
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: Default stickiness: 0
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_config: On loss of CCM Quorum: Stop ALL resources
Apr 12 14:05:53 node1 crm_resource: [3450]: info: unpack_config: Node scores: 'red' = 0, 'yellow' = 0, 'green' = 0
Apr 12 14:05:53 node1 crm_resource: [3450]: info: determine_online_status: Node node2 is online
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_rsc_op: rsa-fencing-4_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:53 node1 crm_resource: [3450]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-4_start_0 on node2: unknown error (1)
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_rsc_op: rsa-fencing-1_start_0 on node2 returned 1 (unknown error) instead of the expected value: 0 (ok)
Apr 12 14:05:53 node1 crm_resource: [3450]: WARN: unpack_rsc_op: Processing failed op rsa-fencing-1_start_0 on node2: unknown error (1)
Apr 12 14:05:53 node1 crm_resource: [3450]: info: determine_online_status: Node node3 is online
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:53 node1 crm_resource: [3450]: info: determine_online_status: Node node4 is online
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:53 node1 crm_resource: [3450]: info: determine_online_status: Node node1 is online
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: unpack_lrm_rsc_state: dlm:0: Overwriting calculated next role Unknown with requested next role Stopped
Apr 12 14:05:53 node1 cib: [2469]: debug: cib_process_xpath: Processing cib_query op for //cib/configuration/resources//*[@id="dlm-clone"]//meta_attributes//nvpair[@name="target-role"] (/cib/configuration/resources/clone[1]/meta_attributes/nvpair[2])
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: log_data_element: find_resource_attr: Match <nvpair name="target-role" id="dlm-clone-meta_attributes-target-role" value="stopped" />
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: set_resource_attr: Found a match for name=target-role: id=dlm-clone-meta_attributes-target-role
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: log_data_element: set_resource_attr: Update <nvpair id="dlm-clone-meta_attributes-target-role" name="target-role" value="started" />
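
set_resource_attr finds the existing nvpair dlm-clone-meta_attributes-target-role and rewrites its value from "stopped" to "started", which is what triggers the CIB update, the disk write of version 0.247.0 and the dlm:0 start operation that follow. A quick way to confirm the change afterwards, as a sketch:

    # Should now report: started
    crm_resource --meta -r dlm-clone -g target-role
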
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 11d to 11e
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 11e to pending delivery queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 11e
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 11e
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 11f
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 11e to 11f
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 11f to pending delivery queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 120
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 11f to 120
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 120 to pending delivery queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 120
Apr 12 14:05:53 node1 cib: [2469]: debug: activateCibXml: Triggering CIB write for cib_apply_diff op
Apr 12 14:05:53 node1 cib: [2469]: debug: Forking temp process write_cib_contents
Apr 12 14:05:53 node1 crm_resource: [3450]: debug: cib_native_signoff: Signing out of the CIB Service
Apr 12 14:05:53 node1 cib: [3451]: info: write_cib_contents: Archived previous version as /var/lib/heartbeat/crm/cib-11.raw
Apr 12 14:05:53 node1 cib: [3451]: info: write_cib_contents: Wrote version 0.247.0 of the CIB to disk (digest: 9cdd4eacddf49f756a788456405f3457)
Apr 12 14:05:53 node1 cib: [3451]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.lynhJZ (digest: /var/lib/heartbeat/crm/cib.YsxIl9)
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 121
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 120 to 121
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 121 to pending delivery queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 122
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 121 to 122
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 122 to pending delivery queue
Apr 12 14:05:53 node1 crmd: [2473]: info: do_lrm_rsc_op: Performing key=7:4:0:d672cd06-6fba-41c7-b684-ee542f69d22a op=dlm:0_start_0 )
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 122
Apr 12 14:05:53 node1 lrmd: [2470]: debug: on_msg_perform_op:2351: copying parameters for rsc dlm:0
Apr 12 14:05:53 node1 lrmd: [2470]: debug: on_msg_perform_op: add an operation operation start[10] on ocf::controld::dlm:0 for client 2473, its parameters: CRM_meta_clone_max=[4] crm_feature_set=[3.0.1] CRM_meta_timeout=[20000] CRM_meta_globally_unique=[false] CRM_meta_clone=[0]  to the operation list.
Apr 12 14:05:53 node1 lrmd: [2470]: info: rsc:dlm:0: start
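
The lrmd carries out the start by invoking the ocf:pacemaker:controld agent for dlm:0 with the CRM_meta_* parameters listed above. For debugging, the agent can be exercised outside the cluster in roughly the same way; a sketch, assuming the default OCF install path /usr/lib/ocf (may differ per distribution):

    export OCF_ROOT=/usr/lib/ocf
    # Run the same resource agent the lrmd calls for dlm:0
    $OCF_ROOT/resource.d/pacemaker/controld start
    $OCF_ROOT/resource.d/pacemaker/controld monitor
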
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 123
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering 122 to 123
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 123 to pending delivery queue
Apr 12 14:05:53 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 123
Apr 12 14:05:53 node1 cib: [2469]: info: Managed write_cib_contents process 3451 exited with return code 0.
Apr 12 14:05:53 node1 lrmd: [2470]: info: RA output: (dlm:0:start:stderr) dlm_controld.pcmk: no process found
Apr 12 14:05:53 node1 kernel: [  545.119249] DLM (built Mar 17 2010 15:35:28) installed
Apr 12 14:05:53 node1 cluster-dlm[3472]: main: dlm_controld 1256632743#012
Apr 12 14:05:53 node1 corosync[2461]:   [pcmk  ] debug: process_ais_message: Msg[0] (dest=local:ais, from=node1:unknown.3472, remote=true, size=5): 3472
Apr 12 14:05:53 node1 corosync[2461]:   [pcmk  ] info: pcmk_notify: Enabling node notifications for child 3472 (0x67c6a0)
Apr 12 14:05:53 node1 cluster-dlm[3472]: setup_misc_devices: found /dev/misc/dlm-control minor 57#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: setup_misc_devices: found /dev/misc/dlm-monitor minor 56#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: setup_misc_devices: found /dev/misc/dlm_plock minor 55#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: setup_monitor: /dev/misc/dlm-monitor fd 9#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: update_comms_nodes: /sys/kernel/config/dlm/cluster/comms: opendir failed: 2#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: clear_configfs_spaces: /sys/kernel/config/dlm/cluster/spaces: opendir failed: 2#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: setup_cpg: daemon cpg_initialize error 6#012
Apr 12 14:05:53 node1 cluster-dlm[3472]: close_cpg: daemon cpg_leave error 9#012
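
cluster-dlm (PID 3472) starts and finds its misc devices, but fails to join its daemon CPG group: cpg_initialize returns error 6 and the subsequent cpg_leave returns error 9, which in corosync's cs_error_t numbering correspond to CS_ERR_TRY_AGAIN and CS_ERR_BAD_HANDLE, i.e. the CPG service was not ready when the daemon connected and the handle was never valid. A first check from the shell, as a sketch:

    # Confirm corosync membership/ring status on this node
    corosync-cfgtool -s
    # Check whether dlm_controld survived the failed CPG join
    ps -e | grep dlm_controld
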
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: fail
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 124
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 123 to 124
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 124 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 125
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 124 to 125
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 125 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 125
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 126
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 125 to 126
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 126 to pending delivery queue
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for fail-count-dlm:2
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 126
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 127
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 126 to 127
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 127 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 128
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 127 to 128
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 128 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 129
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 128 to 129
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 129 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 128
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 129
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 129 to 12a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12a to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12a to 12b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12b to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 12b
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='fail-count-dlm:2'] does not exist
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for fail-count-dlm:2=(null) passed
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for last-failure-dlm:2
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='last-failure-dlm:2'] does not exist
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for last-failure-dlm:2=(null) passed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12b to 12c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12c to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12c to 12d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12d to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 12d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12d to 12e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12e to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 12f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12e to 12f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 12f to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 12f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 130
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 12f to 130
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 130 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 131
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 130 to 131
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 131 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 130
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 132
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 131 to 132
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 132 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 133
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 132 to 133
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 133 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 131
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 134
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 133 to 134
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 134 to pending delivery queue
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for fail-count-dlm:1
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 135
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 134 to 135
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 135 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 133
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 135
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 136
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 135 to 136
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 136 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 137
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 136 to 137
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 137 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 137
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 138
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 137 to 138
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 138 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 139
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 138 to 139
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 139 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 139 to 13a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13a to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13a to 13b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13b to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 139
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 13b
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='fail-count-dlm:1'] does not exist
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for fail-count-dlm:1=(null) passed
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13b to 13c
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for last-failure-dlm:1
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13c to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13c to 13d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13d to pending delivery queue
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='last-failure-dlm:1'] does not exist
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13e
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for last-failure-dlm:1=(null) passed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13d to 13e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13e to pending delivery queue
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for fail-count-dlm:3
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='fail-count-dlm:3'] does not exist
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for fail-count-dlm:3=(null) passed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 13f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13e to 13f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 13f to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 13e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 13f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 140
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 13f to 140
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 140 to pending delivery queue
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for last-failure-dlm:3
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 141
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 140 to 141
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 141 to pending delivery queue
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='last-failure-dlm:3'] does not exist
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update -22 for last-failure-dlm:3=(null) passed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 141
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 142
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 141 to 142
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 142 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 143
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 142 to 143
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 143 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 143
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 144
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 143 to 144
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 144 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 145
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 144 to 145
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 145 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 145
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 146
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 145 to 146
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 146 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 147
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 146 to 147
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 147 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 147
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 lrmd: [2470]: info: RA output: (dlm:0:start:stderr) dlm_controld.pcmk: no process found
Apr 12 14:05:54 node1 lrmd: [2470]: WARN: Managed dlm:0:start process 3452 exited with return code 7.
Apr 12 14:05:54 node1 crmd: [2473]: info: process_lrm_event: LRM operation dlm:0_start_0 (call=10, rc=7, cib-update=16, confirmed=true) not running
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 147 to 149
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 148 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 149 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 148
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 149
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 149
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 149 to 14a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14a to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 14a to 14b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14b to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 14b to 14c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14c to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 14c to 14d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14d to pending delivery queue
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_ais_dispatch: Update relayed from node3
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 14b
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: update message from node3: fail-count-dlm:0=INFINITY
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for fail-count-dlm:0
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: Supplied: INFINITY, Current: (null), Stored: (null)
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: New value of fail-count-dlm:0 is INFINITY
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_trigger_update: Sending flush op to all hosts for: fail-count-dlm:0 (INFINITY)
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 14d
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='fail-count-dlm:0'] does not exist
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: Processing cib_query op for /cib (/cib)
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_perform_update: Sent update 50: fail-count-dlm:0=INFINITY
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_ais_dispatch: Update relayed from node3
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: update message from node3: last-failure-dlm:0=1271073815
Apr 12 14:05:54 node1 attrd: [2471]: info: find_hash_entry: Creating hash entry for last-failure-dlm:0
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: Supplied: 1271073815, Current: (null), Stored: (null)
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_local_callback: New value of last-failure-dlm:0 is 1271073815
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_trigger_update: Sending flush op to all hosts for: last-failure-dlm:0 (1271073815)
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: cib_query: //cib/status//node_state[@id='node1']//nvpair[@name='last-failure-dlm:0'] does not exist
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 14d to 14f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14e to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 14f to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 14f
Apr 12 14:05:54 node1 cib: [2469]: debug: cib_process_xpath: Processing cib_query op for /cib (/cib)
Apr 12 14:05:54 node1 attrd: [2471]: info: attrd_perform_update: Sent update 53: last-failure-dlm:0=1271073815
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 14f to 150
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 150 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 150
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 14f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 150 to 151
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 151 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 151
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 150
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 151
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 152
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 151 to 152
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 152 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 153
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 152 to 153
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 153 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 153
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 154
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 153 to 154
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 154 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 155
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 154 to 155
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 155 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 155
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update 50 for fail-count-dlm:0=INFINITY passed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 attrd: [2471]: debug: attrd_cib_callback: Update 53 for last-failure-dlm:0=1271073815 passed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 156
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 155 to 156
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 156 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 157
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 156 to 157
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 157 to pending delivery queue
Apr 12 14:05:54 node1 crmd: [2473]: info: do_lrm_rsc_op: Performing key=4:6:0:d672cd06-6fba-41c7-b684-ee542f69d22a op=dlm:0_stop_0 )
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 157
Apr 12 14:05:54 node1 lrmd: [2470]: debug: on_msg_perform_op: add an operation operation stop[11] on ocf::controld::dlm:0 for client 2473, its parameters: CRM_meta_clone_max=[4] crm_feature_set=[3.0.1] CRM_meta_timeout=[20000] CRM_meta_globally_unique=[false] CRM_meta_clone=[0]  to the operation list.
Apr 12 14:05:54 node1 lrmd: [2470]: info: rsc:dlm:0: stop
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 158
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 157 to 158
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 158 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 158
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 159
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 158 to 159
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 159 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 159 to 15a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15a to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 15a
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15a to 15b
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15b to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15b to 15c
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15c to pending delivery queue
Apr 12 14:05:54 node1 lrmd: [2470]: info: RA output: (dlm:0:stop:stderr) dlm_controld.pcmk: no process found
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 15c
Apr 12 14:05:54 node1 lrmd: [2470]: info: Managed dlm:0:stop process 3486 exited with return code 0.
Apr 12 14:05:54 node1 crmd: [2473]: info: process_lrm_event: LRM operation dlm:0_stop_0 (call=11, rc=0, cib-update=17, confirmed=true) ok
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15c to 15d
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15d to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15d to 15e
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15e to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 15e
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 15f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15e to 15f
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 15f to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 160
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 15f to 160
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 160 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] mcasted message added to pending queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 160 to 162
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 161 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 162 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 161
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 162
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 160
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 162
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 163
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 162 to 163
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 163 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 164
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 163 to 164
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 164 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 164
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 165
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 164 to 165
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 165 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Received ringid(192.168.1.106:760) seq 166
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering 165 to 166
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] Delivering MCAST message with seq 166 to pending delivery queue
Apr 12 14:05:54 node1 corosync[2461]:   [TOTEM ] releasing messages up to and including 166
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: update cib finished
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send evt: evt:cib_changed done
Apr 12 14:05:54 node1 haclient: on_event:evt:cib_changed
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: cib_query#012cib
Apr 12 14:05:54 node1 mgmtd: [2474]: info: CIB query: cib
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="1" dc-uuid="node3" admin_epoch="0" epoch="247" num_updates="17">#012  <configuration>#012    <crm_config>#012      <cluster_property_set id="cib-bootstrap-options">#012        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.0.8-9881a7350d6182bae9e8e557cf20a3cc5dac3ee7"/>#012        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="openais"/>#012        <nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="4"/>#012        <nvpair id="nvpair-2c777244-5613-4220-95bb-b1fcb861f961" name="node-health-red" value="0"/>#012        <nvpair id="nvpair-32d1de32-4963-41c1-b590-ea4045e1640c" name="stonith-enabled" value="false"/>#012        <nvpair name="last-lrm-refresh" id="cib-bootstrap-options-last-lrm-refresh" value="1271073063"/>#012      </cluster_property_set>#012    </crm_config>#012    <nodes>#012      <node id="node1" type="normal" uname="node1">#012        <instance_attributes id="nodes-node1">#012          <nvpair id="standby-node1" name="standby" value="false"/>#012        </instance_attributes>#012      </node>#012      <node id="node2" uname="node2" type="normal"/>#012      <node id="node3" uname="node3" type="normal"/>#012      <node id="node4" uname="node4" type="normal"/>#012    </nodes>#012    <resources>#012      <clone id="dlm-clone">#012        <meta_attributes id="dlm-clone-meta_attributes">#012          <nvpair id="dlm-clone-meta_attributes-interleave" name="interleave" value="true"/>#012          <nvpair name="target-role" id="dlm-clone-meta_attributes-target-role" value="started"/>#012        </meta_attributes>#012        <primitive class="ocf" provider="pacemaker" type="controld" id="dlm">#012          <operations>#012            <op id="dlm-monitor-120s" interval="120s" name="monitor"/>#012          </operations>#012          <meta_attributes id="dlm-meta_attribut
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: all_nodes
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: fail
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: crm_nodes
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012node2#012node3#012node4
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: active_nodes
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012node2#012node3#012node4
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: cluster_type
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012openais
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: dc
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node3
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: node_config#012node1
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node1#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: node_config#012node2
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node2#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: node_config#012node3
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node3#012True#012False#012False#012False#012True#012True#012member
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: node_config#012node4
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012node4#012True#012False#012False#012False#012True#012False#012member
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: all_rsc
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012dlm-clone#012o2cb-clone#012rsa-fencing-1#012rsa-fencing-2#012rsa-fencing-3#012rsa-fencing-4
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm-clone
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: send msg: ok#012clone
Apr 12 14:05:54 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012dlm-clone
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012dlm:0#012dlm:1#012dlm:2#012dlm:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012dlm:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012dlm:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012dlm:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb-clone
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012clone
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: sub_rsc#012o2cb-clone
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012o2cb:0#012o2cb:1#012o2cb:2#012o2cb:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:0
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012o2cb:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012o2cb:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012not running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012o2cb:3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012node1
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-2
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012node3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-3
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012node4
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_type#012rsa-fencing-4
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012native
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_status#012rsa-fencing-4
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012running#0121000000
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: recv msg: rsc_running_on#012rsa-fencing-4
Apr 12 14:05:55 node1 mgmtd: [2474]: debug: send msg: ok#012node1
Apr 12 14:06:04 node1 mgmtd: [2474]: debug: recv msg: logout
Apr 12 14:07:21 node1 cib: [2469]: info: cib_stats: Processed 167 operations (179.00us average, 0% utilization) in the last 10min
Apr 12 14:07:21 node1 cib: [2469]: debug: cib_stats: #011Detail: 167 operations (30ms total) (108 local, 102 updates, 0 failures, 0 timeouts, 0 bad connects)