[ClusterLabs] cloned resource not deployed on all matching nodes

Radoslaw Garbacz radoslaw.garbacz at xtremedatainc.com
Tue Mar 28 14:26:32 EDT 2017


Hi,

I have a situation where a cloned resource is deployed on only some of the
nodes, even though it is configured much like other resources that are
placed correctly according to their location rules.

Please take a look at the configuration below and let me know if there is
anything I can do to make the resource "dbx_nfs_mounts_datas" (the
primitive inside "dbx_nfs_mounts_datas-clone") run on all 4 nodes that
match its location rules.
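
In case it helps with the analysis, I assume the allocation scores the
policy engine computes for this clone can be inspected with something like
the following (crm_simulate -L reads the live CIB, -s prints the scores;
the grep is only there to narrow the output):

    # show where the policy engine would place resources, with allocation scores
    crm_simulate -sL | grep dbx_nfs_mounts_datas

    # one-shot view of the current resource status
    crm_mon -1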


Thanks in advance,



* Configuration:
** Nodes:
    <nodes>
      <node uname="ip-10-180-224-134" id="4">
        <instance_attributes id="nodes-4">
          <nvpair id="nodes-4-STATE" value="Active" name="STATE"/>
          <nvpair id="nodes-4-ROLE" value="AD" name="ROLE"/>
          <nvpair id="nodes-4-Primary" value="True" name="Primary"/>
        </instance_attributes>
      </node>
      <node uname="ip-10-183-39-69" id="5">
        <instance_attributes id="nodes-5">
          <nvpair id="nodes-5-STATE" value="Active" name="STATE"/>
          <nvpair id="nodes-5-ROLE" value="AD" name="ROLE"/>
          <nvpair id="nodes-5-Primary" value="True" name="Primary"/>
        </instance_attributes>
      </node>
      <node uname="ip-10-13-191-168" id="3">
        <instance_attributes id="nodes-3">
          <nvpair id="nodes-3-STATE" value="Active" name="STATE"/>
          <nvpair id="nodes-3-ROLE" value="AD" name="ROLE"/>
          <nvpair id="nodes-3-Primary" value="True" name="Primary"/>
        </instance_attributes>
      </node>
      <node uname="ip-10-180-227-53" id="2">
        <instance_attributes id="nodes-2">
          <nvpair id="nodes-2-STATE" value="Active" name="STATE"/>
          <nvpair id="nodes-2-ROLE" value="AD" name="ROLE"/>
          <nvpair id="nodes-2-Primary" value="True" name="Primary"/>
        </instance_attributes>
      </node>
      <node uname="ip-10-182-69-89" id="1">
        <instance_attributes id="nodes-1">
          <nvpair id="nodes-1-STATE" value="Active" name="STATE"/>
          <nvpair id="nodes-1-ROLE" value="AH" name="ROLE"/>
          <nvpair id="nodes-1-Primary" value="True" name="Primary"/>
        </instance_attributes>
      </node>
    </nodes>
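
(The STATE/ROLE values above are the node attributes that the location
rules below match on. I assume they can be verified per node with
crm_attribute, e.g. for one of the nodes where the clone does not start:)

    # query the permanent STATE/ROLE node attributes on the affected node
    crm_attribute --type nodes --node ip-10-183-39-69 --name STATE --query
    crm_attribute --type nodes --node ip-10-183-39-69 --name ROLE --query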



** Resource in question:
      <clone id="dbx_nfs_mounts_datas-clone">
        <primitive id="dbx_nfs_mounts_datas" type="dbx_mounts.ocf.sh"
class="ocf" provider="dbxcl">
          <instance_attributes
id="dbx_nfs_mounts_datas-instance_attributes">
             ...
          </instance_attributes>
          <operations>
             ...
          </operations>
        </primitive>
        <meta_attributes id="dbx_nfs_mounts_datas-meta_attributes">
          <nvpair name="target-role" value="Started"
id="dbx_nfs_mounts_datas-meta_attributes-target-role"/>
          <nvpair name="clone-max" value="4"
id="dbx_nfs_mounts_datas-meta_attributes-clone-max"/>
        </meta_attributes>
      </clone>
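
(The instance_attributes and operations are elided above. For reference, I
assume the clone's full stored definition, including the meta attributes,
can be dumped with something like:)

    # print the stored XML definition of the clone, including meta attributes
    crm_resource --resource dbx_nfs_mounts_datas-clone --query-xml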



** Resource location constraint:
      <rsc_location id="nodes_dbx_nfs_mounts_datas" rsc="dbx_nfs_mounts_datas">
        <rule score="INFINITY" id="on_nodes_dbx_nfs_mounts_datas-INFINITY"
              boolean-op="and">
          <expression attribute="STATE" operation="eq" type="string"
                      id="on_nodes_dbx_nfs_mounts_datas-INFINITY-0-expr" value="Active"/>
          <expression attribute="ROLE" operation="eq" type="string"
                      id="on_nodes_dbx_nfs_mounts_datas-INFINITY-1-expr" value="AD"/>
        </rule>
        <rule score="-INFINITY" id="on_nodes_dbx_nfs_mounts_datas--INFINITY"
              boolean-op="or">
          <expression attribute="STATE" operation="ne" type="string"
                      id="on_nodes_dbx_nfs_mounts_datas--INFINITY-0-expr" value="Active"/>
          <expression attribute="ROLE" operation="ne" type="string"
                      id="on_nodes_dbx_nfs_mounts_datas--INFINITY-1-expr" value="AD"/>
        </rule>
      </rsc_location>
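
For readability, my understanding is that the above is roughly equivalent
to the following pcs rules (just a sketch, assuming pcs is in use; in the
actual CIB both rules live in the single rsc_location constraint shown
above):

    pcs constraint location dbx_nfs_mounts_datas rule score=INFINITY \
        STATE eq Active and ROLE eq AD
    pcs constraint location dbx_nfs_mounts_datas rule score=-INFINITY \
        STATE ne Active or ROLE ne AD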



** Status on a node where the resource deployed properly (successful start,
rc-code="0", plus a recurring monitor):
          <lrm_resource id="dbx_nfs_mounts_datas" type="dbx_mounts.ocf.sh"
                        class="ocf" provider="dbxcl">
            <lrm_rsc_op id="dbx_nfs_mounts_datas_last_0"
                        operation_key="dbx_nfs_mounts_datas_start_0" operation="start"
                        crm-debug-origin="do_update_resource" crm_feature_set="3.0.12"
                        transition-key="156:0:0:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        transition-magic="0:0;156:0:0:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        on_node="ip-10-180-227-53" call-id="85" rc-code="0" op-status="0"
                        interval="0" last-run="1490720995" last-rc-change="1490720995"
                        exec-time="733" queue-time="0"
                        op-digest="e95785e3e2d043b0bda24c5bd4655317" op-force-restart=""
                        op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
            <lrm_rsc_op id="dbx_nfs_mounts_datas_monitor_137000"
                        operation_key="dbx_nfs_mounts_datas_monitor_137000" operation="monitor"
                        crm-debug-origin="do_update_resource" crm_feature_set="3.0.12"
                        transition-key="157:0:0:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        transition-magic="0:0;157:0:0:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        on_node="ip-10-180-227-53" call-id="86" rc-code="0" op-status="0"
                        interval="137000" last-rc-change="1490720995" exec-time="172"
                        queue-time="0" op-digest="a992d78564e6b3942742da0859d8c734"/>
          </lrm_resource>



** Status on a node where it did not deploy (only the initial probe,
monitor_0, which returned rc-code="7" / not running; no start operation was
recorded):
          <lrm_resource id="dbx_nfs_mounts_datas" type="dbx_mounts.ocf.sh"
                        class="ocf" provider="dbxcl">
            <lrm_rsc_op id="dbx_nfs_mounts_datas_last_0"
                        operation_key="dbx_nfs_mounts_datas_monitor_0" operation="monitor"
                        crm-debug-origin="do_update_resource" crm_feature_set="3.0.12"
                        transition-key="73:0:7:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        transition-magic="0:7;73:0:7:d817e2a2-50fb-4462-bd6b-118d1d7b8ecd"
                        on_node="ip-10-183-39-69" call-id="39" rc-code="7" op-status="0"
                        interval="0" last-run="1490720950" last-rc-change="1490720950"
                        exec-time="172" queue-time="0"
                        op-digest="e95785e3e2d043b0bda24c5bd4655317" op-force-restart=""
                        op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
          </lrm_resource>
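
Would clearing the resource's operation history on that node, so it gets
re-probed and the placement re-evaluated, be a reasonable thing to try? I
assume that would be something like (resource and node names as above):

    # forget the resource's history on that node and force a fresh probe
    crm_resource --cleanup --resource dbx_nfs_mounts_datas --node ip-10-183-39-69

    # then re-check the allocation scores
    crm_simulate -sL | grep dbx_nfs_mounts_datas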



-- 
Best Regards,

Radoslaw Garbacz
XtremeData Incorporated