[ClusterLabs] Location not working

Miro Igov miro.igov at pharmya.com
Mon Apr 10 07:56:19 EDT 2023


<cib crm_feature_set="3.2.1" validate-with="pacemaker-3.2" epoch="508" num_updates="0" admin_epoch="0" cib-last-written="Mon Apr 10 13:16:40 2023" update-origin="nas-sync-test1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">

  <configuration>

    <crm_config>

      <cluster_property_set id="cib-bootstrap-options">

        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="true"/>

        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-4b1f869f0f"/>

        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>

        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="debian"/>

        <nvpair id="cib-bootstrap-options-stonith-timeout" name="stonith-timeout" value="24"/>

        <nvpair name="stonith-enabled" value="true" id="cib-bootstrap-options-stonith-enabled"/>

        <nvpair name="concurrent-fencing" value="true" id="cib-bootstrap-options-concurrent-fencing"/>

        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1681125165"/>

      </cluster_property_set>

    </crm_config>

    <nodes>

      <node id="1" uname="nas-sync-test1">

        <instance_attributes id="nodes-1">

          <nvpair id="nodes-1-standby" name="standby" value="off"/>

        </instance_attributes>

      </node>

      <node id="2" uname="nas-sync-test2">

        <instance_attributes id="nodes-2">

          <nvpair id="nodes-2-standby" name="standby" value="off"/>

        </instance_attributes>

      </node>

      <node id="3" uname="intranet-test1">

        <instance_attributes id="nodes-3">

          <nvpair id="nodes-3-standby" name="standby" value="off"/>

        </instance_attributes>

      </node>

      <node id="4" uname="intranet-test2">

        <instance_attributes id="nodes-4">

          <nvpair id="nodes-4-standby" name="standby" value="off"/>

        </instance_attributes>

      </node>

    </nodes>

    <resources>

      <primitive id="stonith-sbd" class="stonith" type="external/sbd">

        <instance_attributes id="stonith-sbd-instance_attributes">

          <nvpair name="pcmk_delay_max" value="30" id="stonith-sbd-instance_attributes-pcmk_delay_max"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" timeout="30s" id="stonith-sbd-monitor-10s"/>

        </operations>

      </primitive>

      <primitive id="admin-ip" class="ocf" provider="heartbeat" type="IPaddr2">

        <instance_attributes id="admin-ip-instance_attributes">

          <nvpair name="ip" value="192.168.2.58" id="admin-ip-instance_attributes-ip"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10" timeout="20" id="admin-ip-monitor-10"/>

        </operations>

      </primitive>

      <primitive id="cron_symlink" class="ocf" provider="heartbeat" type="symlink">

        <instance_attributes id="cron_symlink-instance_attributes">

          <nvpair name="target" value="/home/pharmya/crontab" id="cron_symlink-instance_attributes-target"/>

          <nvpair name="link" value="/etc/cron.d/intranet" id="cron_symlink-instance_attributes-link"/>

          <nvpair name="backup_suffix" value=".disabled" id="cron_symlink-instance_attributes-backup_suffix"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" id="cron_symlink-monitor-10s"/>

        </operations>

        <meta_attributes id="cron_symlink-meta_attributes">

          <nvpair id="cron_symlink-meta_attributes-target-role" name="target-role" value="Started"/>

        </meta_attributes>

      </primitive>

      <primitive id="intranet-ip" class="ocf" provider="heartbeat" type="IPaddr2">

        <instance_attributes id="intranet-ip-instance_attributes">

          <nvpair name="ip" value="192.168.2.50" id="intranet-ip-instance_attributes-ip"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10" timeout="20" id="intranet-ip-monitor-10"/>

        </operations>

        <meta_attributes id="intranet-ip-meta_attributes">

          <nvpair id="intranet-ip-meta_attributes-target-role" name="target-role" value="Started"/>

       </meta_attributes>

      </primitive>

      <primitive id="mysql_1" class="systemd" type="mariadb at intranet-test1">

        <operations>

          <op name="monitor" interval="15s" id="mysql_1-monitor-15s"/>

        </operations>

        <meta_attributes id="mysql_1-meta_attributes">

          <nvpair name="target-role" value="Started" id="mysql_1-meta_attributes-target-role"/>

          <nvpair name="is-managed" value="true" id="mysql_1-meta_attributes-is-managed"/>

        </meta_attributes>

      </primitive>

      <primitive id="mysql_2" class="systemd" type="mariadb at intranet-test2">

        <operations>

          <op name="monitor" interval="15s" id="mysql_2-monitor-15s"/>

        </operations>

        <meta_attributes id="mysql_2-meta_attributes">

          <nvpair name="target-role" value="Started" id="mysql_2-meta_attributes-target-role"/>

          <nvpair name="is-managed" value="true" id="mysql_2-meta_attributes-is-managed"/>

        </meta_attributes>

      </primitive>

      <primitive id="nginx_1" class="systemd" type="nginx at intranet-test1">

        <operations>

          <op name="monitor" interval="15s" id="nginx_1-monitor-15s"/>

        </operations>

        <meta_attributes id="nginx_1-meta_attributes">

          <nvpair name="target-role" value="Started" id="nginx_1-meta_attributes-target-role"/>

        </meta_attributes>

      </primitive>

      <primitive id="nginx_1_active" class="ocf" provider="pacemaker" type="attribute">

        <instance_attributes id="nginx_1_active-instance_attributes">

          <nvpair name="active_value" value="1" id="nginx_1_active-instance_attributes-active_value"/>

          <nvpair name="inactive_value" value="0" id="nginx_1_active-instance_attributes-inactive_value"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" timeout="20s" id="nginx_1_active-monitor-10s"/>

        </operations>

      </primitive>

      <primitive id="nginx_2" class="systemd" type="nginx at intranet-test2">

        <operations>

          <op name="monitor" interval="15s" id="nginx_2-monitor-15s"/>

        </operations>

        <meta_attributes id="nginx_2-meta_attributes">

          <nvpair name="target-role" value="Started" id="nginx_2-meta_attributes-target-role"/>

          <nvpair name="is-managed" value="true" id="nginx_2-meta_attributes-is-managed"/>

        </meta_attributes>

      </primitive>

      <primitive id="nginx_2_active" class="ocf" provider="pacemaker" type="attribute">

        <instance_attributes id="nginx_2_active-instance_attributes">

          <nvpair name="active_value" value="1" id="nginx_2_active-instance_attributes-active_value"/>

          <nvpair name="inactive_value" value="0" id="nginx_2_active-instance_attributes-inactive_value"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" timeout="20s" id="nginx_2_active-monitor-10s"/>

        </operations>

        <meta_attributes id="nginx_2_active-meta_attributes">

          <nvpair id="nginx_2_active-meta_attributes-target-role" name="target-role" value="Started"/>

        </meta_attributes>

      </primitive>

      <primitive id="php_1" class="systemd" type="php5.6-fpm at intranet-test1">

        <operations>

          <op name="monitor" interval="15s" id="php_1-monitor-15s"/>

        </operations>

        <meta_attributes id="php_1-meta_attributes">

          <nvpair name="target-role" value="Started" id="php_1-meta_attributes-target-role"/>

        </meta_attributes>

      </primitive>

      <primitive id="php_2" class="systemd" type="php5.6-fpm at intranet-test2">

        <operations>

          <op name="monitor" interval="15s" id="php_2-monitor-15s"/>

        </operations>

        <meta_attributes id="php_2-meta_attributes">

          <nvpair name="is-managed" value="true" id="php_2-meta_attributes-is-managed"/>

          <nvpair name="target-role" value="Started" id="php_2-meta_attributes-target-role"/>

        </meta_attributes>

      </primitive>

      <primitive id="data_1" class="ocf" provider="heartbeat" type="Filesystem">

        <instance_attributes id="data_1-instance_attributes">

          <nvpair name="device" value="nas-sync-test1:/home/pharmya/NAS" id="data_1-instance_attributes-device"/>

          <nvpair name="fstype" value="nfs" id="data_1-instance_attributes-fstype"/>

          <nvpair name="options" value="v4" id="data_1-instance_attributes-options"/>

          <nvpair name="directory" value="/data/synology/pharmya_office/NAS_Sync/NAS" id="data_1-instance_attributes-directory"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" id="data_1-monitor-10s"/>

        </operations>

      </primitive>

      <primitive id="data_2" class="ocf" provider="heartbeat" type="Filesystem">

        <instance_attributes id="data_2-instance_attributes">

          <nvpair name="device" value="nas-sync-test2:/home/pharmya/NAS" id="data_2-instance_attributes-device"/>

          <nvpair name="fstype" value="nfs" id="data_2-instance_attributes-fstype"/>

          <nvpair name="options" value="v4" id="data_2-instance_attributes-options"/>

          <nvpair name="directory" value="/data/synology/pharmya_office/NAS_Sync/NAS" id="data_2-instance_attributes-directory"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="10s" id="data_2-monitor-10s"/>

        </operations>

      </primitive>

      <primitive id="nfs_export_1" class="ocf" provider="heartbeat" type="exportfs">

        <instance_attributes id="nfs_export_1-instance_attributes">

          <nvpair name="directory" value="/home/pharmya/NAS" id="nfs_export_1-instance_attributes-directory"/>

          <nvpair name="options" value="rw" id="nfs_export_1-instance_attributes-options"/>

          <nvpair name="clientspec" value="192.168.2.48/29" id="nfs_export_1-instance_attributes-clientspec"/>

          <nvpair name="fsid" value="1" id="nfs_export_1-instance_attributes-fsid"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="30s" id="nfs_export_1-monitor-30s"/>

        </operations>

        <meta_attributes id="nfs_export_1-meta_attributes">

          <nvpair name="target-role" value="Started" id="nfs_export_1-meta_attributes-target-role"/>

          <nvpair name="is-managed" value="true" id="nfs_export_1-meta_attributes-is-managed"/>

        </meta_attributes>

      </primitive>

      <primitive id="nfs_export_2" class="ocf" provider="heartbeat" type="exportfs">

        <instance_attributes id="nfs_export_2-instance_attributes">

          <nvpair name="directory" value="/home/pharmya/NAS" id="nfs_export_2-instance_attributes-directory"/>

          <nvpair name="options" value="rw" id="nfs_export_2-instance_attributes-options"/>

          <nvpair name="clientspec" value="192.168.2.48/29" id="nfs_export_2-instance_attributes-clientspec"/>

          <nvpair name="fsid" value="1" id="nfs_export_2-instance_attributes-fsid"/>

        </instance_attributes>

        <operations>

          <op name="monitor" interval="30s" id="nfs_export_2-monitor-30s"/>

        </operations>

        <meta_attributes id="nfs_export_2-meta_attributes">

          <nvpair name="target-role" value="Started" id="nfs_export_2-meta_attributes-target-role"/>

        </meta_attributes>

      </primitive>

      <primitive id="nfs_server_1" class="systemd" type="nfs-server at nas-sync-test1">

        <operations>

          <op name="monitor" interval="30s" id="nfs_server_1-monitor-30s"/>

        </operations>

        <meta_attributes id="nfs_server_1-meta_attributes">

          <nvpair name="target-role" value="Started" id="nfs_server_1-meta_attributes-target-role"/>

        </meta_attributes>

      </primitive>

      <primitive id="nfs_server_2" class="systemd" type="nfs-server at nas-sync-test2">

        <operations>

          <op name="monitor" interval="30s" id="nfs_server_2-monitor-30s"/>

        </operations>

      </primitive>

    </resources>

    <constraints>

      <rsc_colocation id="attribute_1" score="INFINITY" rsc="nginx_1_active" with-rsc="nginx_1"/>

      <rsc_colocation id="attribute_2" score="INFINITY" rsc="nginx_2_active" with-rsc="nginx_2"/>

      <rsc_colocation id="c_cron_symlink_on_intranet-ip" score="INFINITY" rsc="cron_symlink" with-rsc="intranet-ip"/>

      <rsc_location id="intranet-ip_loc" rsc="intranet-ip">

        <rule score="-INFINITY" id="intranet-ip_loc-rule">

          <expression operation="ne" attribute="#uname" value="intranet-test1" id="intranet-ip_loc-rule-expression"/>

          <expression operation="ne" attribute="#uname" value="intranet-test2" id="intranet-ip_loc-rule-expression-0"/>

        </rule>

      </rsc_location>

      <rsc_order id="intranet_1_order" kind="Mandatory">

        <resource_set sequential="false" id="intranet_1_order-0">

          <resource_ref id="data_1"/>

          <resource_ref id="mysql_1"/>

          <resource_ref id="php_1"/>

        </resource_set>

        <resource_set id="intranet_1_order-1">

          <resource_ref id="nginx_1"/>

        </resource_set>

      </rsc_order>

      <rsc_colocation id="intranet_1_resources" score="INFINITY">

        <resource_set id="intranet_1_resources-0">

          <resource_ref id="nginx_1"/>

        </resource_set>

        <resource_set sequential="false" id="intranet_1_resources-1">

          <resource_ref id="data_1"/>

          <resource_ref id="mysql_1"/>

          <resource_ref id="php_1"/>

        </resource_set>

      </rsc_colocation>

      <rsc_order id="mount_1" kind="Mandatory">

        <resource_set id="mount_1-0">

          <resource_ref id="nfs_server_1"/>

          <resource_ref id="nfs_export_1"/>

          <resource_ref id="data_1"/>

        </resource_set>

      </rsc_order>

      <rsc_order id="nginx_1_active_ord" kind="Mandatory" first="nginx_1" then="nginx_1_active">

        <!--#location mysql_1_loc mysql_1 #     rule -inf: #uname ne intranet-test1-->

        <!--#location mysql_2_loc mysql_2 #     rule -inf: #uname ne intranet-test2-->

      </rsc_order>

      <rsc_order id="nginx_2_active_ord" kind="Mandatory" first="nginx_2" then="nginx_2_active"/>

      <rsc_location id="resources_1" rsc="intranet-test1_resources">

        <rule score="-INFINITY" id="resources_1-rule">

          <expression operation="ne" attribute="#uname" value="intranet-test1" id="resources_1-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_location id="resources_2" rsc="intranet-test2_resources">

        <rule score="-INFINITY" id="resources_2-rule">

          <expression operation="ne" attribute="#uname" value="intranet-test2" id="resources_2-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_order id="intranet_2_order" kind="Mandatory">

        <resource_set sequential="false" id="intranet_2_order-0">

          <resource_ref id="data_2"/>

          <resource_ref id="mysql_2"/>

          <resource_ref id="php_2"/>

        </resource_set>

        <resource_set id="intranet_2_order-1">

          <resource_ref id="nginx_2"/>

        </resource_set>

      </rsc_order>

      <rsc_colocation id="intranet_2_resources" score="INFINITY">

        <resource_set id="intranet_2_resources-0">

          <resource_ref id="nginx_2"/>

        </resource_set>

        <resource_set sequential="false" id="intranet_2_resources-1">

          <resource_ref id="data_2"/>

          <resource_ref id="mysql_2"/>

          <resource_ref id="php_2"/>

        </resource_set>

      </rsc_colocation>

      <rsc_order id="intranet-ip_order" kind="Mandatory">

        <resource_set require-all="false" sequential="false" id="intranet-ip_order-0">

          <resource_ref id="nginx_1"/>

          <resource_ref id="nginx_2"/>

        </resource_set>

        <resource_set id="intranet-ip_order-1">

          <resource_ref id="intranet-ip"/>

        </resource_set>

      </rsc_order>

      <rsc_colocation id="co_nfs_1" score="INFINITY" rsc="nfs_export_1" with-rsc="nfs_server_1"/>

      <rsc_colocation id="co_nfs_2" score="INFINITY" rsc="nfs_export_2" with-rsc="nfs_server_2"/>

      <rsc_location id="l_data_1" rsc="data_1">

        <rule score="-INFINITY" id="l_data_1-rule">

          <expression operation="ne" attribute="#uname" value="intranet-test1" id="l_data_1-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_location id="l_data_2" rsc="data_2">

        <rule score="-INFINITY" id="l_data_2-rule">

          <expression operation="ne" attribute="#uname" value="intranet-test2" id="l_data_2-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_order id="mount_2" kind="Mandatory">

        <resource_set id="mount_2-0">

          <resource_ref id="nfs_server_2"/>

          <resource_ref id="nfs_export_2"/>

          <resource_ref id="data_2"/>

        </resource_set>

      </rsc_order>

      <rsc_location id="deny_sync" rsc="sync">

        <rule score="-INFINITY" id="deny_sync-rule">

          <expression operation="ne" attribute="#uname" value="nas-sync-test1" id="deny_sync-rule-expression"/>

          <expression operation="ne" attribute="#uname" value="nas-sync-test2" id="deny_sync-rule-expression-0"/>

        </rule>

      </rsc_location>

      <rsc_location id="l_nfs_server_1" rsc="nfs_server_1">

        <rule score="-INFINITY" id="l_nfs_server_1-rule">

          <expression operation="ne" attribute="#uname" value="nas-sync-test1" id="l_nfs_server_1-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_location id="l_nfs_server_2" rsc="nfs_server_2">

        <rule score="-INFINITY" id="l_nfs_server_2-rule">

          <expression operation="ne" attribute="#uname" value="nas-sync-test2" id="l_nfs_server_2-rule-expression"/>

        </rule>

      </rsc_location>

      <rsc_location id="mount_on_intranet" rsc="all_mounts">

        <rule score="-INFINITY" boolean-op="or" id="mount_on_intranet-rule">

          <expression operation="eq" attribute="#uname" value="nas-sync-test1" id="mount_on_intranet-rule-expression"/>

          <expression operation="eq" attribute="#uname" value="nas-sync-test2" id="mount_on_intranet-rule-expression-0"/>

        </rule>

      </rsc_location>

      <rsc_location id="intranet-ip_on_any_nginx" rsc="intranet-ip">

        <rule score="-INFINITY" id="intranet-ip_on_any_nginx-rule">

          <expression operation="eq" attribute="opa-nginx_1_active" value="0" id="intranet-ip_on_any_nginx-rule-expression"/>

        </rule>

        <rule score="-INFINITY" id="intranet-ip_on_any_nginx-rule-0">

          <expression operation="eq" attribute="opa-nginx_2_active" value="0" id="intranet-ip_on_any_nginx-rule-0-expression"/>

        </rule>

      </rsc_location>

    </constraints>

    <rsc_defaults>

      <meta_attributes id="rsc-options">

        <nvpair name="resource-stickiness" value="10" id="rsc-options-resource-stickiness"/>

        <nvpair name="migration-threshold" value="5" id="rsc-options-migration-threshold"/>

      </meta_attributes>

    </rsc_defaults>

    <tags>

      <tag id="intranet-test1_resources">

        <obj_ref id="nginx_1"/>

        <obj_ref id="mysql_1"/>

        <obj_ref id="php_1"/>

      </tag>

      <tag id="intranet-test2_resources">

        <obj_ref id="nginx_2"/>

        <obj_ref id="mysql_2"/>

       <obj_ref id="php_2"/>

      </tag>

      <tag id="all_mounts">

        <obj_ref id="data_1"/>

        <obj_ref id="data_2"/>

      </tag>

      <tag id="sync">

        <obj_ref id="nfs_server_1"/>

        <obj_ref id="nfs_export_1"/>

        <obj_ref id="nfs_server_2"/>

        <obj_ref id="nfs_export_2"/>

      </tag>

    </tags>

  </configuration>

</cib>
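
(Side note, not from the original message: a CIB dump like the one above can be re-validated offline with standard Pacemaker tools before it is loaded back; the file path below is only an example.)

# cibadmin --query > /tmp/cib.xml

# crm_verify --xml-file /tmp/cib.xml -V

cibadmin saves the running CIB to a file, and crm_verify reports any schema or configuration errors it finds in the saved copy.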

 

From: Users <users-bounces at clusterlabs.org> On Behalf Of Andrei Borzenkov
Sent: 10 April 2023 14:49
To: Cluster Labs - All topics related to open-source clustering welcomed <users at clusterlabs.org>
Subject: Re: [ClusterLabs] Location not working

 

On Mon, Apr 10, 2023 at 2:19 PM Miro Igov <miro.igov at pharmya.com> wrote:

Hello,

I have a resource with a location constraint set to:

 

location intranet-ip_on_any_nginx intranet-ip \

        rule -inf: opa-nginx_1_active eq 0 \

        rule -inf: opa-nginx_2_active eq 0

 

In syslog I see the attribute transition:

Apr 10 12:11:02 intranet-test2 pacemaker-attrd[1511]:  notice: Setting opa-nginx_1_active[intranet-test1]: 1 -> 0
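
(Not part of the original message: a quick sanity check with standard Pacemaker CLI tools, reusing the attribute and node names from above, to see what value the cluster currently holds for that transient attribute.)

# attrd_updater --query --name opa-nginx_1_active --node intranet-test1

# crm_attribute --query --name opa-nginx_1_active --node intranet-test1 --lifetime reboot

The first command asks pacemaker-attrd directly; the second reads the transient attribute from the status section of the CIB.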

 

Current cluster status is:

 

Node List:

  * Online: [ intranet-test1 intranet-test2 nas-sync-test1 nas-sync-test2 ]

 

  * stonith-sbd (stonith:external/sbd):  Started intranet-test2

  * admin-ip    (ocf::heartbeat:IPaddr2):        Started nas-sync-test2

  * cron_symlink        (ocf::heartbeat:symlink):        Started intranet-test1

  * intranet-ip (ocf::heartbeat:IPaddr2):        Started intranet-test1

  * mysql_1     (systemd:mariadb@intranet-test1):        Started intranet-test1

  * mysql_2     (systemd:mariadb@intranet-test2):        Started intranet-test2

  * nginx_1     (systemd:nginx@intranet-test1):  Stopped

  * nginx_1_active      (ocf::pacemaker:attribute):      Stopped

  * nginx_2     (systemd:nginx@intranet-test2):  Started intranet-test2

  * nginx_2_active      (ocf::pacemaker:attribute):      Started intranet-test2

  * php_1       (systemd:php5.6-fpm@intranet-test1):     Started intranet-test1

  * php_2       (systemd:php5.6-fpm@intranet-test2):     Started intranet-test2

  * data_1      (ocf::heartbeat:Filesystem):     Stopped

  * data_2      (ocf::heartbeat:Filesystem):     Started intranet-test2

  * nfs_export_1        (ocf::heartbeat:exportfs):       Stopped

  * nfs_export_2        (ocf::heartbeat:exportfs):       Started nas-sync-test2

  * nfs_server_1        (systemd:nfs-server@nas-sync-test1):     Stopped

  * nfs_server_2        (systemd:nfs-server@nas-sync-test2):     Started nas-sync-test2

 

Failed Resource Actions:

  * nfs_server_1_start_0 on nas-sync-test1 'error' (1): call=95, status='complete', exitreason='', last-rc-change='2023-04-10 12:35:12 +02:00', queued=0ms, exec=209ms

 

 

Why is intranet-ip located on intranet-test1 while nginx_1_active is 0?

 

# crm res constraint intranet-ip

    cron_symlink                                                                 (score=INFINITY, id=c_cron_symlink_on_intranet-ip)

* intranet-ip

  : Node nas-sync-test2                                                          (score=-INFINITY, id=intranet-ip_loc-rule)

  : Node nas-sync-test1                                                          (score=-INFINITY, id=intranet-ip_loc-rule)

 

Why is there no constraint entry for the intranet-ip_on_any_nginx location?
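
(A generic crmsh check, not from the original post: whether the constraint object is really present in the live configuration can be confirmed by showing it by id.)

# crm configure show intranet-ip_on_any_nginx

If the object exists, crmsh prints its definition, rules included.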

 

 

It is impossible to answer based on the fragments of information you provided. The full output of "crm config show" (or, even better, the full CIB) may give some hint.
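
(For reference, standard commands that capture exactly that; the file names are only examples, and the XPath simply narrows the query to the constraint id used in this thread.)

# crm configure show > crm-config.txt

# cibadmin --query > cib.xml

# cibadmin --query --xpath "//rsc_location[@id='intranet-ip_on_any_nginx']"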


-- 

This message has been sent as a part of discussion between PHARMYA and the

addressee whose name is specified above. Should you receive this message by

mistake, we would be most grateful if you informed us that the message has

been sent to you. In this case, we also ask that you delete this message

from your mailbox, and do not forward it or any part of it to anyone else.

Thank you for your cooperation and understanding.


