[ClusterLabs] resource-stickiness

Rakovec Jost Jost.Rakovec at snt.si
Thu Aug 27 07:42:11 UTC 2015


Hi


It doesn't work as I expected. I changed the constraint name to:

location loc-aapche-sles1 aapche role=Started 10: sles1


but after I manually move the resource via HAWK to the other node, it automatically adds this line:

location cli-prefer-aapche aapche role=Started inf: sles1


So now I have both lines:

location cli-prefer-aapche aapche role=Started inf: sles1
location loc-aapche-sles1 aapche role=Started 10: sles1


With both constraints in place, resource-stickiness has no effect: after node1 is fenced and comes back, the resource moves back to node1, which is exactly what I don't want. I know I can remove the line that the cluster added, but that doesn't seem like the proper solution. Please tell me what is wrong. Thanks.
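As far as I can tell, the scores explain the fail-back: the cli-prefer line gives sles1 a score of INFINITY, while staying on sles2 is only worth the stickiness of 1000, so sles1 wins as soon as it is back. The only workaround I know is to clear the temporary constraint by hand, e.g. with the crm shell (a sketch; on newer crmsh the subcommand may be spelled unmove or clear):

    # tell the cluster the manual move is over (drops cli-prefer-aapche)
    crm resource unmigrate aapche
    # or remove the constraint directly by its id
    crm configure delete cli-prefer-aapche

My config: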

node sles1
node sles2
primitive filesystem Filesystem \
        params fstype=ext3 directory="/srv/www/vhosts" device="/dev/xvdd1" \
        op start interval=0 timeout=60 \
        op stop interval=0 timeout=60 \
        op monitor interval=20 timeout=40
primitive myip IPaddr2 \
        params ip=10.9.131.86 \
        op start interval=0 timeout=20s \
        op stop interval=0 timeout=20s \
        op monitor interval=10s timeout=20s
primitive stonith_sbd stonith:external/sbd \
        params pcmk_delay_max=30
primitive web apache \
        params configfile="/etc/apache2/httpd.conf" \
        op start interval=0 timeout=40s \
        op stop interval=0 timeout=60s \
        op monitor interval=10 timeout=20s
group aapche filesystem myip web \
        meta target-role=Started is-managed=true resource-stickiness=1000
location cli-prefer-aapche aapche role=Started inf: sles1
location loc-aapche-sles1 aapche role=Started 10: sles1
property cib-bootstrap-options: \
        stonith-enabled=true \
        no-quorum-policy=ignore \
        placement-strategy=balanced \
        expected-quorum-votes=2 \
        dc-version=1.1.12-f47ea56 \
        cluster-infrastructure="classic openais (with plugin)" \
        last-lrm-refresh=1440502955 \
        stonith-timeout=40s
rsc_defaults rsc-options: \
        resource-stickiness=1000 \
        migration-threshold=3
op_defaults op-options: \
        timeout=600 \
        record-pending=true


BR

Jost



________________________________________
From: Andrew Beekhof <andrew at beekhof.net>
Sent: Thursday, August 27, 2015 12:20 AM
To: Cluster Labs - All topics related to open-source clustering welcomed
Subject: Re: [ClusterLabs] resource-stickiness

> On 26 Aug 2015, at 10:09 pm, Rakovec Jost <Jost.Rakovec at snt.si> wrote:
>
> Sorry, one typo: the problem is the same....
>
>
> location cli-prefer-aapche aapche role=Started 10: sles2

Change the name of your constraint.
The 'cli-prefer-' prefix is reserved for "temporary" constraints created by the command-line tools (which therefore feel entitled to delete them as necessary).
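
For example (untested sketch), recreate your preference under an id the tools won't touch and drop the generated one:

    crm configure delete cli-prefer-aapche
    crm configure location prefer-sles2 aapche role=Started 10: sles2

Any id that doesn't start with 'cli-' should be fine.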

>
> to:
>
> location cli-prefer-aapche aapche role=Started inf: sles2
>
>
> It keeps changing to infinity.
>
>
>
> my configuration is:
>
> node sles1
> node sles2
> primitive filesystem Filesystem \
>        params fstype=ext3 directory="/srv/www/vhosts" device="/dev/xvdd1" \
>        op start interval=0 timeout=60 \
>        op stop interval=0 timeout=60 \
>        op monitor interval=20 timeout=40
> primitive myip IPaddr2 \
>        params ip=x.x.x.x \
>        op start interval=0 timeout=20s \
>        op stop interval=0 timeout=20s \
>        op monitor interval=10s timeout=20s
> primitive stonith_sbd stonith:external/sbd \
>        params pcmk_delay_max=30
> primitive web apache \
>        params configfile="/etc/apache2/httpd.conf" \
>        op start interval=0 timeout=40s \
>        op stop interval=0 timeout=60s \
>        op monitor interval=10 timeout=20s
> group aapche filesystem myip web \
>        meta target-role=Started is-managed=true resource-stickiness=1000
> location cli-prefer-aapche aapche role=Started 10: sles2
> property cib-bootstrap-options: \
>        stonith-enabled=true \
>        no-quorum-policy=ignore \
>        placement-strategy=balanced \
>        expected-quorum-votes=2 \
>        dc-version=1.1.12-f47ea56 \
>        cluster-infrastructure="classic openais (with plugin)" \
>        last-lrm-refresh=1440502955 \
>        stonith-timeout=40s
> rsc_defaults rsc-options: \
>        resource-stickiness=1000 \
>        migration-threshold=3
> op_defaults op-options: \
>        timeout=600 \
>        record-pending=true
>
>
>
> and after migration:
>
>
> node sles1
> node sles2
> primitive filesystem Filesystem \
>        params fstype=ext3 directory="/srv/www/vhosts" device="/dev/xvdd1" \
>        op start interval=0 timeout=60 \
>        op stop interval=0 timeout=60 \
>        op monitor interval=20 timeout=40
> primitive myip IPaddr2 \
>        params ip=10.9.131.86 \
>        op start interval=0 timeout=20s \
>        op stop interval=0 timeout=20s \
>        op monitor interval=10s timeout=20s
> primitive stonith_sbd stonith:external/sbd \
>        params pcmk_delay_max=30
> primitive web apache \
>        params configfile="/etc/apache2/httpd.conf" \
>        op start interval=0 timeout=40s \
>        op stop interval=0 timeout=60s \
>        op monitor interval=10 timeout=20s
> group aapche filesystem myip web \
>        meta target-role=Started is-managed=true resource-stickiness=1000
> location cli-prefer-aapche aapche role=Started inf: sles2
> property cib-bootstrap-options: \
>        stonith-enabled=true \
>        no-quorum-policy=ignore \
>        placement-strategy=balanced \
>        expected-quorum-votes=2 \
>        dc-version=1.1.12-f47ea56 \
>        cluster-infrastructure="classic openais (with plugin)" \
>        last-lrm-refresh=1440502955 \
>        stonith-timeout=40s
> rsc_defaults rsc-options: \
>        resource-stickiness=1000 \
>        migration-threshold=3
> op_defaults op-options: \
>        timeout=600 \
>        record-pending=true
>
>
> From: Rakovec Jost
> Sent: Wednesday, August 26, 2015 1:33 PM
> To: users at clusterlabs.org
> Subject: resource-stickiness
>
> Hi list,
>
>
> I have configured a simple cluster on SLES 11 SP4 and have a problem with "auto_failover off". The problem is that whenever I migrate the resource group via HAWK, my configuration changes from:
>
> location cli-prefer-aapche aapche role=Started 10: sles2
>
> to:
>
> location cli-ban-aapche-on-sles1 aapche role=Started -inf: sles1
>
>
> It keeps changing to -inf.
>
>
> and then after the node is fenced, the resource moves back to the original node, which I don't want. How can I avoid this situation?
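>
> For what it's worth, I think HAWK's migrate is just a front end for
> crm_resource, along the lines of (a sketch, not verified):
>
>     crm_resource --resource aapche --move --node sles2   # writes cli-prefer-aapche
>     crm_resource --resource aapche --ban --node sles1    # writes cli-ban-aapche-on-sles1
>
> which would explain the constraint ids that show up in the CIB.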
>
> my configuration is:
>
> node sles1
> node sles2
> primitive filesystem Filesystem \
>        params fstype=ext3 directory="/srv/www/vhosts" device="/dev/xvdd1" \
>        op start interval=0 timeout=60 \
>        op stop interval=0 timeout=60 \
>        op monitor interval=20 timeout=40
> primitive myip IPaddr2 \
>        params ip=x.x.x.x \
>        op start interval=0 timeout=20s \
>        op stop interval=0 timeout=20s \
>        op monitor interval=10s timeout=20s
> primitive stonith_sbd stonith:external/sbd \
>        params pcmk_delay_max=30
> primitive web apache \
>        params configfile="/etc/apache2/httpd.conf" \
>        op start interval=0 timeout=40s \
>        op stop interval=0 timeout=60s \
>        op monitor interval=10 timeout=20s
> group aapche filesystem myip web \
>        meta target-role=Started is-managed=true resource-stickiness=1000
> location cli-prefer-aapche aapche role=Started 10: sles2
> property cib-bootstrap-options: \
>        stonith-enabled=true \
>        no-quorum-policy=ignore \
>        placement-strategy=balanced \
>        expected-quorum-votes=2 \
>        dc-version=1.1.12-f47ea56 \
>        cluster-infrastructure="classic openais (with plugin)" \
>        last-lrm-refresh=1440502955 \
>        stonith-timeout=40s
> rsc_defaults rsc-options: \
>        resource-stickiness=1000 \
>        migration-threshold=3
> op_defaults op-options: \
>        timeout=600 \
>        record-pending=true
>
>
>
> and after migration:
>
> node sles1
> node sles2
> primitive filesystem Filesystem \
>        params fstype=ext3 directory="/srv/www/vhosts" device="/dev/xvdd1" \
>        op start interval=0 timeout=60 \
>        op stop interval=0 timeout=60 \
>        op monitor interval=20 timeout=40
> primitive myip IPaddr2 \
>        params ip=10.9.131.86 \
>        op start interval=0 timeout=20s \
>        op stop interval=0 timeout=20s \
>        op monitor interval=10s timeout=20s
> primitive stonith_sbd stonith:external/sbd \
>        params pcmk_delay_max=30
> primitive web apache \
>        params configfile="/etc/apache2/httpd.conf" \
>        op start interval=0 timeout=40s \
>        op stop interval=0 timeout=60s \
>        op monitor interval=10 timeout=20s
> group aapche filesystem myip web \
>        meta target-role=Started is-managed=true resource-stickiness=1000
> location cli-ban-aapche-on-sles1 aapche role=Started -inf: sles1
> location cli-prefer-aapche aapche role=Started 10: sles2
> property cib-bootstrap-options: \
>        stonith-enabled=true \
>        no-quorum-policy=ignore \
>        placement-strategy=balanced \
>        expected-quorum-votes=2 \
>        dc-version=1.1.12-f47ea56 \
>        cluster-infrastructure="classic openais (with plugin)" \
>        last-lrm-refresh=1440502955 \
>        stonith-timeout=40s
> rsc_defaults rsc-options: \
>        resource-stickiness=1000 \
>        migration-threshold=3
> op_defaults op-options: \
>        timeout=600 \
>        record-pending=true
>
>
>
>
> thanks
>
> Best Regards
>
> Jost
>
>
>
>
>
>
>


_______________________________________________
Users mailing list: Users at clusterlabs.org
http://clusterlabs.org/mailman/listinfo/users

Project Home: http://www.clusterlabs.org
Getting started: http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf
Bugs: http://bugs.clusterlabs.org



