[ClusterLabs] Pacemaker
Siwakoti, Ganesh
ganesh at agile.ne.jp
Tue Nov 1 07:31:39 UTC 2016
Hi,
I'm running CentOS release 6.8 (Final) as KVM guests and have configured three
nodes (PM1.local, PM2.local and PM3.local) with CMAN clustering. Two nodes run
resources as active nodes, and the third node is a standby for fail-over. The
configured resources are groupA and groupB, with 5 resources in each resource
group. groupA normally runs on PM1.local and groupB on PM2.local. If either
resource group fails, it fails over to PM3.local. PM3.local can run only one
resource group at a time. groupA must never run on PM2.local and groupB must
never run on PM1.local. groupA and groupB resources must not move back to their
own nodes from PM3.local automatically; when they need to move back, I will
move them manually. Could someone please help me?
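For the manual move-back, this is roughly what I intend to run (a sketch
assuming the crmsh "crm resource" commands are available; "pcs resource move"
would be the pcs equivalent):

    # move groupA back from pm3.local to its home node by hand
    crm resource move groupA pm1.local
    # after groupA is running on pm1.local again, remove the temporary
    # cli-prefer constraint that "move" created so the group is not pinned
    crm resource unmove groupA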
version information:
CentOS release 6.8 (Final) (KVM)
Pacemaker 1.1.14-8.el6_8.1
Cman Version: 6.2.0
I want to be sure whether my configuration will work properly.
My cluster configuration and CRM configuration are below:
cluster.conf:
<?xml version="1.0"?>
<cluster config_version="23" name="clusterpm666">
  <fence_daemon/>
  <clusternodes>
    <clusternode name="pm1.local" nodeid="1">
      <fence>
        <method name="pcmk-redirect">
          <device name="pcmk" port="pm1.local"/>
        </method>
      </fence>
      <altname name="192.168.14.210"/>
    </clusternode>
    <clusternode name="pm2.local" nodeid="2">
      <fence>
        <method name="pcmk-redirect">
          <device name="pcmk" port="pm2.local"/>
        </method>
      </fence>
      <altname name="192.168.14.211"/>
    </clusternode>
    <clusternode name="pm3.local" nodeid="3">
      <fence>
        <method name="pcmk-redirect">
          <device name="pcmk" port="pm3.local"/>
        </method>
      </fence>
      <altname name="192.168.14.212"/>
    </clusternode>
  </clusternodes>
  <fencedevices>
    <fencedevice agent="fence_pcmk" name="pcmk"/>
  </fencedevices>
  <rm>
    <failoverdomains/>
    <resources/>
  </rm>
</cluster>
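As a sanity check, cluster.conf can be validated before it is distributed to
the nodes (a sketch assuming the standard CMAN tools shipped with CentOS 6):

    # check cluster.conf against the schema
    ccs_config_validate
    # once cman is running, confirm that all three nodes have joined
    cman_tool nodes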
crm_configuration:
node pm1.local \
attributes \
utilization capacity=1
node pm2.local \
attributes \
utilization capacity=1
node pm3.local \
attributes \
utilization capacity=1
primitive asteriskA asterisk \
params binary="/usr/sbin/asterisk" canary_binary=astcanary additional_parameters="-vvvg -I" \
config="/etc/asterisk_pm1/asterisk.conf" user=root group=root realtime=true maxfiles=32768 \
meta migration-threshold=2 \
op monitor interval=20s start-delay=5s timeout=30s \
op stop interval=0s on-fail=ignore
primitive asteriskB asterisk \
params binary="/usr/sbin/asterisk" canary_binary=astcanary additional_parameters="-vvvg -I" \
config="/etc/asterisk_pm2/asterisk.conf" user=root group=root realtime=true maxfiles=32768 \
meta migration-threshold=2 \
op monitor interval=20s start-delay=5s timeout=30s \
op stop interval=0s on-fail=ignore
primitive changeSrcIpA ocf:pacemaker:changeSrcIp \
params vip=192.168.12.215 mask=23 device=eth0 \
op start interval=0s timeout=0 \
op monitor interval=10s \
op stop interval=0s on-fail=ignore
primitive changeSrcIpB ocf:pacemaker:changeSrcIp \
params vip=192.168.12.216 mask=23 device=eth0 \
op start interval=0s timeout=0 \
op monitor interval=10s \
op stop interval=0s on-fail=ignore
primitive cronA lsb:crond \
meta migration-threshold=2 \
op monitor interval=20s start-delay=5s timeout=15s \
op stop interval=0s on-fail=ignore
primitive cronB lsb:crond \
meta migration-threshold=2 \
op monitor interval=20s start-delay=5s timeout=15s \
op stop interval=0s on-fail=ignore
primitive vip-local-checkA VIPcheck \
params target_ip=192.168.12.215 count=1 wait=5 \
op start interval=0s on-fail=restart timeout=60s \
op monitor interval=10s timeout=60s \
op stop interval=0s on-fail=ignore timeout=60s \
utilization capacity=1
primitive vip-local-checkB VIPcheck \
params target_ip=192.168.12.216 count=1 wait=5 \
op start interval=0s on-fail=restart timeout=60s \
op monitor interval=10s timeout=60s \
op stop interval=0s on-fail=ignore timeout=60s \
utilization capacity=1
primitive vip-localA IPaddr2 \
params ip=192.168.12.215 cidr_netmask=23 nic=eth0 iflabel=0 broadcast=192.168.13.255 \
op start interval=0s timeout=20s \
op monitor interval=5s timeout=20s \
op stop interval=0s on-fail=ignore
primitive vip-localB IPaddr2 \
params ip=192.168.12.216 cidr_netmask=23 nic=eth0 iflabel=0 broadcast=192.168.13.255 \
op start interval=0s timeout=20s \
op monitor interval=5s timeout=20s \
op stop interval=0s on-fail=ignore
group groupA vip-local-checkA vip-localA changeSrcIpA cronA asteriskA \
meta target-role=Started
group groupB vip-local-checkB vip-localB changeSrcIpB cronB asteriskB \
meta
location location-groupA-avoid-failed-node groupA \
rule -inf: defined fail-count-vip-local-checkA \
rule -inf: defined fail-count-vip-localA \
rule -inf: defined fail-count-changeSrcIpA \
rule -inf: defined fail-count-cronA \
rule -inf: defined fail-count-asteriskA \
rule -inf: defined fail-count-vip-local-checkB \
rule -inf: defined fail-count-vip-localB \
rule -inf: defined fail-count-changeSrcIpB \
rule -inf: defined fail-count-cronB \
rule -inf: defined fail-count-asteriskB
location location-groupA-pm1.local-10000 groupA 10000: pm1.local
location location-groupA-pm2.local--inf groupA resource-discovery=never -inf: pm2.local
location location-groupA-pm3.local-1000 groupA 1000: pm3.local
location location-groupB-avoid-failed-node groupB \
rule -inf: defined fail-count-vip-local-checkA \
rule -inf: defined fail-count-vip-localA \
rule -inf: defined fail-count-changeSrcIpA \
rule -inf: defined fail-count-cronA \
rule -inf: defined fail-count-asteriskA \
rule -inf: defined fail-count-vip-local-checkB \
rule -inf: defined fail-count-vip-localB \
rule -inf: defined fail-count-changeSrcIpB \
rule -inf: defined fail-count-cronB \
rule -inf: defined fail-count-asteriskB
location location-groupB-pm1.local--inf groupB resource-discovery=never -inf: pm1.local
location location-groupB-pm2.local-10000 groupB 10000: pm2.local
location location-groupB-pm3.local-1000 groupB 1000: pm3.local
property cib-bootstrap-options: \
have-watchdog=false \
dc-version=1.1.14-8.el6_8.1-70404b0 \
cluster-infrastructure=cman \
stonith-enabled=false \
no-quorum-policy=ignore \
enable-startup-probes=true \
maintenance-mode=false \
startup-fencing=false \
dc-deadtime=20s \
last-lrm-refresh=1475445380 \
placement-strategy=balanced
rsc_defaults rsc_defaults-options: \
migration-threshold=1 \
resource-stickiness=INFINITY
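To verify that this configuration really gives the behaviour described above
(only one group on pm3.local at a time because of the node utilization
capacity=1, the vip-local-check utilization capacity=1 and
placement-strategy=balanced, and no automatic fail-back because of
resource-stickiness=INFINITY), the allocation can be inspected with
crm_simulate; a sketch, exact options may vary by Pacemaker version:

    # show the current allocation scores for every resource on every node
    crm_simulate -sL
    # simulate pm1.local going down and show where groupA would be placed
    crm_simulate -SL --node-down pm1.local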