[Pacemaker] Help with N+1 configuration

Cal Heldenbrand cal at fbsdata.com
Thu Jul 26 16:34:11 UTC 2012


Hi everybody,

I've read through the Clusters from Scratch document, but it doesn't seem
to cover an N+1 (shared hot spare) style cluster setup very well.

My test case is three memcached servers.  Two are in primary use (hashed
50/50 by the clients) and one is a hot failover.

I hacked up my own OCF script to do the monitoring.  It's based on this old
script <https://github.com/fbmarc/facebook-memcached-old/blob/master/scripts/memcachedctl>
that Facebook made, but tailored to work on CentOS 6.  It just uses netcat
to poll the memcached daemon and verify it's running.  (Better than the
LSB script.)  I can send my script if anyone would like to see it.
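
The monitor action boils down to roughly the following (a sketch, not the
exact script; it assumes memcached is on its default port 11211 and that
the OCF shell functions are sourced for the $OCF_* return codes):

    # Ask the daemon for its version; a "VERSION ..." reply means it's alive.
    monitor_memcached() {
        if echo version | nc -w 2 127.0.0.1 11211 | grep -q '^VERSION'; then
            return $OCF_SUCCESS
        else
            return $OCF_NOT_RUNNING
        fi
    }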

I don't quite understand the difference between colocation, location, and
group; my current reading of the three is sketched below.  I'd like to
change my memcached-mem1 primitive so that when the memcached service times
out or stops, the memcached-mem3 primitive takes over on the
cluster-ip-mem1 address.  Effectively this creates a pair relationship: the
cluster-ip-mem1 primitive always requires either memcached-mem1 or
memcached-mem3 underneath it.
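
As far as I can tell, the three constraint types do roughly this (one-line
sketches with made-up IDs, so please correct me if I have them wrong):

    # location: score a resource onto a particular node
    location ip1-prefers-mem1 cluster-ip-mem1 100: mem1
    # colocation: place one resource where another is already running
    colocation ip1-with-svc1 inf: cluster-ip-mem1 memcached-mem1
    # group: shorthand that both colocates and orders its members in sequence
    group svc1-and-ip1 memcached-mem1 cluster-ip-mem1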

Currently, I can put the mem1 node in standby mode, and the mem3 server
takes over correctly, claiming the cluster IP and keeping its
memcached-mem3 service running.  However, if I only stop the memcached-mem1
primitive, nothing happens: the mem3 node does not take over, mem1 keeps
the cluster IP, and the service is simply left stopped.
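
For reference, the test sequence is roughly this (standard crm shell
commands):

    crm node standby mem1              # works: mem3 claims cluster-ip-mem1
    crm node online mem1
    crm resource stop memcached-mem1   # nothing fails over; mem1 keeps the IP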

Please reference my config below, and thanks for any help!

--Cal

------------------------------------------------------------------------------------------------------------------
node mem1
node mem2
node mem3
# Define the two cluster IPs, which the clients will connect to directly
primitive cluster-ip-mem1 ocf:heartbeat:IPaddr2 \
        params ip="192.168.1.240" cidr_netmask="24" \
        op monitor interval="1s" timeout="20s" \
        op start interval="0" timeout="20s" \
        op stop interval="0" timeout="20s" \
        meta is-managed="true" target-role="Started"
primitive cluster-ip-mem2 ocf:heartbeat:IPaddr2 \
        params ip="192.168.1.241" cidr_netmask="24" \
        op monitor interval="1s" timeout="20s" \
        op start interval="0" timeout="20s" \
        op stop interval="0" timeout="20s" \
        meta is-managed="true" target-role="Started"
# Define the memcached service, tied to each node
primitive memcached-mem1 ocf:fbs:memcached \
        meta is-managed="true" target-role="Started" \
        op monitor interval="2s" timeout="5s"
primitive memcached-mem2 ocf:fbs:memcached \
        meta is-managed="true" target-role="Started" \
        op monitor interval="2s" timeout="5s"
primitive memcached-mem3 ocf:fbs:memcached \
        meta is-managed="true" target-role="Started" \
        op monitor interval="2s" timeout="5s"

###  "sticky" rules to keep memcache services running on each individual
machines
# memcached-mem1 prefers to live on node mem1
location cli-prefer-memcached-mem1 memcached-mem1 \
        rule $id="cli-prefer-rule-memcached-mem1" inf: #uname eq mem1
# memcached-mem2 prefers to live on node mem2
location cli-prefer-memcached-mem2 memcached-mem2 \
        rule $id="cli-prefer-rule-memcached-mem2" inf: #uname eq mem2
# memcached-mem3 prefers to live on node mem3
location cli-prefer-memcached-mem3 memcached-mem3 \
        rule $id="cli-prefer-rule-memcached-mem3" inf: #uname eq mem3
# service memcached-mem1 isn't allowed to run on mem3
location cli-standby-memcached-mem1 memcached-mem1 \
        rule $id="cli-standby-rule-memcached-mem1" -inf: #uname eq mem3
# service memcached-mem1 isn't allowed to run on mem2
location cli-standby-memcached-mem1-mem2 memcached-mem1 \
        rule $id="cli-standby-rule-memcached-mem1-mem2" -inf: #uname eq mem2
# service memcached-mem3 isn't allowed to run on mem1
location cli-standby-memcached-mem3-mem1 memcached-mem3 \
        rule $id="cli-standby-rule-memcached-mem3-mem1" -inf: #uname eq mem1
# Cluster IP mem1 prefers to live on mem1
location cluster-ip-mem1-likes-mem1 cluster-ip-mem1 \
        rule $id="rule-cluster-ip-mem1-likes-mem1" inf: #uname eq mem1
# Cluster IP mem2 prefers to live on mem2
location cluster-ip-mem2-likes-mem2 cluster-ip-mem2 \
        rule $id="rule-cluster-ip-mem2-likes-mem2" inf: #uname eq mem2
# Cluster IP mem2 should never live on mem1
location cluster-ip-mem2-not-on-mem1 cluster-ip-mem2 \
        rule $id="rule-cluster-ip-mem2-not-on-mem1" -inf: #uname eq mem1
# Cluster IP mem1 should never live on mem2
location cluster-ip-mem1-not-on-mem2 cluster-ip-mem1 \
        rule $id="rule-cluster-ip-mem1-not-on-mem2" -inf: #uname eq
mem2
property $id="cib-bootstrap-options" \
        dc-version="1.1.7-6.el6-148fccfd5985c5590cc601123c6c16e966b85d14" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="3" \
        stonith-enabled="false"
### playing around, didn't work
#colocation mem1-on-cluster1 inf: memcached-mem1 cluster-ip-mem1
#colocation mem3-on-cluster1 inf: memcached-mem3 cluster-ip-mem1
#colocation mem1-cluster-on-service inf: cluster-ip-mem1 memcached-mem1 memcached-mem3
# Apparently the service memcached-mem3 can't be a member of two groups at the same time?
#group mem1-group memcached-mem1 cluster-ip-mem1


------------------------------------------------------------------------------------------------------------------