[ClusterLabs] 3 mysql nodes master, slave, slave

Dejan Muhamedagic dejanmm at fastmail.fm
Tue Feb 9 19:14:56 UTC 2016


Hi,

On Tue, Jan 26, 2016 at 01:51:44PM +0100, ml at galuan.org wrote:
> Hello !
> 
> I'm pretty new to pacemaker/corosync and I'm having some trouble
> making everything work the way I want.
> 
> I have 3 mysql nodes (Debian) and 1 vip. The mysql replication is as
> follows:
> 
> node01: mysql master + vip
> node02: mysql slave
> node03: mysql slave
> 
> Both mysql slaves replicate from the master (no slave chaining).
> 
> In case of failure, I want one of the slaves to be promoted to master,
> with the vip being moved to it as well.
> I haven't been able to make this work so far: when I put the master
> node01 into a "failed" state (i.e. by killing mysql), the vip doesn't
> follow the new master. For example, node03 would become the new
> mysql master while the vip would stay on node02.
> 
> Is there something I'm missing?

Yes, you need to add a colocation constraint so that the vip runs on
the same node as the mysql master.
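
For example, assuming the mysql primitive is wrapped in a master/slave
resource called MYSQL_MS (that name is only a guess based on the
fragment in your CIB below; adjust it to whatever your ms resource is
actually called), the crm shell constraints could look roughly like
this:

    # keep the vip on the node that currently holds the mysql master role
    colocation vip-with-mysql-master inf: IPFO MYSQL_MS:Master
    # and (re)start the vip only after the promotion has happened
    order mysql-promote-then-vip inf: MYSQL_MS:promote IPFO:start

The colocation makes the vip move whenever a different node gets
promoted, and the order keeps the vip from coming up before the new
master is ready.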

Thanks,

Dejan



> Thanks in advance!
> 
> 
> --
> An8
> 
> 
> ## corosync.conf
> totem {
>         version: 2
>         token: 3000
>         token_retransmits_before_loss_const: 10
>         clear_node_high_bit: yes
>         crypto_cipher: none
>         crypto_hash: none
>         transport: udpu
>         interface {
>                 ringnumber: 0
>                 bindnetaddr: 10.8.8.0
>         }
> }
> 
> service {
>         name:      pacemaker
>         ver:       1
> }
> 
> logging {
>         to_logfile: yes
>         logfile: /var/log/corosync/corosync.log
>         debug: off
>         timestamp: on
>         logger_subsys {
>                 subsys: QUORUM
>                 debug: off
>         }
> }
> 
> quorum {
>         provider: corosync_votequorum
> }
> 
> nodelist {
>         node {
>                 ring0_addr: node01
>         }
>         node {
>                 ring0_addr: node02
>         }
>         node {
>                 ring0_addr: node03
>         }
> }
> 
> ## crm
>     <crm_config>
>       <cluster_property_set id="cib-bootstrap-options">
>         <nvpair id="cib-bootstrap-options-dc-version"
> name="dc-version" value="1.1.12-561c4cf"/>
>         <nvpair id="cib-bootstrap-options-cluster-infrastructure"
> name="cluster-infrastructure" value="corosync"/>
>         <nvpair name="stonith-enabled" value="no"
> id="cib-bootstrap-options-stonith-enabled"/>
>         <nvpair name="no-quorum-policy" value="ignore"
> id="cib-bootstrap-options-no-quorum-policy"/>
>       </cluster_property_set>
>     </crm_config>
>     <nodes>
>       <node id="168298506" uname="node01"/>
>       <node id="168298516" uname="node02"/>
>       <node id="168298526" uname="node03"/>
>     </nodes>
>     <resources>
>       <primitive id="IPFO" class="ocf" provider="heartbeat"
> type="IPaddr2">
>         <instance_attributes id="IPFO-instance_attributes">
>           <nvpair name="ip" value="10.8.8.254"
> id="IPFO-instance_attributes-ip"/>
>           <nvpair name="nic" value="eth0"
> id="IPFO-instance_attributes-nic"/>
>           <nvpair name="cidr_netmask" value="32"
> id="IPFO-instance_attributes-cidr_netmask"/>
>         </instance_attributes>
>         <meta_attributes id="IPFO-meta_attributes">
>           <nvpair name="migration-threshold" value="2"
> id="IPFO-meta_attributes-migration-threshold"/>
>         </meta_attributes>
>         <operations>
>           <op name="monitor" interval="20" timeout="60"
> on-fail="restart" id="IPFO-monitor-20"/>
>         </operations>
>       </primitive>
>       <primitive id="MYSQL_HA" class="ocf" provider="heartbeat"
> type="mysql">
>         <instance_attributes id="MYSQL_HA-instance_attributes">
>           <nvpair name="binary" value="/usr/bin/mysqld_safe"
> id="MYSQL_HA-instance_attributes-binary"/>
>           <nvpair name="config" value="/etc/mysql/my.cnf"
> id="MYSQL_HA-instance_attributes-config"/>
>           <nvpair name="datadir" value="/var/lib/mysql"
> id="MYSQL_HA-instance_attributes-datadir"/>
>           <nvpair name="replication_user" value="slave"
> id="MYSQL_HA-instance_attributes-replication_user"/>
>           <nvpair name="replication_passwd" value="slave"
> id="MYSQL_HA-instance_attributes-replication_passwd"/>
>         </instance_attributes>
>         <operations>
>           <op name="promote" timeout="10" interval="0"
> id="MYSQL_HA-promote-0"/>
>           <op name="monitor" role="Master" timeout="10" interval="2"
> id="MYSQL_HA-monitor-2"/>
>           <op name="monitor" role="Slave" timeout="10" interval="2"
> id="MYSQL_HA-monitor-2-0">
>             <instance_attributes
> id="MYSQL_HA-monitor-2-0-instance_attributes">
>               <nvpair name="ms"
> id="MYSQL_HA-monitor-2-0-instance_attributes-ms"/>
>               <nvpair name="MYSQL_MS"
> id="MYSQL_HA-monitor-2-0-instance_attributes-MYSQL_MS"/>
>               <nvpair name="MYSQL_HA"
> id="MYSQL_HA-monitor-2-0-instance_attributes-MYSQL_HA"/>
>             </instance_attributes>
>           </op>
>         </operations>
>         <meta_attributes id="MYSQL_HA-meta_attributes">
>           <nvpair name="clone-max" value="3"
> id="MYSQL_HA-meta_attributes-clone-max"/>
>           <nvpair id="MYSQL_HA-meta_attributes-target-role"
> name="target-role" value="Stopped"/>
>         </meta_attributes>
>       </primitive>
>     </resources>
>     <constraints/>
>     <rsc_defaults>
>       <meta_attributes id="rsc-options">
>         <nvpair name="resource-stickiness" value="100"
> id="rsc-options-resource-stickiness"/>
>       </meta_attributes>
>     </rsc_defaults>
>   </configuration>
> </cib>
> 
> 
> _______________________________________________
> Users mailing list: Users at clusterlabs.org
> http://clusterlabs.org/mailman/listinfo/users
> 
> Project Home: http://www.clusterlabs.org
> Getting started: http://www.clusterlabs.org/doc/Cluster_from_Scratch.pdf
> Bugs: http://bugs.clusterlabs.org



