How to Build a Highly Available MySQL Cluster with Corosync + DRBD + MySQL
This article walks through, step by step, how to combine Corosync, DRBD, and MySQL into a highly available MySQL cluster. The walkthrough is detailed and is meant as a practical reference; read it all the way through before trying it on real machines.
Node plan:
node1.huhu.com   172.16.100.103
node2.huhu.com   172.16.100.104
Resource naming plan:
Resource name: any ASCII string that contains no whitespace
DRBD device: the DRBD device file on both nodes, normally /dev/drbdN, major number 147
Disk: the backing storage device each node provides
Network configuration: the network properties used for data synchronization between the two nodes
DRBD has been merged into the mainline Linux kernel since version 2.6.33.
1. Configure mutual trust between the two machines (key-based SSH authentication), the hosts file, and time synchronization
1) Name resolution must work for every node, and each node's host name must match the output of "uname -n". Both nodes therefore need the following /etc/hosts entries:
172.16.100.103 node1.huhu.com node1
172.16.100.104 node2.huhu.com node2
Node1:
# sed -i 's@\(HOSTNAME=\).*@\1node1.huhu.com@g' /etc/sysconfig/network
# hostname node1.huhu.com
Node2:
# sed -i 's@\(HOSTNAME=\).*@\1node2.huhu.com@g' /etc/sysconfig/network
# hostname node2.huhu.com
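On each node, confirm that the effective host name now matches what the cluster expects (it must equal the "uname -n" output mentioned above):
# uname -n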
2) Set up key-based SSH communication between the two nodes, with commands similar to the following:
#yum install openssh-clients
Node1:
# ssh-keygen -t rsa
# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2
Node2:
# ssh-keygen -t rsa
# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node1
Configure time synchronization (cron entry in /etc/crontab):
*/5 * * * * root /usr/sbin/ntpdate ntp.api.bz &> /dev/null
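With SSH trust already in place, a quick way to confirm that the two clocks agree (an optional check, not part of the original steps):
# date; ssh node2 'date'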
2. Install and configure DRBD
Run on node1:
# rpm -Uvh http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm
# ssh node2 'rpm -Uvh http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm'
# yum update -y
# ssh node2 'yum update -y'
# yum install drbd84-utils kmod-drbd84 -y
# ssh node2 'yum install drbd84-utils kmod-drbd84 -y'
Load the module into the kernel:
# /sbin/modprobe drbd
# ssh node2 '/sbin/modprobe drbd'
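Verify that the module actually loaded on both nodes (an optional check, not in the original procedure):
# lsmod | grep drbd
# ssh node2 'lsmod | grep drbd'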
DRBD configuration files:
/etc/drbd.conf
/etc/drbd.d/global_common.conf
/etc/drbd.d/*.res
Create a backing partition on each node (repeat the fdisk steps on node2 as well):
# yum -y install parted
# ssh node2 'yum -y install parted'
# fdisk /dev/sdb
n — create a new partition
p — make it a primary partition
1 — partition number; press Enter twice to accept the default first and last cylinders
w — write the partition table and exit
# partprobe /dev/sdb
# ssh node2 'partprobe /dev/sdb'
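Before moving on, it is worth confirming that the kernel now sees /dev/sdb1 on each node (optional check):
# fdisk -l /dev/sdb
# grep sdb /proc/partitions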
Resource plan:
Resource name: mydrbd
DRBD device: /dev/drbd0
Disk: /dev/sdb1
Network configuration: 100M
# cat /etc/drbd.d/global_common.conf | grep -v "#"
global {
    usage-count yes;
}
common {
    handlers {
        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
    }
    startup {
    }
    options {
    }
    disk {
        on-io-error detach;
    }
    net {
        cram-hmac-alg "sha1";
        shared-secret "1q2w3e4r5t6y";
    }
    syncer {
        rate 200M;
    }
}
# cat mydrbd.res
resource mydrbd {
    device /dev/drbd0;
    disk /dev/sdb1;
    meta-disk internal;
    on node1.huhu.com {
        address 172.16.100.103:7789;
    }
    on node2.huhu.com {
        address 172.16.100.104:7789;
    }
}
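Before copying the files to node2, you can ask drbdadm to parse and dump the configuration; a typo in the .res file fails fast here (an optional check, not part of the original walkthrough):
# drbdadm dump mydrbd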
Copy the configuration files to node2:
# scp -r /etc/drbd.* node2:/etc/
Initialize the defined resource on both nodes and start the service:
#drbdadm create-md mydrbd
# ssh node2 'drbdadm create-md mydrbd'
# /etc/init.d/drbd start
# ssh node2 '/etc/init.d/drbd start'
Check the state of the DRBD device:
# cat /proc/drbd
version: 8.4.4 (api:1/proto:86-101)
GIT-hash: 599f286440bd633d15d5ff985204aff4bccffadd build by phil@Build64R6, 2013-10-14 15:33:06
 0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
    ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:2096348
Both nodes are currently Secondary; manually promote node1 to primary:
# drbdadm -- --overwrite-data-of-peer primary mydrbd
# cat /proc/drbd
version: 8.4.4 (api:1/proto:86-101)
GIT-hash: 599f286440bd633d15d5ff985204aff4bccffadd build by phil@Build64R6, 2013-10-14 15:33:06
 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
    ns:2096348 nr:0 dw:0 dr:2097012 al:0 bm:128 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
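A shorter way to check which role this node holds is drbdadm's role command (an optional addition; it should print Primary/Secondary at this point):
# drbdadm role mydrbd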
Format the DRBD device on the primary node, then mount it:
#mke2fs -j /dev/drbd0
#mkdir /mydata
#mount /dev/drbd0 /mydata/
#cp /etc/inittab /mydata/
#ls -lh /mydata/
total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found
The DRBD volume is now fully usable.
Switching the DRBD roles between primary and secondary
On the primary node:
#umount /mydata/
# drbdadm secondary mydrbd
#drbd-overview
  0:mydrbd/0  Connected Secondary/Secondary UpToDate/UpToDate C r-----
On the secondary node:
# drbd-overview   (confirm that both sides now report Secondary)
#drbdadm primary mydrbd
#mkdir -p /mydata
# mount /dev/drbd0 /mydata/
#ls -lh /mydata/
total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found
# drbd-overview
  0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r----- /mydata ext3 2.0G 36M 1.9G 2%
The roles have switched: this node is now Primary and the peer is Secondary.
3. Configure the corosync service
Stop the drbd service on both nodes and disable it at boot; from here on, Pacemaker, not the init system, will manage it:
#/etc/init.d/drbd stop
#ssh node2 '/etc/init.d/drbd stop'
#chkconfig drbd off
#ssh node2 'chkconfig drbd off'
#chkconfig --list | grep drbd
#ssh node2 'chkconfig --list | grep drbd'
drbd   0:off 1:off 2:off 3:off 4:off 5:off 6:off
Install corosync and pacemaker on both nodes:
# yum install libibverbs librdmacm lm_sensors libtool-ltdl openhpi-libs openhpi perl-TimeDate
# yum install corosync pacemaker
# ssh node2 'yum install libibverbs librdmacm lm_sensors libtool-ltdl openhpi-libs openhpi perl-TimeDate'
# ssh node2 'yum install corosync pacemaker'
Install the crmsh shell and its pssh dependency:
# wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-2.1-1.1.x86_64.rpm
# wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/pssh-2.3.1-4.1.x86_64.rpm
# ssh node2 'wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-2.1-1.1.x86_64.rpm && wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/pssh-2.3.1-4.1.x86_64.rpm'
# yum --nogpgcheck localinstall crmsh-2.1-1.1.x86_64.rpm pssh-2.3.1-4.1.x86_64.rpm
If the installation fails, add the following repository:
#vim /etc/yum.repos.d/ha-clustering.repo
[haclustering]
name=HAClustering
baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/
enabled=1
gpgcheck=0
# yum --nogpgcheck localinstall crmsh-2.1-1.1.x86_64.rpm pssh-2.3.1-4.1.x86_64.rpm
Run the same localinstall steps on node2.
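To confirm that both packages actually landed on both nodes (an optional check):
# rpm -q crmsh pssh
# ssh node2 'rpm -q crmsh pssh'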
Configure corosync:
#cd /etc/corosync/
#cp corosync.conf.example corosync.conf
# cat corosync.conf | grep -v "^#" | sed -e '/^$/d'
compatibility: whitetank
totem {
    version: 2
    secauth: on
    threads: 2
    interface {
        ringnumber: 0
        bindnetaddr: 172.16.100.0
        mcastaddr: 226.94.8.9
        mcastport: 5405
        ttl: 1
    }
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: no
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}
service {
    ver: 0
    name: pacemaker
    # use_mgmtd: yes
}
aisexec {
    user: root
    group: root
}
amf {
    mode: disabled
}
Generate the authentication key and copy it, together with the configuration, to node2:
# corosync-keygen
# scp -p authkey corosync.conf node2:/etc/corosync/
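corosync-keygen reads entropy from /dev/random and writes a 128-byte /etc/corosync/authkey with mode 0400; if it stalls, generate some disk or keyboard activity on the node. A quick sanity check (optional):
# ls -l /etc/corosync/authkey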
Create the log directory:
# mkdir -pv /var/log/cluster/
# ssh node2 'mkdir -pv /var/log/cluster/'
Start the corosync service:
#service corosync start
#ssh node2 'service corosync start'
Check that the Corosync engine started:
# grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log
Jul 09 10:28:14 corosync [MAIN  ] Corosync Cluster Engine ('1.4.1'): started and ready to provide service.
Jul 09 10:28:14 corosync [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
Check that membership communication between the nodes is working:
# grep TOTEM /var/log/cluster/corosync.log
Jul 09 10:28:14 corosync [TOTEM ] Initializing transport (UDP/IP Multicast).
Jul 09 10:28:14 corosync [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Jul 09 10:28:14 corosync [TOTEM ] The network interface [172.16.100.103] is now up.
Jul 09 10:28:14 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jul 09 10:28:29 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.
Check that pacemaker started normally:
# grep pcmk_startup /var/log/cluster/corosync.log
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: CRM: Initialized
Jul 09 10:28:14 corosync [pcmk  ] Logging: Initialized pcmk_startup
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Maximum core file size is: 18446744073709551615
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Service: 9
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Local hostname: node1.huhu.com
Check for errors:
# grep ERROR /var/log/cluster/corosync.log | grep -v unpack_resources
Jul 09 10:28:14 corosync [pcmk  ] ERROR: process_ais_conf: You have configured a cluster using the Pacemaker plugin for Corosync. The plugin is not supported in this environment and will be removed very soon.
Jul 09 10:28:14 corosync [pcmk  ] ERROR: process_ais_conf: Please see Chapter 8 of 'Clusters from Scratch' (http://www.clusterlabs.org/doc) for details on using Pacemaker with CMAN
Jul 09 10:28:35 [1373] node1.huhu.com pengine: notice: process_pe_message: Configuration ERRORs found during PE processing. Please run "crm_verify -L" to identify issues.
Note: since no STONITH device is configured, these errors can be ignored.
# crm status
Last updated: Wed Jul 9 10:49:53 2014
Last change: Wed Jul 9 10:19:07 2014 via crmd on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node1.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
0 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
This confirms corosync is configured and running correctly.
Disable STONITH, then verify and commit:
crm(live)# configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# verify
crm(live)configure# commit
Allow the cluster to keep running services even when it loses quorum (important for a two-node cluster, which otherwise stops everything when one node goes away):
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# verify
crm(live)configure# commit
Configure resource stickiness so that resources prefer to stay on their current node:
crm(live)configure# rsc_defaults resource-stickiness=100
crm(live)configure# verify
crm(live)configure# commit
View the current configuration:
crm(live)configure# show
node node1.huhu.com
node node2.huhu.com
property cib-bootstrap-options: \
    dc-version=1.1.10-14.el6_5.3-368c726 \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes=2 \
    stonith-enabled=false \
    no-quorum-policy=ignore
rsc_defaults rsc-options: \
    resource-stickiness=100
crm(live)configure#
Look at the available DRBD resource agents:
crm(live)configure# cd ..
crm(live)# ra
crm(live)ra# providers drbd
linbit
Note: only the linbit provider is available here; corosync stacks older than 1.4 also shipped a heartbeat provider.
View the agent metadata:
crm(live)ra# meta ocf:linbit:drbd
Define the DRBD primitive:
crm(live)configure# primitive mysql_drbd ocf:linbit:drbd params drbd_resource=mydrbd op start timeout=240 op stop timeout=100 op monitor role=Master interval=50s timeout=30s op monitor role=Slave interval=60s timeout=30s
Define the master/slave (multi-state) resource on top of it:
crm(live)configure# master MS_mysql_drbd mysql_drbd meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
crm(live)configure# show mysql_drbd
primitive mysql_drbd ocf:linbit:drbd \
    params drbd_resource=mydrbd \
    op start timeout=240 interval=0 \
    op stop timeout=100 interval=0 \
    op monitor role=Master interval=50s timeout=30s \
    op monitor role=Slave interval=60s timeout=30s
crm(live)configure# show MS_mysql_drbd
ms MS_mysql_drbd mysql_drbd \
    meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd
crm(live)# status
Last updated: Wed Jul 9 11:54:30 2014
Last change: Wed Jul 9 11:54:17 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node1.huhu.com ]
    Slaves: [ node2.huhu.com ]
crm(live)#
The master/slave resource is now defined.
[root@node1 corosync]# drbd-overview
  0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
[root@node1 corosync]#
The current node now holds the DRBD master role.
Perform a manual master/slave switchover:
# crm node standby
#crm status
Last updated: Wed Jul 9 12:01:44 2014
Last change: Wed Jul 9 12:01:29 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured
Node node1.huhu.com: standby
Online: [ node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Stopped: [ node1.huhu.com ]
#crm node online
#crm status
Last updated: Wed Jul 9 12:02:46 2014
Last change: Wed Jul 9 12:02:43 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Slaves: [ node1.huhu.com ]
# drbd-overview
  0:mydrbd/0  Connected Secondary/Primary UpToDate/UpToDate C r-----
[root@node1 corosync]#
The current node has become the slave.
Master/slave failover now works, but nothing ever mounts the filesystem, so a filesystem resource must be defined as well.
crm(live)configure# primitive mystore ocf:heartbeat:Filesystem params device=/dev/drbd0 directory=/mydata fstype=ext3 op start timeout=60 op stop timeout=60
crm(live)configure# verify
Note: do NOT commit yet. The filesystem must always sit with the DRBD master, so first define a colocation constraint:
crm(live)configure# colocation mystore_with_MS_mysql_drbd inf: mystore MS_mysql_drbd:Master
This ties the storage resource to whichever node holds the master role.
crm(live)configure# order mystore_after_MS_mysql_drbd mandatory: MS_mysql_drbd:promote mystore:start
This mounts the storage only after the DRBD resource has been promoted on that node.
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd ..
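The policy-engine log earlier suggested running crm_verify whenever configuration errors are reported; this is a convenient point to re-check the live CIB (an optional check):
# crm_verify -L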
crm(live)# status
Last updated: Wed Jul 9 12:25:25 2014
Last change: Wed Jul 9 12:22:30 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node1.huhu.com ]
    Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
crm(live)#
The master is on node1, so mystore starts on node1 as well.
[root@node1 ~]# ls -lh /mydata/
total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------  2 root root 16K Jul  8 17:23 lost+found
[root@node1 ~]#
Simulate a switchover:
[root@node1 corosync]# crm node standby
[root@node1 corosync]# crm status
Last updated: Wed Jul 9 12:28:55 2014
Last change: Wed Jul 9 12:28:49 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured
Node node1.huhu.com: standby
Online: [ node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Stopped: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
[root@node1 corosync]#
[root@node2 ~]# ls -lh /mydata/
total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------  2 root root 16K Jul  8 17:23 lost+found
You have new mail in /var/spool/mail/root
[root@node2 ~]#
Everything has switched over to node2.
4. Configure MySQL on top of DRBD and corosync
Create the MySQL user and group (with matching UID/GID) on both nodes:
#groupadd -g 3306 mysql
#useradd -u 3306 -g mysql -s /sbin/nologin -M mysql
#id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
#ssh node2 'groupadd -g 3306 mysql'
#ssh node2 'useradd -u 3306 -g mysql -s /sbin/nologin -M mysql'
# wget http://cdn.mysql.com/Downloads/MySQL-5.5/mysql-5.5.38-linux2.6-x86_64.tar.gz
#tar zxvf mysql-5.5.38-linux2.6-x86_64.tar.gz -C /usr/local/
#cd /usr/local/
#ln -s mysql-5.5.38-linux2.6-x86_64/ mysql
#cd mysql
#chown root:mysql -R .
#cp support-files/my-huge.cnf /etc/my.cnf
#cp support-files/mysql.server /etc/init.d/mysqld
#[ -x /etc/init.d/mysqld ] && echo "ok" || echo "NO"
Make sure you are operating on the primary node:
# drbd-overview
  0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r----- /mydata ext3 2.0G 36M 1.9G 2%
#mkdir -p /mydata/data
#chown -R mysql:mysql /mydata/data/
# scripts/mysql_install_db --user=mysql --datadir=/mydata/data
# vim /etc/my.cnf
datadir = /mydata/data
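For reference, a minimal sketch of the resulting [mysqld] stanza; the port and socket values are the my-huge.cnf defaults (assumptions left unchanged here), and datadir is the only line this walkthrough actually modifies:
[mysqld]
port    = 3306
socket  = /tmp/mysql.sock
datadir = /mydata/data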
#chkconfig --add mysqld
#chkconfig mysqld off
#service mysqld start
Make sure MySQL starts cleanly, then create a test database:
#/usr/local/mysql/bin/mysql -uroot -e "CREATE DATABASE mydb"
[root@node1 mysql]# /usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+
#service mysqld stop
#chkconfig --list | grep 3:off | grep mysql
mysqld 0:off 1:off 2:off 3:off 4:off 5:off 6:off
[root@node1 mysql]#
Switch the storage resource over to node2 and set up MySQL there as well:
#crm node standby
[root@node1 mysql]# crm status
Last updated: Wed Jul 9 14:45:36 2014
Last change: Wed Jul 9 14:45:29 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured
Node node1.huhu.com: standby
Online: [ node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Stopped: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
[root@node1 mysql]# crm node online
[root@node1 mysql]# crm status
Last updated: Wed Jul 9 14:45:52 2014
Last change: Wed Jul 9 14:45:49 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
[root@node1 mysql]#
# scp /root/mysql-5.5.38-linux2.6-x86_64.tar.gz node2:/root/
# scp /etc/my.cnf node2:/etc/my.cnf
# scp /etc/init.d/mysqld node2:/etc/init.d/mysqld
Install MySQL on node2:
#tar zxvf mysql-5.5.38-linux2.6-x86_64.tar.gz -C /usr/local/
#cd /usr/local/
#ln -s mysql-5.5.38-linux2.6-x86_64/ mysql
#cd mysql
#chown root:mysql -R .
Note: do NOT run mysql_install_db or create /mydata/data by hand on node2; the initialized data files already live on the DRBD device, and recreating them would corrupt the data.
If startup complains about a missing library, install libaio:
# yum install libaio
#service mysqld start
#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+
[root@node2 mydata]#
#service mysqld stop
#chkconfig mysqld off
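As on node1, double-check that mysqld will not start on its own at boot, since the cluster must be the only thing managing it (an optional check):
# chkconfig --list mysqld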
Configure MySQL as a cluster resource:
crm(live)# configure
crm(live)configure# primitive mysqld lsb:mysqld
crm(live)configure# verify
The MySQL service must run on the same node as its storage, so define a colocation constraint:
crm(live)configure# colocation mysqld_with_mystore inf: mysqld mystore
crm(live)configure# show xml
MySQL must also start only after the storage has been mounted, so define an order constraint:
crm(live)configure# order mysqld_after_mystore mandatory: mystore mysqld
crm(live)configure# verify
crm(live)configure# commit
crm(live)# status
Last updated: Wed Jul 9 16:18:27 2014
Last change: Wed Jul 9 16:18:16 2014 via cibadmin on node2.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
4 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
mysqld (lsb:mysqld): Started node2.huhu.com
crm(live)#
Everything now runs on node2, so log in there and exercise the database:
#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+
#/usr/local/mysql/bin/mysql -uroot -e "DROP DATABASE mydb"
#/usr/local/mysql/bin/mysql -uroot -e "CREATE DATABASE testdb"
Switch master and slave once more:
#crm node standby
# crm status
    Masters: [ node1.huhu.com ]
    Stopped: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com
# crm node online
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node1.huhu.com ]
    Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com
On node1:
#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
| testdb             |
+--------------------+
testdb shows up as expected: the data followed the storage across the switchover.
Define a virtual IP resource for MySQL:
crm(live)configure# primitive myip ocf:heartbeat:IPaddr params ip=172.16.100.119 nic=eth0 cidr_netmask=24
crm(live)configure# verify
crm(live)configure# colocation myip_with_MS_mysql_drbd inf: MS_mysql_drbd:Master myip
crm(live)configure# verify
crm(live)configure# show xml
crm(live)configure# commit
crm(live)configure# cd ..
crm(live)# status
Last updated: Wed Jul 9 16:46:27 2014
Last change: Wed Jul 9 16:46:20 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
5 Resources configured
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node1.huhu.com ]
    Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com
myip (ocf::heartbeat:IPaddr): Started node1.huhu.com
crm(live)#
myip has started on node1; the address shows up on eth0:
# ip addr
1: lo:
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0:
    link/ether 00:0c:29:a9:86:42 brd ff:ff:ff:ff:ff:ff
    inet 172.16.100.103/24 brd 172.16.100.255 scope global eth0
    inet 172.16.100.119/24 brd 172.16.100.255 scope global secondary eth0
    inet6 fe80::20c:29ff:fea9:8642/64 scope link
       valid_lft forever preferred_lft forever
5. Verify MySQL login from another host
Log in to MySQL and create a user that may connect remotely:
# /usr/local/mysql/bin/mysql -uroot -e "GRANT ALL ON *.* TO root@'%' IDENTIFIED BY '123.com'; FLUSH PRIVILEGES"
Then, from another machine, connect through the virtual IP:
# mysql -uroot -p123.com -h172.16.100.119 -e "SHOW DATABASES"
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
| testdb             |
+--------------------+
[root@localhost ~]#
Simulate a master/slave switchover once more:
#crm node standby
#crm node online
#crm status
Online: [ node1.huhu.com node2.huhu.com ]
Master/Slave Set: MS_mysql_drbd [mysql_drbd]
    Masters: [ node2.huhu.com ]
    Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
mysqld (lsb:mysqld): Started node2.huhu.com
myip (ocf::heartbeat:IPaddr): Started node2.huhu.com
# mysql -uroot -p123.com -h172.16.100.119 -e "SHOW DATABASES"
The same query through the virtual IP keeps working after the switchover. The final configuration:
[root@node2 ~]# crm
crm(live)# configure
crm(live)configure# show
node node1.huhu.com \
    attributes standby=off
node node2.huhu.com \
    attributes standby=off
primitive myip IPaddr \
    params ip=172.16.100.119 nic=eth0 cidr_netmask=24
primitive mysql_drbd ocf:linbit:drbd \
    params drbd_resource=mydrbd \
    op start timeout=240 interval=0 \
    op stop timeout=100 interval=0 \
    op monitor role=Master interval=50s timeout=30s \
    op monitor role=Slave interval=60s timeout=30s
primitive mysqld lsb:mysqld
primitive mystore Filesystem \
    params device="/dev/drbd0" directory="/mydata" fstype=ext3 \
    op start timeout=60 interval=0 \
    op stop timeout=60 interval=0
ms MS_mysql_drbd mysql_drbd \
    meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
colocation myip_with_MS_mysql_drbd inf: MS_mysql_drbd:Master myip
colocation mysqld_with_mystore inf: mysqld mystore
colocation mystore_with_MS_mysql_drbd inf: mystore MS_mysql_drbd:Master
order mysqld_after_mystore Mandatory: mystore mysqld
order mystore_after_MS_mysql_drbd Mandatory: MS_mysql_drbd:promote mystore:start
property cib-bootstrap-options: \
    dc-version=1.1.10-14.el6_5.3-368c726 \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes=2 \
    stonith-enabled=false \
    no-quorum-policy=ignore
rsc_defaults rsc-options: \
    resource-stickiness=100
crm(live)configure#
That covers the complete Corosync + DRBD + MySQL high-availability setup. Thanks for reading, and I hope the walkthrough serves as a useful reference.