Guest VM OS: redhat6.5
Physical host OS: redhat7.2
[rhel6.5]
name=rhel6.5
baseurl=http://10.10.10.250/rhel6.5
gpgcheck=0
[HighAvailability]
name=HighAvailability
baseurl=http://10.10.10.250/rhel6.5/HighAvailability
gpgcheck=0
[LoadBalancer]
name=LoadBalancer
baseurl=http://10.10.10.250/rhel6.5/LoadBalancer
gpgcheck=0
[ScalableFileSystem]
name=ScalableFileSystem
baseurl=http://10.10.10.250/rhel6.5/ScalableFileSystem
gpgcheck=0
[ResilientStorage]
name=ResilientStorage
baseurl=http://10.10.10.250/rhel6.5/ResilientStorage
gpgcheck=0
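Assuming the repository definitions above are saved as a file under /etc/yum.repos.d/ (the exact filename is up to you), a quick sanity check that yum can see all five repos:
yum clean all        ### drop cached metadata so the new repo file is re-read
yum repolist         ### rhel6.5, HighAvailability, LoadBalancer, ScalableFileSystem and ResilientStorage should all list package counts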
server1 ====>> 10.10.10.1 (Nginx, ricci and luci)
server2 ====>> 10.10.10.2 (Apache)
server3 ====>> 10.10.10.3 (Apache)
server4 ====>> 10.10.10.4 (Nginx and ricci)
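The cluster and the examples below refer to the machines by name (server1 through server4), so every VM should be able to resolve those names. A minimal /etc/hosts sketch, assuming no internal DNS:
10.10.10.1    server1
10.10.10.2    server2
10.10.10.3    server3
10.10.10.4    server4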
cd /mnt && tar zxf nginx-1.10.1.tar.gz
vim /mnt/nginx-1.10.1/src/core/nginx.h        ### hide the nginx version shown to clients
#define nginx_version      1010001
#define NGINX_VERSION      "1.10.1"
#define NGINX_VER          "nginx"
vim /mnt/nginx-1.10.1/auto/cc/gcc             ### disable the debug build (comment out the -g flag)
# debug
#CFLAGS="$CFLAGS -g"
yum install -y pcre-devel gcc openssl-devel
cd /mnt/nginx-1.10.1
./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module
make && make install
ln -s /usr/local/nginx/sbin/nginx /usr/local/sbin/
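A quick way to confirm the build works and that the version string is really hidden (run on server1):
nginx -t                                     ### configuration test; should print "syntax is ok" and "test is successful"
nginx                                        ### start it once by hand
curl -sI http://localhost | grep Server      ### should show "Server: nginx" with no version number
nginx -s stop                                ### stop it again; the init script below will manage it from now on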
vim /usr/local/nginx/conf/nginx.conf          ### add the following inside the http {} block
    upstream dream {
        server 10.10.10.2:80;
        server 10.10.10.3:80;
    }
    server {
        listen 80;
        server_name www.dream.com;
        location / {
            proxy_pass http://dream;
        }
    }
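server2 and server3 only need Apache serving a page that identifies them, so the load balancing is visible from curl. A minimal sketch (the index.html content is just for illustration); run on server2 and do the same on server3 with its own name:
yum install -y httpd
echo "server2 - 10.10.10.2" > /var/www/html/index.html      ### on server3 use its own name/IP
/etc/init.d/httpd start
chkconfig httpd on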
#!/bin/bash
# Simple SysV init wrapper for the source-built nginx
. /etc/rc.d/init.d/functions

nginx=${nginx-/usr/local/nginx/sbin/nginx}
prog=nginx
RETVAL=0

start() {
    echo -n $"Starting $prog: "
    daemon $nginx
    RETVAL=$?
    echo
    return $RETVAL
}

stop() {
    echo -n $"Stopping $prog: "
    killproc $nginx
    RETVAL=$?
    echo
}

reload() {
    echo -n $"Reloading $prog: "
    $nginx -s reload
    echo
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status $prog
        RETVAL=$?
        ;;
    restart)
        stop
        start
        ;;
    reload)
        reload
        ;;
    *)
        echo $"Usage: $prog {start|stop|restart|reload|status}"
        RETVAL=2
esac
exit $RETVAL
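Assuming the script above is saved as /etc/init.d/nginx (the path the later /etc/init.d/nginx stop command and the cluster's script resource rely on), install and test it like this. Note that the nginx build and this script are needed on both server1 and server4, since the cluster will move the service between them:
chmod +x /etc/init.d/nginx
/etc/init.d/nginx start       ### should print "Starting nginx: [ OK ]"
/etc/init.d/nginx status
/etc/init.d/nginx stop        ### stop it again; rgmanager will start and stop it once it is a cluster resource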
On server1 (ricci and luci):
yum install -y ricci luci
/etc/init.d/ricci restart
/etc/init.d/luci restart
passwd ricci                  ### on redhat6.1 and later a password must be set for the ricci user
chkconfig ricci on            ### make sure both services start at boot
chkconfig luci on
On server4 (ricci only):
yum install -y ricci
/etc/init.d/ricci restart
passwd ricci
chkconfig ricci on
Note: if cluster creation fails, empty the config on server1 and server4 with  > /etc/cluster/cluster.conf  and then repeat the web setup steps.
Log in to the luci web interface at https://10.10.10.1:8084/ and create a cluster with server1 and server4 as nodes.
[root@server1 cluster]# cman_tool status
Version: 6.2.0
Config Version: 1
Cluster Name: 11
Cluster Id: 14140
Cluster Member: Yes
Cluster Generation: 80
Membership state: Cluster-Member
Nodes: 2
Expected votes: 1
Total votes: 2
Node votes: 1
Quorum: 1
Active subsystems: 9
Flags: 2node
Ports Bound: 0 11
Node name: server1
Node ID: 1
Multicast addresses: 239.192.55.115
Node addresses: 10.10.10.1
[root@server1 cluster]# clustat      ### check cluster status
Cluster Status for 11 @ Thu Apr 12 14:50:09 2018
Member Status: Quorate
 Member Name                         ID   Status
 ------ ----                         ---- ------
 server1                                1 Online, Local
 server4                                2 Online
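The clustat output above was taken right after the cluster was created, before any resources existed. Before the test below, add (through luci) a failover domain, an IP resource for the VIP 10.10.10.100, a script resource for /etc/init.d/nginx, and a service group combining the two. luci writes everything to /etc/cluster/cluster.conf on both nodes; a rough sketch of the result (names such as webfd and the config_version value are illustrative):
<?xml version="1.0"?>
<cluster config_version="5" name="11">
        <clusternodes>
                <clusternode name="server1" nodeid="1"/>
                <clusternode name="server4" nodeid="2"/>
        </clusternodes>
        <cman expected_votes="1" two_node="1"/>
        <rm>
                <failoverdomains>
                        <failoverdomain name="webfd" ordered="1" restricted="1">
                                <failoverdomainnode name="server1" priority="1"/>
                                <failoverdomainnode name="server4" priority="2"/>
                        </failoverdomain>
                </failoverdomains>
                <resources>
                        <ip address="10.10.10.100" monitor_link="on"/>
                        <script file="/etc/init.d/nginx" name="nginx"/>
                </resources>
                <service domain="webfd" name="nginx" recovery="relocate">
                        <ip ref="10.10.10.100"/>
                        <script ref="nginx"/>
                </service>
        </rm>
</cluster>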
We can also check which node currently holds the VIP with ip addr.
vim /etc/hosts
10.10.10.100 www.dream.com
curl www.dream.com
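Repeating the request shows the round-robin balancing across the two Apache back ends (assuming server2 and server3 serve pages that identify themselves, as sketched earlier):
for i in {1..4}; do curl -s www.dream.com; done      ### responses should alternate between server2 and server3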
/etc/init.d/nginx stop        ### stop nginx on server1, the node currently running the service
You will find the site is still reachable from the physical host: the VIP has moved to server4 and nginx has been started there automatically. However, if you then simulate a kernel crash with echo c > /proc/sysrq-trigger, server1 does not take over, because the cluster has no way to confirm that the crashed node is really dead. That is what the powerful fence mechanism described below is for.
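One way to confirm the failover just described, run on server4 (the interface name eth0 is an assumption):
ip addr show eth0 | grep 10.10.10.100     ### the VIP should now sit on server4
/etc/init.d/nginx status                  ### rgmanager should have started nginx here
clustat                                   ### the service should show as started on server4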
[root@foundation25 network-scripts]# mkdir /etc/cluster
[root@foundation25 network-scripts]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1    ### generate a random 128-byte key
1+0 records in
1+0 records out
128 bytes (128 B) copied, 0.000149135 s, 858 kB/s
scp /etc/cluster/fence_xvm.key root@10.10.10.1:/etc/cluster/
scp /etc/cluster/fence_xvm.key root@10.10.10.4:/etc/cluster/
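It is worth checking that both nodes received exactly the same key as the host, since fencing fails silently on a key mismatch:
md5sum /etc/cluster/fence_xvm.key         ### run on the host, server1 and server4; all three sums must match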
[root@foundation25 mnt]# fence_virtd -c      ### if fence-virtd is not installed, install it with yum first
Module search path [/usr/lib64/fence-virt]:
Available backends:
libvirt 0.1
Available listeners:
serial 0.4
multicast 1.2
Listener modules are responsible for accepting requests
from fencing clients.
Listener module [multicast]:                 ### listener mode
The multicast listener module is designed for use environments
where the guests and hosts may communicate over a network using
multicast.
The multicast address is the address that a client will use to
send fencing requests to fence_virtd.
Multicast IP Address [225.0.0.12]:           ### multicast address
Using ipv4 as family.
Multicast IP Port [1229]:                    ### port; you can pick a different one
Setting a preferred interface causes fence_virtd to listen only
on that interface. Normally, it listens on all interfaces.
In environments where the virtual machines are using the host
machine as a gateway, this *must* be set (typically to virbr0).
Set to 'none' for no interface.
Interface [virbr0]: br0                      ### set this to match your own bridge/NIC name
The key file is the shared key information which is used to
authenticate fencing requests. The contents of this file must
be distributed to each physical host and virtual machine within
a cluster.
Key File [/etc/cluster/fence_xvm.key]:
Backend modules are responsible for routing requests to
the appropriate hypervisor or management layer.
Backend module [libvirt]:
Configuration complete.
=== Begin Configuration ===
backends {
        libvirt {
                uri = "qemu:///system";
        }
}

listeners {
        multicast {
                port = "1229";
                family = "ipv4";
                interface = "br0";
                address = "225.0.0.12";
                key_file = "/etc/cluster/fence_xvm.key";
        }
}

fence_virtd {
        module_path = "/usr/lib64/fence-virt";
        backend = "libvirt";
        listener = "multicast";
}
=== End Configuration ===
Replace /etc/fence_virt.conf with the above [y/N]? y
systemctl restart fence_virtd.service        ### restart the fence daemon on the host; its configuration lives in /etc/fence_virt.conf
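Before the crash test, it helps to confirm that the cluster nodes can actually reach fence_virtd over multicast. Also remember that a fence device of the "fence virt (multicast mode)" type, mapped to each VM's libvirt domain name, still has to be added to server1 and server4 in luci. From either node:
fence_xvm -o list          ### should list the host's virtual machines and their UUIDs; if it hangs, recheck the key, port 1229 and the br0 interface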
echo c>/proc/sysrq-trigger
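Run the crash on whichever node currently holds the service. With fencing in place, the surviving node should power-cycle the crashed VM through fence_xvm and take the service over; one way to watch it from the other node:
clustat -i 2                      ### refresh every 2 seconds: the service relocates, and the fenced node rejoins once it has rebooted
ip addr | grep 10.10.10.100       ### the VIP moves along with the service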