LVS负载均衡高可用最优方案(LVS+Keepalived)
内部IP(eth0) 外部IP(eth1) 角色 备注
192.168.0.210 无 LVS负载均衡器(主) VIP:192.168.0.240
192.168.0.211 无 LVS负载均衡器(备) VIP:192.168.0.240
192.168.0.223 无 Web01节点
192.168.0.224 无 Web02节点
192.168.0.220 无 内网客户端
1.LVS负载均衡器主和备安装Keepalived软件
[root@lvs01 ~]# yum -y install keepalived #光盘安装即可
仅实现LVS负载均衡器主和备的keepalived高可用功能
LVS负载均衡器主的keepalived配置文件内容如下
[root@lvs01 ~]# sed -n '1,30p' /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
}
notification_email_from yunjisuan
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS01
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 55
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.0.240/24 dev eth0 label eth0:240
}
}
LVS负载均衡器备的keepalived配置文件内容如下
[root@localhost ~]# sed -n '1,30p' /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
}
notification_email_from yunjisuan
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.0.240/24 dev eth0 label eth0:240
}
}
1.1 添加LVS的负载均衡规则
以下操作过程,在LVS主和备上完全一样
[root@localhost ~]# ipvsadm -C
[root@localhost ~]# ipvsadm -A -t 192.168.0.240:80 -s rr
[root@localhost ~]# ipvsadm -a -t 192.168.0.240:80 -r 192.168.0.223:80 -g -w 1
[root@localhost ~]# ipvsadm -a -t 192.168.0.240:80 -r 192.168.0.224:80 -g -w 1
[root@localhost ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.240:80 rr
-> 192.168.0.223:80 Route 1 0 0
-> 192.168.0.224:80 Route 1 0 0
1.2启动LVS主和备的keepalived服务
#在LVS主上
[root@lvs01 ~]# /etc/init.d/keepalived start
[root@lvs01 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:0C:29:D5:7F:9D
inet addr:192.168.0.210 Bcast:192.168.0.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fed5:7f9d/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:23567 errors:0 dropped:0 overruns:0 frame:0
TX packets:14635 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:2008524 (1.9 MiB) TX bytes:1746298 (1.6 MiB)
eth0:240 Link encap:Ethernet HWaddr 00:0C:29:D5:7F:9D
inet addr:192.168.0.240 Bcast:0.0.0.0 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:769 errors:0 dropped:0 overruns:0 frame:0
TX packets:769 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:56636 (55.3 KiB) TX bytes:56636 (55.3 KiB)
#在LVS备上
[root@localhost ~]# /etc/init.d/keepalived start
[root@localhost ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:0C:29:E7:06:1D
inet addr:192.168.0.211 Bcast:192.168.0.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fee7:61d/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:14109 errors:0 dropped:0 overruns:0 frame:0
TX packets:4902 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:12683754 (12.0 MiB) TX bytes:553207 (540.2 KiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:155 errors:0 dropped:0 overruns:0 frame:0
TX packets:155 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:11283 (11.0 KiB) TX bytes:11283 (11.0 KiB)
#如果LVS备上没有VIP就对了。如果主备都有,那么请检查防火墙是否处于开启状态
1.3内网客户端进行访问测试
#在内网客户端上进行访问测试
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
#在LVS主上进行访问连接查询
[root@lvs01 ~]# ipvsadm -Lnc
IPVS connection entries
pro expire state source virtual destination
TCP 00:01 FIN_WAIT 192.168.0.220:59887 192.168.0.240:80 192.168.0.223:80
TCP 00:01 FIN_WAIT 192.168.0.220:59889 192.168.0.240:80 192.168.0.223:80
TCP 00:01 FIN_WAIT 192.168.0.220:59888 192.168.0.240:80 192.168.0.224:80
TCP 00:00 FIN_WAIT 192.168.0.220:59886 192.168.0.240:80 192.168.0.224:80
#在LVS主上停掉keepalived服务
[root@lvs01 ~]# /etc/init.d/keepalived stop
Stopping keepalived: [ OK ]
[root@lvs01 ~]# ifconfig | grep eth0:240
#在LVS备上查看VIP
[root@localhost ~]# ip a | grep eth0:240
inet 192.168.0.240/24 scope global secondary eth0:240
#再次在内网客户端上进行访问测试
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.224 bbs
[root@LanClient ~]# curl 192.168.0.240
192.168.0.223 bbs
#在LVS备上进行访问连接查询
[root@localhost ~]# ipvsadm -Lnc
IPVS connection entries
pro expire state source virtual destination
TCP 01:47 FIN_WAIT 192.168.0.220:59900 192.168.0.240:80 192.168.0.223:80
TCP 01:09 FIN_WAIT 192.168.0.220:59891 192.168.0.240:80 192.168.0.224:80
TCP 01:48 FIN_WAIT 192.168.0.220:59902 192.168.0.240:80 192.168.0.223:80
TCP 01:09 FIN_WAIT 192.168.0.220:59892 192.168.0.240:80 192.168.0.224:80
TCP 01:14 FIN_WAIT 192.168.0.220:59896 192.168.0.240:80 192.168.0.224:80
TCP 01:10 FIN_WAIT 192.168.0.220:59894 192.168.0.240:80 192.168.0.224:80
#开启LVS主上的keepalived服务
[root@lvs01 ~]# /etc/init.d/keepalived start
[root@lvs01 ~]# ip a | grep eth0:240
inet 192.168.0.240/24 scope global secondary eth0:240
#查看LVS备上VIP资源是否释放
[root@localhost ~]# ip a | grep eth0:240
[root@localhost ~]#
综上,至此基于LVS的keepalived高可用功能实验完毕
1.4通过Keepalived对LVS进行管理的功能实现
[root@lvs01 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
}
notification_email_from yunjisuan
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS01
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 55
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.0.240/24 dev eth0 label eth0:240
}
}
virtual_server 192.168.0.240 80 { #虚拟主机VIP
delay_loop 6 #健康检查时间间隔(秒)
lb_algo rr #算法
lb_kind DR #模式
nat_mask 255.255.255.0 #掩码
# persistence_timeout 50 #会话保持
protocol TCP #协议
real_server 192.168.0.223 80 { #RS节点
weight 1 #权重
TCP_CHECK { #节点健康检查
connect_timeout 8 #连接超时时间(秒)
nb_get_retry 3 #重试次数
delay_before_retry 3 #重试前的延迟时间(秒)
connect_port 80 #利用80端口检查
}
}
real_server 192.168.0.224 80 { #RS节点
weight 1
TCP_CHECK {
connect_timeout 8
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
以上keepalived配置文件在LVS主和备上都进行修改。
然后在lvs服务器上通过ipvsadm -C清除之前设置的规则
重新启动keepalived服务进行测试,操作过程如下:
[root@lvs01 ~]# /etc/init.d/keepalived stop #关闭主LVS的keepalived服务
Stopping keepalived: [ OK ]
[root@lvs01 ~]# ipvsadm -Ln #没有ipvs规则
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@lvs01 ~]# ip a | grep 240 #没有VIP
[root@lvs01 ~]# /etc/init.d/keepalived start #启动keepalived服务
Starting keepalived: [ OK ]
[root@lvs01 ~]# ipvsadm -Ln #出现ipvs规则
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.240:80 rr
-> 192.168.0.223:80 Route 1 0 0
-> 192.168.0.224:80 Route 1 0 0
[root@lvs01 ~]# ip a | grep 240 #出现VIP
inet 192.168.0.240/24 scope global secondary eth0:240