Setting Up a Load-Balancing Cluster on Linux: Deploying LVS in TUN Mode

I. Overview of TUN Mode

  •  TUN is short for IP Tunneling. The director encapsulates each IP packet it receives inside a new IP packet and forwards it to a real (application) server; the real server then sends its response directly back to the client.
  •  IP tunneling is the technique of encapsulating one IP datagram inside another, so that a datagram destined for one IP address can be wrapped up and forwarded to a different IP address. It is also known as IP encapsulation. IP tunnels are mainly used for mobile hosts and virtual private networks (VPNs), where the tunnels are established statically and each end of the tunnel has its own unique IP address (a minimal hand-built example follows below).
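
As an aside, this is the same ipip encapsulation that iproute2 can build by hand. The sketch below is purely illustrative and is not part of the LVS setup that follows; the tunnel name (tun1) and all addresses are made up:

[root@demo ~]# modprobe ipip                                                 # load the IP-in-IP module
[root@demo ~]# ip tunnel add tun1 mode ipip local 10.0.0.1 remote 10.0.0.2   # encapsulate traffic between the two endpoints
[root@demo ~]# ip link set tun1 up                                           # activate the tunnel interface
[root@demo ~]# ip addr add 192.168.100.1/24 dev tun1                         # address the local tunnel endpoint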

II. Lab Environment (RHEL 6.5)

1. Host information

Hostname                 IP
server1 (director)       172.25.83.1
server2 (real server)    172.25.83.2
server3 (real server)    172.25.83.3

2. Install Apache on server2 and server3, create a test page under the default document root /var/www/html, and start the httpd service

[root@server2 html]# vim   index.html
[root@server2 html]# cat   index.html
<h1>www.xin.com -server2</h1>

[root@server2 html]# /etc/init.d/httpd   start

[root@server3 html]# vim   index.html
[root@server3 html]# cat   index.html
<h1>bbs.xin.com - server3</h1>

[root@server3 html]# /etc/init.d/httpd   start
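
As an optional sanity check (assuming curl is installed), each real server should return its own page locally before LVS is put in front of it:

[root@server2 html]# curl localhost
<h1>www.xin.com -server2</h1>

[root@server3 html]# curl localhost
<h1>bbs.xin.com - server3</h1>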

III. Building and Deploying TUN (Tunnel) Mode

Prerequisite: ipvsadm has already been installed and set up on the director (server1). For those steps, see this earlier post:
https://mp.csdn.net/postedit/86923616

Configuring the director (server1):

  1. Remove the policies added earlier for DR mode
  2. Add the tunnel
  3. Assign the Virtual IP to the tunnel
  4. Activate the tunnel
[root@server1 ~]# ipvsadm -C         # flush the policies left over from DR mode
[root@server1 ~]# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
[root@server1 ~]# modprobe ipip      # load the ipip module (to remove it: modprobe -r ipip)
[root@server1 ~]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:99:d9:5c brd ff:ff:ff:ff:ff:ff
    inet 172.25.83.1/24 brd 172.25.83.255 scope global eth0
    inet6 fe80::5054:ff:fe99:d95c/64 scope link 
       valid_lft forever preferred_lft forever
3: tunl0: <NOARP> mtu 1480 qdisc noop state DOWN 
    link/ipip 0.0.0.0 brd 0.0.0.0
[root@server1 ~]# ip addr del 172.25.83.100/24 dev eth0    # remove the Virtual IP that was bound to eth0 for DR mode
[root@server1 ~]# ip addr add 172.25.83.100/24 dev tunl0   # assign the Virtual IP to the tunnel
[root@server1 ~]# ip addr show
3: tunl0: <NOARP> mtu 1480 qdisc noop state DOWN     # DOWN means the tunnel is not up yet
    link/ipip 0.0.0.0 brd 0.0.0.0
    inet 172.25.83.100/24 scope global tunl0
[root@server1 ~]# ip link set up tunl0          # activate the tunnel
[root@server1 ~]# ip addr show
3: tunl0: <NOARP,UP,LOWER_UP> mtu 1480 qdisc noqueue state UNKNOWN 
    link/ipip 0.0.0.0 brd 0.0.0.0
    inet 172.25.83.100/24 scope global tunl0

5. Add the new policies

[root@server1 ~]# ipvsadm -A -t 172.25.83.100:80 -s rr
[root@server1 ~]# ipvsadm -a -t 172.25.83.100:80 -r 172.25.83.2:80 -i   # -i selects tunnel (ipip) forwarding
[root@server1 ~]# ipvsadm -a -t 172.25.83.100:80 -r 172.25.83.3:80 -i
[root@server1 ~]# ipvsadm -L     # list the ipvsadm rules
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.83.100:http rr
  -> server2:http                 Tunnel  1      0          0         
  -> server3:http                 Tunnel  1      0          0         
[root@server1 ~]# ipvsadm -Ln    # list the ipvsadm rules without name resolution
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.83.100:80 rr
  -> 172.25.83.2:80               Tunnel  1      0          0         
  -> 172.25.83.3:80               Tunnel  1      0          0        

6. Save the policies: /etc/init.d/ipvsadm save
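
On RHEL 6 the init script writes the current rule set to /etc/sysconfig/ipvsadm so it can be restored later; the step would look roughly like this (the exact message may differ slightly):

[root@server1 ~]# /etc/init.d/ipvsadm save      # persist the rules to /etc/sysconfig/ipvsadm
ipvsadm: Saving IPVS table to /etc/sysconfig/ipvsadm:      [  OK  ]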

Configuring the real server server2:

  1. Add the tunnel
  2. Assign the Virtual IP to the tunnel
  3. Activate the tunnel
[root@server2 html]# modprobe ipip
[root@server2 html]# ip addr del 172.25.83.100/24 dev eth0
[root@server2 html]# ip addr add 172.25.83.100/24 dev tunl0
[root@server2 html]# ip link set up tunl0
[root@server2 html]# ip addr show
3: tunl0: <NOARP,UP,LOWER_UP> mtu 1480 qdisc noqueue state UNKNOWN 
    link/ipip 0.0.0.0 brd 0.0.0.0
    inet 172.25.83.100/24 scope global tunl0

4. Adjust the rp_filter parameters

[root@server2 html]# sysctl -a | grep rp_filter  # reverse-path filtering must be turned off: the encapsulated request arrives on tunl0, but the route back to the client goes out eth0, so with rp_filter enabled the kernel treats the packet as spoofed and drops it
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.all.arp_filter = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.arp_filter = 0
net.ipv4.conf.lo.rp_filter = 1
net.ipv4.conf.lo.arp_filter = 0
net.ipv4.conf.eth0.rp_filter = 1
net.ipv4.conf.eth0.arp_filter = 0
net.ipv4.conf.tunl0.rp_filter = 1
net.ipv4.conf.tunl0.arp_filter = 0
[root@server2 html]# sysctl -w net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.rp_filter = 0
[root@server2 html]# sysctl -w net.ipv4.conf.lo.rp_filter=0
net.ipv4.conf.lo.rp_filter = 0
[root@server2 html]# sysctl -w net.ipv4.conf.eth0.rp_filter=0
net.ipv4.conf.eth0.rp_filter = 0
[root@server2 html]# sysctl -w net.ipv4.conf.tunl0.rp_filter=0
net.ipv4.conf.tunl0.rp_filter = 0
[root@server2 html]# sysctl -p     # reload
[root@server2 html]# sysctl -a | grep rp_filter       # one value is still 1, so it has to be changed in the config file
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.all.arp_filter = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.arp_filter = 0
net.ipv4.conf.lo.rp_filter = 0
net.ipv4.conf.lo.arp_filter = 0
net.ipv4.conf.eth0.rp_filter = 0
net.ipv4.conf.eth0.arp_filter = 0
net.ipv4.conf.tunl0.rp_filter = 0
net.ipv4.conf.tunl0.arp_filter = 0
[root@server2 html]# vim /etc/sysctl.conf     # edit the file and set the parameter to 0
net.ipv4.conf.default.rp_filter = 0
[root@server2 html]# sysctl -p           # reload again
[root@server2 html]# sysctl -a | grep rp_filter     # check again: every value is now 0
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.all.arp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_filter = 0
net.ipv4.conf.lo.rp_filter = 0
net.ipv4.conf.lo.arp_filter = 0
net.ipv4.conf.eth0.rp_filter = 0
net.ipv4.conf.eth0.arp_filter = 0
net.ipv4.conf.tunl0.rp_filter = 0
net.ipv4.conf.tunl0.arp_filter = 0
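
Values set with sysctl -w are lost on reboot. As a sketch, the whole set could also be made persistent by putting the rp_filter keys used above into /etc/sysctl.conf (the tunl0 key only applies once the ipip module is loaded):

[root@server2 html]# cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.lo.rp_filter = 0
net.ipv4.conf.eth0.rp_filter = 0
net.ipv4.conf.tunl0.rp_filter = 0
EOF
[root@server2 html]# sysctl -p      # reload so the file and the running values match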

Why does rp_filter have to be changed?

  • rp_filter controls whether the kernel validates the source address of incoming packets. It takes three values, 0, 1 and 2:
  • 0: no source address validation.
  • 1: strict reverse-path validation. For every incoming packet, the kernel checks that the best route back to the source goes out the interface the packet arrived on; if it does not, the packet is dropped (see the route check after this list).
  • 2: loose reverse-path validation. For every incoming packet, the kernel only checks that the source address is reachable via some interface; if no return route exists at all, the packet is dropped.
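
To see why strict mode gets in the way here, you can ask the kernel on a real server which interface it would use to reach the client: the request comes in over tunl0, but the best route back goes out eth0, so strict validation drops it. The client address below is only illustrative and the output is trimmed:

[root@server2 html]# ip route get 172.25.83.250      # 172.25.83.250 stands in for a client on the lab network
172.25.83.250 dev eth0  src 172.25.83.2 
    cache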

Configuring the real server server3: the steps are identical to server2 (see the condensed command list below).
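
For reference, a condensed run of the same commands on server3 might look like this (the default.rp_filter key also has to be fixed in /etc/sysctl.conf, exactly as on server2):

[root@server3 html]# modprobe ipip
[root@server3 html]# ip addr del 172.25.83.100/24 dev eth0       # only needed if the VIP was bound to eth0 for DR mode
[root@server3 html]# ip addr add 172.25.83.100/24 dev tunl0
[root@server3 html]# ip link set up tunl0
[root@server3 html]# sysctl -w net.ipv4.conf.default.rp_filter=0
[root@server3 html]# sysctl -w net.ipv4.conf.lo.rp_filter=0
[root@server3 html]# sysctl -w net.ipv4.conf.eth0.rp_filter=0
[root@server3 html]# sysctl -w net.ipv4.conf.tunl0.rp_filter=0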

Testing from the client (the physical host):
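
With the rr scheduler, repeated requests to the VIP should alternate between the two real servers; a check from the client would look roughly like this (the client hostname is illustrative):

[root@client ~]# curl 172.25.83.100
<h1>www.xin.com -server2</h1>
[root@client ~]# curl 172.25.83.100
<h1>bbs.xin.com - server3</h1>
[root@client ~]# curl 172.25.83.100
<h1>www.xin.com -server2</h1>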

Reposted from blog.csdn.net/qq_42303254/article/details/86669430