一、基于单节点基础环境,部署多节点
1.1、复制kubernetes目录到master02
[root@master01 k8s]# scp -r /opt/kubernetes/ root@192.168.140.40:/opt
[root@master01 k8s]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.140.40:/usr/lib/systemd/system/
1.2、master02上操作
[root@server ~]# hostnamectl set-hostname master02
[root@server ~]# su
[root@master02 ~]#
[root@master02 ~]# cd /opt/kubernetes/cfg/
[root@master02 cfg]# vim kube-apiserver
1.3、拷贝master01上已有的etcd证书给master02使用
[root@master01 k8s]# scp -r /opt/etcd/ root@192.168.140.40:/opt/
1.4、启动master02中的三个组件服务
[root@master02 cfg]# systemctl start kube-apiserver.service
[root@master02 cfg]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@master02 cfg]# systemctl start kube-controller-manager.service
[root@master02 cfg]# systemctl enable kube-controller-manager.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master02 cfg]# systemctl start kube-scheduler.service
[root@master02 cfg]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master02 cfg]# vim /etc/profile   #增加环境变量:export PATH=$PATH:/opt/kubernetes/bin/,使kubectl命令可用
[root@master02 cfg]# source /etc/profile
[root@master02 cfg]# kubectl get node
二、K8S负载均衡部署
[root@zabbix-server ~]# hostnamectl set-hostname nginx01
[root@zabbix-server ~]# su
[root@nginx01 ~]#
[root@zabbix-client ~]# hostnamectl set-hostname nginx02
[root@zabbix-client ~]# su
[root@nginx02 ~]#
[root@nginx01 ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
[root@nginx01 ~]# yum clean all
[root@nginx01 ~]# yum list
[root@nginx01 ~]# yum -y install nginx
[root@nginx01 ~]# vim /etc/nginx/nginx.conf #添加四层转发
[root@nginx01 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@nginx01 ~]# systemctl start nginx
2.1、部署keepalived服务
[root@nginx01 ~]# yum -y install keepalived #下载keepalived
[root@nginx01 ~]# cp keepalived.conf /etc/keepalived/keepalived.conf #修改配置文件
[root@nginx01 keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
# 接收邮件地址
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# 邮件发送地址
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的
priority 100 # 优先级,备服务器设置 90
advert_int 1 # 指定VRRP 心跳包通告间隔时间,默认1秒
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.140.100/24
}
track_script {
check_nginx
}
[root@nginx01 ~]# rz
[root@nginx01 ~]# ls
[root@nginx01 ~]# vim /etc/nginx/check_nginx.sh
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
[root@nginx01 ~]# chmod +x /etc/nginx/check_nginx.sh
[root@nginx01 nginx]# systemctl start keepalived.service
2.2、第二台负载均衡节点(nginx02)部署
[root@nginx02 ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
[root@nginx02 ~]# yum clean all
[root@nginx02 ~]# yum list
[root@nginx02 ~]# yum -y install nginx
[root@nginx02 ~]# vim /etc/nginx/nginx.conf
[root@nginx02 ~]# systemctl start nginx
2.2.1、部署keepalived服务
[root@nginx02 ~]# yum -y install keepalived
[root@nginx02 ~]# cp keepalived.conf /etc/keepalived/keepalived.conf
[root@nginx02 keepalived]# vim keepalived.conf
[root@nginx02 ~]# vim /etc/nginx/check_nginx.sh
[root@nginx02 ~]# chmod +x /etc/nginx/check_nginx.sh
[root@nginx02 nginx]# systemctl start keepalived.service
三、验证
[root@nginx01 ~]# ip a
[root@nginx02 ~]# ip a
[root@nginx01 ~]# pkill nginx
[root@nginx01 ~]# systemctl status nginx
[root@nginx02 nginx]# ip a
3.1、master恢复nginx,再次验证虚拟地址
[root@nginx01 nginx]# systemctl start nginx
[root@nginx01 nginx]# systemctl start keepalived.service
[root@nginx01 nginx]# ip a
3.2、开始修改node节点配置文件统一VIP
[root@node01 etc]# cd /opt/kubernetes/
[root@node01 kubernetes]# cd cfg/
[root@node01 cfg]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@node02 ~]# cd /opt/kubernetes/cfg/
[root@node02 cfg]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@node01 cfg]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@node02 cfg]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@node01 cfg]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
[root@node02 cfg]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
//全部修改为VIP server: https://192.168.140.100:6443
[root@node01 cfg]# systemctl restart kubelet.service
[root@node01 cfg]# systemctl restart kube-proxy.service
[root@node02 cfg]# systemctl restart kubelet.service
[root@node02 cfg]# systemctl restart kube-proxy.service
[root@node01 cfg]# grep 100 *
[root@node02 cfg]# grep 100 *
3.3、在lb01上查看nginx的k8s日志
[root@nginx01 nginx]# tail /var/log/nginx/k8s-access.log #负载均衡已完成
3.4、在master01上操作测试创建pod
[root@master01 ~]# kubectl run nginx --image=nginx
[root@master01 ~]# kubectl get pods
[root@master01 ~]# kubectl get pods
[root@master01 ~]# kubectl get pods -o wide #查看pod网络
[root@node01 cfg]# docker ps -a
[root@master01 ~]# kubectl describe pod nginx-dbddb74b8-x9pkq #查看创建过程
3.5、在node1节点访问
[root@node01 cfg]# curl 172.17.17.2
3.6、回到master01查看日志
[root@master01 ~]# kubectl logs nginx-dbddb74b8-x9pkq
四、总结
多Master集群中,node节点统一指向keepalived提供的虚拟地址(VIP),请求到达持有VIP的nginx后,由nginx四层负载均衡调度到某一台Master的apiserver,apiserver再与etcd建立联系并把数据写入etcd