Deploying a Highly Available Kubernetes Cluster with kubeadm (v1.14.0)

I. Cluster Planning

Hostname        IP            Role     Main components
VIP             172.16.1.10   -        provides HA and load balancing for the masters
k8s-master01    172.16.1.11   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-master02    172.16.1.12   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-master03    172.16.1.13   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-node01      172.16.1.21   node     kubelet, kube-proxy, kube-flannel
k8s-node02      172.16.1.22   node     kubelet, kube-proxy, kube-flannel

High availability and load balancing for the master nodes are implemented with keepalived and haproxy. On cloud hosts you can use the corresponding cloud product instead, such as Alibaba Cloud SLB or Tencent Cloud CLB.

II. Preparation

Perform the following preparation on all nodes.

1. Hardware requirements

At least 2 CPUs and 2 GB of RAM per node are recommended. This is not a hard requirement; a cluster can also be built with 1 CPU and 1 GB, but kubeadm will emit warnings during deployment:

# With a single CPU, initializing a master reports:
 [WARNING NumCPU]: the number of available CPUs 1 is less than the required 2
# Deploying add-ons or pods may report:
warning:FailedScheduling:Insufficient cpu, Insufficient memory
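
A quick way to verify a node's resources before initializing (plain coreutils/procps commands, nothing specific to this guide):

# Number of CPUs and available memory on this node
nproc
free -h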

2. Kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0   # use physical memory as much as possible before touching swap
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
sysctl --system
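
Note: the net.bridge.* keys only exist once the br_netfilter kernel module is loaded. If sysctl --system complains about them, the following (an assumption, not part of the original steps) loads the module now and on every boot:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# Re-apply and verify
sysctl --system
sysctl net.bridge.bridge-nf-call-iptables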

3. Disable swap

Since Kubernetes 1.8, swap must be disabled; otherwise kubelet will not start with its default configuration.

# Disable temporarily
swapoff -a
# Disable permanently
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
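
To confirm swap is fully off, both of the following should report no active swap:

swapon --show
free -m | grep -i swap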

4. Enable IPVS

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
# Verify the modules are loaded
lsmod | grep ip_vs
# Load them automatically at boot
cat <<EOF>> /etc/rc.local
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/rc.d/rc.local
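
On a systemd-based system, a modules-load.d drop-in is an alternative to rc.local (a sketch using the same module list as above; nf_conntrack_ipv4 applies to kernels before 4.19):

cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
systemctl restart systemd-modules-load
lsmod | grep -e ip_vs -e nf_conntrack_ipv4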

5. Disable SELinux

# Disable temporarily
setenforce 0
# Disable permanently
sed -ri 's/^(SELINUX=)[a-z]*/\1disabled/' /etc/selinux/config

6. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

7. Install Docker

# Get the docker-ce yum repo
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Get the CentOS base repo (Aliyun mirror) and install the EPEL release package
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum -y install epel-release
# Install Docker
yum -y install docker-ce
docker version
systemctl start docker
systemctl enable docker

Note: the command above installs the latest docker-ce by default. To install a specific version instead:

# List available docker-ce versions
yum list docker-ce --showduplicates
# Install the chosen version
yum -y install docker-ce-<VERSION_STRING>
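
Optionally, Docker's cgroup driver can be switched to systemd, which is what the upstream kubeadm documentation recommends; this is a sketch, not part of the original steps (kubeadm detects whichever driver Docker reports and configures kubelet to match):

mkdir -p /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
systemctl restart docker
docker info | grep -i cgroup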

8. Miscellaneous

Passwordless SSH between nodes, /etc/hosts entries, and NTP time synchronization.
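
A minimal sketch of these three items, assuming the hostnames/IPs from the planning table and that chrony is acceptable for time synchronization:

# /etc/hosts entries (every node)
cat <<EOF >> /etc/hosts
172.16.1.11 k8s-master01
172.16.1.12 k8s-master02
172.16.1.13 k8s-master03
172.16.1.21 k8s-node01
172.16.1.22 k8s-node02
EOF
# Passwordless SSH from master01 to the other nodes
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for host in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh-copy-id root@$host; done
# NTP time sync via chrony (the CentOS 7 default)
yum -y install chrony
systemctl enable --now chronyd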

III. Install and configure keepalived and haproxy

Run the following on the master nodes.

1. Installation

yum install -y socat keepalived haproxy ipvsadm
systemctl enable haproxy
systemctl enable keepalived

2. Configuration

haproxy configuration file:

#/etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local3
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     32768
    user        haproxy
    group       haproxy
    daemon
    nbproc      1
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s

listen stats
    mode   http
    bind :8888
    stats   enable
    stats   uri     /admin?stats
    stats   auth    admin:admin
    stats   admin   if TRUE

frontend  k8s_https *:8443
    mode      tcp
    maxconn      2000
    default_backend     https_sri

backend https_sri
    balance      roundrobin
    server master1-api 172.16.1.11:6443  check inter 10000 fall 2 rise 2 weight 1
    server master2-api 172.16.1.12:6443  check inter 10000 fall 2 rise 2 weight 1
    server master3-api 172.16.1.13:6443  check inter 10000 fall 2 rise 2 weight 1

keepalived configuration file:

#/etc/keepalived/keepalived.conf
global_defs {
   router_id master01
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.16.1.10
    }
    track_script {   
        check_haproxy
    }
}

#/etc/keepalived/check_haproxy.sh
#!/bin/bash
NUM=`ps -C haproxy --no-header |wc -l`
if [ $NUM -eq 0 ];then
    systemctl stop keepalived
fi

Note that the keepalived configuration differs between the three master nodes (a sketch for generating the variants follows below):
router_id is master01, master02 and master03 respectively
state is MASTER, BACKUP and BACKUP respectively
priority is 100, 90 and 80 respectively
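
One possible way to derive the master02/master03 variants and sanity-check the VIP, assuming the master01 files above were first copied to the other two nodes:

# On master02 (BACKUP, priority 90); use master02 -> master03 and priority 80 on the third node
sed -i -e 's/router_id master01/router_id master02/' \
       -e 's/state MASTER/state BACKUP/' \
       -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf
chmod +x /etc/keepalived/check_haproxy.sh
# Start both services, then confirm the VIP sits on the current MASTER and haproxy listens on 8443
systemctl start haproxy keepalived
ip addr show ens192 | grep 172.16.1.10
ss -lntp | grep 8443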

IV. Kubernetes cluster deployment

1. Install kubeadm, kubelet and kubectl

Install kubeadm, kubelet and kubectl on all nodes. Note: kubectl is not strictly required on the worker nodes.

# Configure the yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install
yum -y install kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0
systemctl enable kubelet

2. Initialize the masters

A default configuration file can be generated with kubeadm config print init-defaults > kubeadm.conf.

# List the required images
kubeadm config images list --config kubeadm.conf
# Pull the required images
kubeadm config images pull --config kubeadm.conf
# Initialize
kubeadm init --config kubeadm.conf

(1) The master01 node

Configuration file kubeadm_master01.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.11
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.11:2379"
      advertise-client-urls: "https://172.16.1.11:2379"
      listen-peer-urls: "https://172.16.1.11:2380"
      initial-advertise-peer-urls: "https://172.16.1.11:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380"
      initial-cluster-state: new
    serverCertSANs:
      - k8s-master01
      - 172.16.1.11
    peerCertSANs:
      - k8s-master01
      - 172.16.1.11
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# Initialize master01
kubeadm init --config kubeadm_master01.conf
# Configure kubectl to manage the cluster
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

Only then can kubectl commands be run. For example, listing the current pods shows that only the coredns pods are in Pending state, because no network plugin has been installed yet.

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               0/1     Pending   0          40m
coredns-8686dcc4fd-xk9st               0/1     Pending   0          40m
etcd-k8s-master01                      1/1     Running   0          39m
kube-apiserver-k8s-master01            1/1     Running   0          39m
kube-controller-manager-k8s-master01   1/1     Running   0          39m
kube-proxy-2cb7r                       1/1     Running   0          40m
kube-scheduler-k8s-master01            1/1     Running   0          39m
# Download the Flannel network plugin manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Bind to a specific NIC
By default flannel uses the host's first network interface. If the host has multiple NICs and you need to specify which one to use, modify the following section of kube-flannel.yml:

      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192        # add this line
# Then apply the manifest (note: run this only on master01)
kubectl apply -f kube-flannel.yml

# The coredns pods are no longer Pending now:

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               1/1     Running   0          54m
coredns-8686dcc4fd-xk9st               1/1     Running   0          54m
etcd-k8s-master01                      1/1     Running   0          53m
kube-apiserver-k8s-master01            1/1     Running   0          53m
kube-controller-manager-k8s-master01   1/1     Running   0          53m
kube-flannel-ds-amd64-4vg2s            1/1     Running   0          50s
kube-proxy-2cb7r                       1/1     Running   0          54m
kube-scheduler-k8s-master01            1/1     Running   0          53m

(2) Distribute certificates

Run the following script on master01:

#!/bin/bash
for index in 12 13; do
  ip=172.16.1.${index}
  ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
  scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
  scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
  scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
  scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
  scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
  scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
  scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
  scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
  scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
  scp /etc/kubernetes/admin.conf $ip:~/.kube/config
done
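
An optional quick check that the files landed on both peers:

for ip in 172.16.1.12 172.16.1.13; do
  ssh $ip "ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd"
done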

(3) The master02 node

Configuration file kubeadm_master02.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.12
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.12:2379"
      advertise-client-urls: "https://172.16.1.12:2379"
      listen-peer-urls: "https://172.16.1.12:2380"
      initial-advertise-peer-urls: "https://172.16.1.12:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master02
      - 172.16.1.12
    peerCertSANs:
      - k8s-master02
      - 172.16.1.12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# Generate certificates
kubeadm init phase certs all --config kubeadm_master02.conf
# Set up local etcd
kubeadm init phase etcd local --config kubeadm_master02.conf
# Generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
# Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master02.conf
# Add master02's etcd member to the cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master2 https://172.16.1.12:2380
# Start kube-apiserver, kube-controller-manager and kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master02.conf
kubeadm init phase control-plane all --config kubeadm_master02.conf

# Check node status

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h2m   v1.14.0
k8s-master02   Ready    <none>   14m    v1.14.0
# Mark the node as a master
kubeadm init phase mark-control-plane --config kubeadm_master02.conf

# Check again

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h3m   v1.14.0
k8s-master02   Ready    master   16m    v1.14.0

(4) The master03 node

Configuration file kubeadm_master03.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.13
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.13:2379"
      advertise-client-urls: "https://172.16.1.13:2379"
      listen-peer-urls: "https://172.16.1.13:2380"
      initial-advertise-peer-urls: "https://172.16.1.13:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380,k8s-master03=https://172.16.1.13:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master03
      - 172.16.1.13
    peerCertSANs:
      - k8s-master03
      - 172.16.1.13
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# Generate certificates
kubeadm init phase certs all --config kubeadm_master03.conf
# Set up local etcd
kubeadm init phase etcd local --config kubeadm_master03.conf
# Generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
# Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master03.conf
# Add master03's etcd member to the cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master3 https://172.16.1.13:2380
# Start kube-apiserver, kube-controller-manager and kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master03.conf
kubeadm init phase control-plane all --config kubeadm_master03.conf
# Mark the node as a master
kubeadm init phase mark-control-plane --config kubeadm_master03.conf

With the above steps, all three masters have been initialized.
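
At this point the control plane can be sanity-checked through the VIP (a simple probe against the haproxy frontend from section III; depending on RBAC defaults the /healthz probe may return ok or an authorization error, either of which shows the VIP is forwarding):

curl -k https://172.16.1.10:8443/healthz
kubectl get nodes
kubectl get pods -n kube-system -o wide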

3. Join the worker nodes to the cluster

# The init output on master01 printed the join command; run it on each worker node:
kubeadm join 172.16.1.10:8443 --token 8j5lga.y2cei06i6cfxbxmo \
--discovery-token-ca-cert-hash sha256:9eff14803a65631b74e4db6dfa9e7362eb1dd62cd76d56e840d33b1f5a3aa93b
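
If the bootstrap token has already expired (tokens are valid for 24 hours by default), a fresh join command can be printed on any master:

kubeadm token create --print-join-command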

4. Status checks

# View node information

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   3h19m   v1.14.0
k8s-master02   Ready    master   114m    v1.14.0
k8s-master03   Ready    master   95m     v1.14.0
k8s-node01     Ready    <none>   64m     v1.14.0
k8s-node02     Ready    <none>   50m     v1.14.0

# View cluster information

[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://172.16.1.10:8443
KubeDNS is running at https://172.16.1.10:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

# View component status

[root@k8s-master01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

# View the etcd cluster members

[root@k8s-master01 ~]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member list 
2cd4d60db6db4371: name=k8s-master01 peerURLs=https://172.16.1.11:2380 clientURLs=https://172.16.1.11:2379 isLeader=true
707da0ac9cb69832: name=k8s-master02 peerURLs=https://172.16.1.12:2380 clientURLs=https://172.16.1.12:2379 isLeader=false
c702920d32ced638: name=k8s-master03 peerURLs=https://172.16.1.13:2380 clientURLs=https://172.16.1.13:2379 isLeader=false

# Check whether IPVS is enabled
The rules can be seen with ipvsadm:

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 172.16.1.11:6443             Masq    1      0          0         
  -> 172.16.1.12:6443             Masq    1      0          0         
  -> 172.16.1.13:6443             Masq    1      1          0         
TCP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.244.3.2:9153              Masq    1      0          0         
  -> 10.244.4.2:9153              Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0       

Checking the pod log with kubectl logs --tail=10 kube-proxy-tqxlq -n kube-system should show: Using ipvs Proxier.
If ipvsadm shows no rules and the kube-proxy log instead contains:

can't determine whether to use ipvs proxy, error: IPVS proxier will not be used because the following required kernel modules are not loaded: [ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh]
Using iptables Proxier.

then enabling IPVS has failed.
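
In that case a plausible remediation (assuming the missing kernel modules are the only problem) is to load them and recreate the kube-proxy pods so they re-detect IPVS support:

# Load the required modules on every node (same list as in section II)
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
# Recreate the kube-proxy pods
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
# Verify: the new pods should log "Using ipvs Proxier" and ipvsadm -ln should show rules
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=10
ipvsadm -ln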

Reposted from blog.51cto.com/fengjicheng/2383555