Kubernetes 1.17.2 High-Availability Deployment

20.0.0.200    10.0.0.200 bs-k8s-master01 control-plane node 2c2g
20.0.0.201    10.0.0.201 bs-k8s-master02 control-plane node 2c2g
20.0.0.202    10.0.0.202 bs-k8s-master03 control-plane node 2c2g
20.0.0.203    10.0.0.203 bs-k8s-node01 worker node 2c2g
20.0.0.204    10.0.0.204 bs-k8s-node02 worker node 2c2g
20.0.0.205    10.0.0.205 bs-k8s-node03 worker node 2c2g
Server preparation (all machines; bs-k8s-master01 shown as the example)
#Disable SELinux, firewalld, and iptables
[root@bs-k8s-master01 ~]# setenforce 0 \
> && sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config \
> && getenforce
setenforce: SELinux is disabled
[root@bs-k8s-master01 ~]# 
[root@bs-k8s-master01 ~]# systemctl stop firewalld \
> && systemctl daemon-reload \
> && systemctl disable firewalld \
> && systemctl daemon-reload \
> && systemctl status firewalld
[root@bs-k8s-master01 ~]# 
[root@bs-k8s-master01 ~]# yum install -y iptables-services \
> && systemctl stop iptables \
> && systemctl disable iptables \
> && systemctl status iptables
#Add hosts entries
[root@bs-k8s-master01 ~]# cat >> /etc/hosts <<EOF
> 20.0.0.200  bs-k8s-master01
> 20.0.0.201  bs-k8s-master02
> 20.0.0.202  bs-k8s-master03
> 20.0.0.203  bs-k8s-node01
> 20.0.0.204  bs-k8s-node02
> 20.0.0.205  bs-k8s-node03
> EOF
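A quick sanity check that the new entries resolve (an extra verification step, not part of the original transcript):
[root@bs-k8s-master01 ~]# for h in bs-k8s-master0{1..3} bs-k8s-node0{1..3}; do ping -c1 -W1 $h >/dev/null && echo "$h ok"; done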
#Switch to the Aliyun yum mirrors
[root@bs-k8s-master01 ~]# cp -r /etc/yum.repos.d /etc/yum.repos.d.bak
[root@bs-k8s-master01 ~]# rm -f /etc/yum.repos.d/*.repo
[root@bs-k8s-master01 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo \
> && wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@bs-k8s-master01 ~]# yum clean all && yum makecache
#Configure limits.conf
[root@bs-k8s-master01 ~]# cat >> /etc/security/limits.conf <<EOF
> # End of file
> * soft nproc 10240000
> * hard nproc 10240000
> * soft nofile 10240000
> * hard nofile 10240000
> EOF
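These limits only take effect for new login sessions. After logging back in, a quick check (note that nofile cannot exceed fs.nr_open, which the sysctl step below raises):
[root@bs-k8s-master01 ~]# ulimit -n        #expect 10240000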
#Configure sysctl.conf
[root@bs-k8s-master01 ~]# [ ! -e "/etc/sysctl.conf_bk" ] && /bin/mv /etc/sysctl.conf{,_bk} \
&& cat > /etc/sysctl.conf << EOF
fs.file-max=1000000
fs.nr_open=20480000
net.ipv4.tcp_max_tw_buckets = 180000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_max_syn_backlog = 16384
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_fin_timeout = 20
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_syncookies = 1
#net.ipv4.tcp_tw_len = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65000
#net.nf_conntrack_max = 6553500
#net.netfilter.nf_conntrack_max = 6553500
#net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
#net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
#net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
#net.netfilter.nf_conntrack_tcp_timeout_established = 3600
EOF

[root@bs-k8s-master01 ~]# sysctl -p
#Configure time synchronization
[root@bs-k8s-master01 ~]# ntpdate -u pool.ntp.org
[root@bs-k8s-master01 ~]# crontab -e       #add a cron entry
*/15 * * * * /usr/sbin/ntpdate -u pool.ntp.org >/dev/null 2>&1
#Configure k8s.conf
[root@bs-k8s-master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
#Run the following to apply the changes
[root@bs-k8s-master01 ~]# modprobe br_netfilter \
> && sysctl -p /etc/sysctl.d/k8s.conf
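br_netfilter is not loaded automatically after a reboot, and the bridge-nf keys in k8s.conf fail to apply without it. One way to load it persistently, via systemd-modules-load (an addition, not shown in the original steps):
[root@bs-k8s-master01 ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf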
#Disable swap (swapoff -a takes effect immediately; removing the swap line from fstab makes it permanent)
[root@bs-k8s-master01 ~]# swapoff -a
[root@bs-k8s-master01 ~]# yes | cp /etc/fstab /etc/fstab_bak
[root@bs-k8s-master01 ~]# cat /etc/fstab_bak |grep -v swap > /etc/fstab
#Load the IPVS kernel modules
[root@bs-k8s-master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@bs-k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Add the Kubernetes yum repository
[root@bs-k8s-master01 ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Install required server packages
[root@bs-k8s-master01 ~]# yum -y install wget vim iftop iotop net-tools nmon telnet lsof iptraf nmap httpd-tools lrzsz mlocate ntp ntpdate strace libpcap nethogs bridge-utils bind-utils nc nfs-utils rpcbind dnsmasq python python-devel yum-utils device-mapper-persistent-data lvm2 tcpdump tree
#Add the Docker repository
[root@bs-k8s-master01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@bs-k8s-master01 ~]# yum list docker-ce --showduplicates | sort -r
[root@bs-k8s-master01 ~]# yum -y install docker-ce-18.06.3.ce-3.el7
#Configure the daemon.json file
#Get a registry mirror (accelerator) address
#Aliyun
#   open https://cr.console.aliyun.com/#/accelerator
#        register, log in, and set a password
#        the page then shows your accelerator address, something like https://123abc.mirror.aliyuncs.com
#Tencent Cloud (only usable from Tencent Cloud hosts)
#accelerator address: https://mirror.ccs.tencentyun.com
[root@bs-k8s-master01 ~]# mkdir -p /etc/docker/ \
> && cat > /etc/docker/daemon.json << EOF
> {
>     "registry-mirrors":[
>         "https://c6ai9izk.mirror.aliyuncs.com"
>     ],
>     "max-concurrent-downloads":3,
>     "max-concurrent-uploads":5,
>     "data-root":"/data/docker",
>     "log-driver":"json-file",
>     "log-opts":{
>         "max-size":"100m",
>         "max-file":"1"
>     },
>     "storage-driver":"overlay2",
>     "storage-opts":[
>         "overlay2.override_kernel_check=true"
>     ],
>     "live-restore":true,
>     "exec-opts":[
>         "native.cgroupdriver=systemd"
>     ]
> }
> EOF
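A malformed daemon.json prevents dockerd from starting at all, so it is worth validating the file before the restart; a quick syntax check using the python installed above:
[root@bs-k8s-master01 ~]# python -m json.tool /etc/docker/daemon.json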
[root@bs-k8s-master01 ~]# systemctl enable docker \
> && systemctl restart docker \
> && systemctl status docker
#Deploy Kubernetes 1.17.2 with kubeadm
[root@bs-k8s-master01 ~]# yum list  kubelet kubeadm kubectl --showduplicates | sort -r
[root@bs-k8s-master01 ~]# yum install -y kubelet-1.17.2 kubeadm-1.17.2 kubectl-1.17.2 ipvsadm ipset
#Enable kubelet at boot. Note: do not run systemctl start kubelet at this point; it fails until initialization completes, after which kubelet starts automatically
[root@bs-k8s-master01 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@bs-k8s-master01 ~]# 
#Enable kubectl command completion
[root@bs-k8s-master01 ~]# source /usr/share/bash-completion/bash_completion
[root@bs-k8s-master01 ~]# source <(kubectl completion bash)
[root@bs-k8s-master01 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc

Unless otherwise noted, the following steps are performed on bs-k8s-master01
#Passwordless SSH login
[root@bs-k8s-master01 ~]# vim /service/scripts/ssh-cp.sh
##########################################################################
#Author:                     zisefeizhu
#QQ:                         2********0
#Date:                       2020-02-02
#FileName:                   /service/scripts/ssh-cp.sh
#URL:                        https://www.cnblogs.com/zisefeizhu/
#Description:                The test script
#Copyright (C):              2020 All rights reserved
##########################################################################
#!/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export PATH
#Target host list
IP="
20.0.0.200
bs-k8s-master01
20.0.0.201
bs-k8s-master02
20.0.0.202
bs-k8s-master03
20.0.0.203
bs-k8s-node01
20.0.0.204
bs-k8s-node02
20.0.0.205
bs-k8s-node03
"
for node in ${IP};do
  sshpass -p 1 ssh-copy-id  ${node}  -o StrictHostKeyChecking=no
  if [ $? -eq 0 ];then
    echo "${node} 秘钥copy完成"
  else
    echo "${node} 秘钥copy失败"
  fi
done
[root@bs-k8s-master01 ~]# ssh-keygen -t rsa
[root@bs-k8s-master01 ~]# sh /service/scripts/ssh-cp.sh 

#Modify the initialization configuration
Print the default configuration with kubeadm config print init-defaults > kubeadm-init.yaml, then adjust it for your environment.
Note
The fields that need changing are advertiseAddress, controlPlaneEndpoint, imageRepository, podSubnet, and kubernetesVersion:
    advertiseAddress is master01's IP
    controlPlaneEndpoint is the VIP plus port 8443 (see the load-balancer sketch after this list)
    imageRepository is switched to the Aliyun mirror
    podSubnet is an unused network range (it becomes the pod CIDR handed to Calico below)
    kubernetesVersion matches the version installed in the previous step
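The VIP 20.0.0.250:8443 is never configured in this walkthrough; it assumes a load balancer, for example haproxy with keepalived holding the VIP, forwarding to the three apiservers on port 6443. A minimal haproxy listen block, purely as a sketch of that assumption (port 8443 is used so haproxy can co-exist with the local apiserver on 6443 when it runs on the masters):
listen k8s-apiserver
    bind 20.0.0.250:8443
    mode tcp
    balance roundrobin
    server bs-k8s-master01 20.0.0.200:6443 check
    server bs-k8s-master02 20.0.0.201:6443 check
    server bs-k8s-master03 20.0.0.202:6443 check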
[root@bs-k8s-master01 ~]# cd /data/
[root@bs-k8s-master01 data]# mkdir k8s
[root@bs-k8s-master01 data]# cd k8s/
[root@bs-k8s-master01 k8s]# ls
[root@bs-k8s-master01 k8s]# mkdir Initialisierung
[root@bs-k8s-master01 k8s]# cd Initialisierung/
[root@bs-k8s-master01 Initialisierung]# kubeadm config print init-defaults > kubeadm-init.yaml
W0202 16:04:55.195871    4006 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0202 16:04:55.195969    4006 validation.go:28] Cannot validate kubelet config - no validator is available
[root@bs-k8s-master01 Initialisierung]# cp kubeadm-init.yaml{,.bak}
[root@bs-k8s-master01 Initialisierung]# diff kubeadm-init.yaml{,.bak}
12c12
<   advertiseAddress: 20.0.0.200
---
>   advertiseAddress: 1.2.3.4
26d25
< controlPlaneEndpoint: "20.0.0.250:8443"
33c32
< imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
---
> imageRepository: k8s.gcr.io
35c34
< kubernetesVersion: v1.17.2
---
> kubernetesVersion: v1.17.0
38d36
<   podSubnet: "10.209.0.0/16"
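Pieced together from the diff, the edited fields of kubeadm-init.yaml end up as the following excerpt (every other default is left untouched, including the default serviceSubnet of 10.96.0.0/12):
localAPIEndpoint:
  advertiseAddress: 20.0.0.200
  bindPort: 6443
controlPlaneEndpoint: "20.0.0.250:8443"
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kubernetesVersion: v1.17.2
networking:
  podSubnet: "10.209.0.0/16"
  serviceSubnet: 10.96.0.0/12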

#Pre-pull the images
[root@bs-k8s-master01 Initialisierung]# kubeadm config images pull --config kubeadm-init.yaml
W0202 16:15:50.198535    4055 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0202 16:15:50.198633    4055 validation.go:28] Cannot validate kubelet config - no validator is available
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.2
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.2
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.2
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.2
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5
[root@bs-k8s-master01 Initialisierung]# 
#Initialize the first control-plane node
[root@bs-k8s-master01 Initialisierung]# kubeadm init --config kubeadm-init.yaml
W0202 16:17:51.926686    4259 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0202 16:17:51.926769    4259 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [bs-k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 20.0.0.200 20.0.0.250]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [bs-k8s-master01 localhost] and IPs [20.0.0.200 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [bs-k8s-master01 localhost] and IPs [20.0.0.200 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0202 16:17:57.407938    4259 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0202 16:17:57.411148    4259 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 18.038392 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node bs-k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node bs-k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3 \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3 
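The join commands above embed a bootstrap token, which by default expires after 24 hours. If a node needs to join later, a fresh worker join command can be printed on a master with the standard kubeadm helper:

[root@bs-k8s-master01 ~]# kubeadm token create --print-join-command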
#Prepare the kubeconfig file for kubectl
By default, kubectl looks for a file named config under the .kube directory in the invoking user's home. Here we copy admin.conf, generated during the [kubeconfig] step of the init, to .kube/config.
[root@bs-k8s-master01 Initialisierung]#  mkdir -p $HOME/.kube
[root@bs-k8s-master01 Initialisierung]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@bs-k8s-master01 Initialisierung]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
This file records the API server's address, so from now on kubectl commands connect to the API server without any extra configuration.
#Check the control-plane components
[root@bs-k8s-master01 Initialisierung]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@bs-k8s-master01 Initialisierung]# kubectl get nodes
NAME              STATUS     ROLES    AGE    VERSION
bs-k8s-master01   NotReady   master   5m3s   v1.17.2
#Deploy the other master nodes
[root@bs-k8s-master01 Initialisierung]# vim /service/scripts/k8s-master-zhengshu.sh
[root@bs-k8s-master01 Initialisierung]# cat /service/scripts/k8s-master-zhengshu.sh
##########################################################################
#Author:                     zisefeizhu
#QQ:                         2********0
#Date:                       2020-02-02
#FileName:                   /service/scripts/k8s-master-zhengshu.sh
#URL:                        https://www.cnblogs.com/zisefeizhu/
#Description:                The test script
#Copyright (C):              2020 All rights reserved
##########################################################################
#!/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export PATH
USER=root
CONTROL_PLANE_IPS="bs-k8s-master02 bs-k8s-master03"
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
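Run the script on bs-k8s-master01 after the init and before joining the other masters; it relies on the SSH keys distributed earlier:
[root@bs-k8s-master01 Initialisierung]# sh /service/scripts/k8s-master-zhengshu.sh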

#bs-k8s-master02
[root@bs-k8s-master02 ~]# kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3 \
>     --control-plane
[root@bs-k8s-master02 ~]# mkdir -p $HOME/.kube
[root@bs-k8s-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@bs-k8s-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

#bs-k8s-master03
[root@bs-k8s-master03 ~]# kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3 \
>     --control-plane
[root@bs-k8s-master03 ~]# mkdir -p $HOME/.kube
[root@bs-k8s-master03 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@bs-k8s-master03 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@bs-k8s-master01 ~]# kubectl get nodes
NAME              STATUS     ROLES    AGE    VERSION
bs-k8s-master01   NotReady   master   14m    v1.17.2
bs-k8s-master02   NotReady   master   91s    v1.17.2
bs-k8s-master03   NotReady   master   104s   v1.17.2

#Deploy the worker nodes
[root@bs-k8s-node01 ~]# kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3
[root@bs-k8s-node02 ~]# kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3
[root@bs-k8s-node03 ~]# kubeadm join 20.0.0.250:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:ff9bd96896f749ddcb8597fb958eb38654fb64af89ed844076018bf9b2a6dfd3

#Deploy the Calico network plugin
[root@bs-k8s-master01 ~]# cd /data/k8s/
[root@bs-k8s-master01 k8s]# ls
Initialisierung
[root@bs-k8s-master01 k8s]# mkdir yaml
[root@bs-k8s-master01 k8s]# cd yaml/
[root@bs-k8s-master01 yaml]# wget http://docs.projectcalico.org/v3.11/getting-started/kubernetes/installation/hosted/calico.yaml
[root@bs-k8s-master01 yaml]# cp calico.yaml{,.bak}
[root@bs-k8s-master01 yaml]# vim calico.yaml
[root@bs-k8s-master01 yaml]# diff calico.yaml{,.bak}
598c598
<               value: "10.209.0.0/16"
---
>               value: "192.168.0.0/16"
[root@bs-k8s-master01 yaml]# kubectl apply -f calico.yaml 
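The nodes only flip to Ready once the Calico pods come up; progress can be followed with a watch:
[root@bs-k8s-master01 yaml]# kubectl get pods -n kube-system -w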

#Check node status
[root@bs-k8s-master01 yaml]# kubectl get nodes
NAME              STATUS   ROLES    AGE   VERSION
bs-k8s-master01   Ready    master   44m   v1.17.2
bs-k8s-master02   Ready    master   32m   v1.17.2
bs-k8s-master03   Ready    master   32m   v1.17.2
bs-k8s-node01     Ready    <none>   29m   v1.17.2
bs-k8s-node02     Ready    <none>   29m   v1.17.2
bs-k8s-node03     Ready    <none>   29m   v1.17.2

#Enable IPVS mode for kube-proxy (run on a single master node)
Edit config.conf in the kube-system/kube-proxy ConfigMap and set mode: "ipvs"
[root@bs-k8s-master01 yaml]# kubectl edit cm kube-proxy -n kube-system
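Inside the editor, the relevant fragment of config.conf looks like this after the change (an excerpt of the embedded KubeProxyConfiguration; the field defaults to an empty string, which means iptables mode):
    kind: KubeProxyConfiguration
    mode: "ipvs"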
#Restart the kube-proxy pods on every node
[root@bs-k8s-master01 yaml]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
pod "kube-proxy-57gm2" deleted
pod "kube-proxy-7gpws" deleted
pod "kube-proxy-8jb4x" deleted
pod "kube-proxy-lhqmg" deleted
pod "kube-proxy-s2t4s" deleted
pod "kube-proxy-smfv8" deleted
#Check the kube-proxy pod status
[root@bs-k8s-master01 yaml]#  kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-2wks8                           1/1     Running            0          46s
kube-proxy-7jr5q                           1/1     Running            0          33s
kube-proxy-7qzz8                           1/1     Running            0          55s
kube-proxy-cgz5z                           1/1     Running            0          37s
kube-proxy-fxxxs                           1/1     Running            0          49s
kube-proxy-lc9gt                           1/1     Running            0          59s
#Verify that IPVS mode is enabled
[root@bs-k8s-master01 yaml]# kubectl logs kube-proxy-2wks8 -n kube-system
I0202 09:10:37.049020       1 node.go:135] Successfully retrieved node IP: 20.0.0.201
I0202 09:10:37.049089       1 server_others.go:172] Using ipvs Proxier.
W0202 09:10:37.049375       1 proxier.go:420] IPVS scheduler not specified, use rr by default
I0202 09:10:37.049560       1 server.go:571] Version: v1.17.2
I0202 09:10:37.049979       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I0202 09:10:37.050282       1 config.go:313] Starting service config controller
I0202 09:10:37.050303       1 shared_informer.go:197] Waiting for caches to sync for service config
I0202 09:10:37.050409       1 config.go:131] Starting endpoints config controller
I0202 09:10:37.050443       1 shared_informer.go:197] Waiting for caches to sync for endpoints config
I0202 09:10:37.157807       1 shared_informer.go:204] Caches are synced for endpoints config 
I0202 09:10:37.162308       1 shared_informer.go:204] Caches are synced for service config 
The log prints Using ipvs Proxier, which shows that IPVS mode is active.

#Check the IPVS state
[root@bs-k8s-master01 yaml]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 20.0.0.200:6443              Masq    1      0          0         
  -> 20.0.0.201:6443              Masq    1      0          0         
  -> 20.0.0.202:6443              Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 10.209.194.129:53            Masq    1      0          0         
  -> 10.209.194.130:53            Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.209.194.129:9153          Masq    1      0          0         
  -> 10.209.194.130:9153          Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.209.194.129:53            Masq    1      0          0         
  -> 10.209.194.130:53            Masq    1      0          0      
  
[root@hs-k8s-master01 calico-3.11]# kubectl apply -f calico.yaml 
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[root@hs-k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5b644bc49c-wdssd   1/1     Running   0          18m
calico-node-bjtbm                          1/1     Running   0          18m
calico-node-c4hfp                          1/1     Running   7          18m
calico-node-m5vz7                          1/1     Running   4          18m
calico-node-pvkdn                          1/1     Running   1          18m
calico-node-qmfz8                          1/1     Running   2          18m
calico-node-sbgfk                          1/1     Running   1          18m
coredns-7f9c544f75-b7ksm                   1/1     Running   0          66m
coredns-7f9c544f75-gg4rm                   1/1     Running   0          66m
etcd-bs-k8s-master02                       1/1     Running   4          58m
etcd-bs-k8s-master03                       1/1     Running   8          59m
etcd-hs-k8s-master01                       1/1     Running   6          66m
kube-apiserver-bs-k8s-master02             1/1     Running   12         58m
kube-apiserver-bs-k8s-master03             1/1     Running   12         59m
kube-apiserver-hs-k8s-master01             1/1     Running   10         66m
kube-controller-manager-bs-k8s-master02    1/1     Running   6          57m
kube-controller-manager-bs-k8s-master03    1/1     Running   6          59m
kube-controller-manager-hs-k8s-master01    1/1     Running   5          66m
kube-proxy-2cffl                           1/1     Running   2          58m
kube-proxy-d95pz                           1/1     Running   2          63m
kube-proxy-j6hxc                           1/1     Running   2          59m
kube-proxy-kgwll                           1/1     Running   2          62m
kube-proxy-lbh7v                           1/1     Running   2          62m
kube-proxy-vfvzl                           1/1     Running   2          66m
kube-scheduler-bs-k8s-master02             1/1     Running   6          58m
kube-scheduler-bs-k8s-master03             1/1     Running   6          59m
kube-scheduler-hs-k8s-master01             1/1     Running   4          66m

#Test
[root@hs-k8s-master01 ~]# kubectl run nginx --image=nginx:1.14 --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@hs-k8s-master01 ~]# kubectl get pods  -o wide
NAME                     READY   STATUS    RESTARTS   AGE    IP             NODE            NOMINATED NODE   READINESS GATES
nginx-5cf565498c-q8fzl   1/1     Running   0          112s   10.209.46.65   bs-k8s-node01   <none>           <none>
nginx-5cf565498c-z2c2m   1/1     Running   0          112s   10.209.208.1   bs-k8s-node03   <none>           <none>

[root@hs-k8s-master01 ~]# curl 10.209.46.65
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>


#Test DNS
[root@hs-k8s-master01 ~]# kubectl run curl --image=radial/busyboxplus:curl -it
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.
[ root@curl-69c656fd45-sc55l:/ ]$ nslookup kubernetes.default
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
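Once the tests pass, the throwaway workloads can be cleaned up (both kubectl run invocations above created Deployments):
[ root@curl-69c656fd45-sc55l:/ ]$ exit
[root@hs-k8s-master01 ~]# kubectl delete deployment nginx curl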

Reposted from www.cnblogs.com/zisefeizhu/p/12318370.html