19. k8s Upgrade
19.1 master
19.1.1 Upgrade master
[root@ansible-server ansible]# mkdir -p roles/kubeadm-master-update/{tasks,vars}
[root@ansible-server ansible]# cd roles/kubeadm-master-update/
[root@ansible-server kubeadm-master-update]# ls
tasks  vars
#Set HARBOR_DOMAIN below to your own harbor domain address, and change the MASTER01, MASTER02, and MASTER03 variables to your own masters' IP addresses
[root@ansible-server kubeadm-master-update]# vim vars/main.yml
KUBEADM_VERSION: 1.21.10
HARBOR_DOMAIN: harbor.raymonds.cc
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103
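#Optional sanity check (not part of the role): confirm the target version actually exists in your package repositories before running the upgrade.
#CentOS/Rocky:
[root@k8s-master01 ~]# yum list kubeadm --showduplicates | grep 1.21.10
#Ubuntu:
[root@k8s-node01 ~]# apt-cache madison kubeadm | grep 1.21.10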
[root@ansible-server kubeadm-master-update]# vim tasks/upgrade_master01.yml
- name: install CentOS or Rocky socat
yum:
name: socat
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- inventory_hostname in groups.ha
- name: install Ubuntu socat
apt:
name: socat
force: yes
when:
- ansible_distribution=="Ubuntu"
- inventory_hostname in groups.ha
- name: down master01
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for master
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-master01"
- name: install Ubuntu kubeadm for master
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-master01"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-master01"
- name: get kubeadm version
shell:
cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/" '{print $NF}'
register: KUBEADM_IMAGES_VERSION
when:
- ansible_hostname=="k8s-master01"
- name: download kubeadm image for master01
shell: |
{% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
docker pull registry.aliyuncs.com/google_containers/{{ i }}
docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
docker rmi registry.aliyuncs.com/google_containers/{{ i }}
docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
when:
- ansible_hostname=="k8s-master01"
- name: kubeadm upgrade
shell:
cmd: |
kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
y
EOF
sleep 180s
when:
- ansible_hostname=="k8s-master01"
- name: up master01
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
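#An optional manual check, assuming the same haproxy admin socket used above: from k8s-lb you can confirm whether a backend server is currently disabled or enabled:
[root@k8s-lb ~]# echo "show servers state kubernetes-6443" | socat stdio /var/lib/haproxy/haproxy.sock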
[root@ansible-server kubeadm-master-update]# vim tasks/upgrade_master02.yml
- name: down master02
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master02"
- name: install CentOS or Rocky kubeadm for master
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-master02"
- name: install Ubuntu kubeadm for master
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-master02"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-master02"
- name: get kubeadm version
shell:
cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/" '{print $NF}'
register: KUBEADM_IMAGES_VERSION
when:
- ansible_hostname=="k8s-master02"
- name: download kubeadm image for master02
shell: |
{% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
docker pull {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
when:
- ansible_hostname=="k8s-master02"
- name: kubeadm upgrade
shell:
cmd: |
kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
y
EOF
sleep 180s
when:
- ansible_hostname=="k8s-master02"
- name: up master02
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master02"
[root@ansible-server kubeadm-master-update]# vim tasks/upgrade_master03.yml
- name: down master03
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master03"
- name: install CentOS or Rocky kubeadm for master
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-master03"
- name: install Ubuntu kubeadm for master
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-master03"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-master03"
- name: get kubeadm version
shell:
cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/" '{print $NF}'
register: KUBEADM_IMAGES_VERSION
when:
- ansible_hostname=="k8s-master03"
- name: download kubeadm image for master03
shell: |
{% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
docker pull {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
when:
- ansible_hostname=="k8s-master03"
- name: kubeadm upgrade
shell:
cmd: |
kubeadm upgrade apply v{{ KUBEADM_VERSION }} <<EOF
y
EOF
sleep 180s
when:
- ansible_hostname=="k8s-master03"
- name: up master03
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master03"
[root@ansible-server kubeadm-master-update]# vim tasks/main.yml
- include: upgrade_master01.yml
- include: upgrade_master02.yml
- include: upgrade_master03.yml
[root@ansible-server kubeadm-master-update]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm-master-update/
roles/kubeadm-master-update/
├── tasks
│ ├── main.yml
│ ├── upgrade_master01.yml
│ ├── upgrade_master02.yml
│ └── upgrade_master03.yml
└── vars
└── main.yml
2 directories, 5 files
[root@ansible-server ansible]# vim kubeadm_master_update_role.yml
---
- hosts: k8s_cluster:ha
roles:
- role: kubeadm-master-update
[root@ansible-server ansible]# ansible-playbook kubeadm_master_update_role.yml
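#As an optional manual sanity check (not part of the role), kubeadm can report the current cluster version and the versions it can upgrade to:
[root@k8s-master01 ~]# kubeadm upgrade plan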
19.1.2 Verify master
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 22m v1.21.10
k8s-master02.example.local Ready control-plane,master 18m v1.21.10
k8s-master03.example.local Ready control-plane,master 17m v1.21.10
k8s-node01.example.local Ready <none> 16m v1.21.8
k8s-node02.example.local Ready <none> 16m v1.21.8
k8s-node03.example.local Ready <none> 16m v1.21.8
19.2 Upgrade calico
[root@ansible-server ansible]# mkdir -p roles/calico-update/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico-update
[root@ansible-server calico-update]# ls
tasks templates vars
#Set HARBOR_DOMAIN below to your own harbor domain address, and change the MASTER01, MASTER02, and MASTER03 variables to your own masters' IP addresses
[root@ansible-server calico-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103
[root@ansible-server calico-update]# wget https://docs.projectcalico.org/manifests/calico-etcd.yaml -O templates/calico-etcd.yaml.j2
[root@ansible-server calico-update]# vim templates/calico-etcd.yaml.j2
...
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: OnDelete #change this line: calico will not roll out automatically; a node's pod is only updated after it is deleted (i.e. when kubelet is restarted)
template:
metadata:
labels:
k8s-app: calico-node
...
#Modify the following
[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
[root@ansible-server calico-update]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2
[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
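#For reference, assuming groups.master resolves to the three example masters above, this renders to:
#etcd_endpoints: "https://172.31.3.101:2379,https://172.31.3.102:2379,https://172.31.3.103:2379"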
[root@ansible-server calico-update]# vim tasks/calico_file.yml
- name: copy calico-etcd.yaml file
template:
src: calico-etcd.yaml.j2
dest: /root/calico-etcd.yaml
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico-update]# vim tasks/config.yml
- name: get ETCD_KEY key
shell:
cmd: cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'
register: ETCD_KEY
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-key:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-key:) null'
replace: '\1 {{ ETCD_KEY.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
shell:
cmd: cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'
register: ETCD_CERT
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-cert:) null'
replace: '\1 {{ ETCD_CERT.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
shell:
cmd: cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'
register: ETCD_CA
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-ca:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (etcd-ca:) null'
replace: '\1 {{ ETCD_CA.stdout }}'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_ca:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_ca:) ""'
replace: '\1 "/calico-secrets/etcd-ca"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_cert:) ""'
replace: '\1 "/calico-secrets/etcd-cert"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '(etcd_key:) ""'
replace: '\1 "/calico-secrets/etcd-key"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
replace: '\1'
when:
- ansible_hostname=="k8s-master01"
- name: get POD_SUBNET
shell:
cmd: cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'
register: POD_SUBNET
when:
- ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
replace:
path: /root/calico-etcd.yaml
regexp: '# (value:) "192.168.0.0/16"'
replace: ' \1 "{{ POD_SUBNET.stdout }}"'
when:
- ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
replace:
path: /root/calico-etcd.yaml
regexp: '(.*image:) docker.io/calico(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
when:
- ansible_hostname=="k8s-master01"
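#An optional manual check on master01 (not part of the role) to confirm the substitutions landed in the rendered file:
[root@k8s-master01 ~]# grep -E "etcd_endpoints:|etcd_(ca|cert|key):|CALICO_IPV4POOL_CIDR|image:" /root/calico-etcd.yaml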
[root@ansible-server calico-update]# vim tasks/download_images.yml
- name: get calico version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' calico-etcd.yaml
register: CALICO_VERSION
when:
- ansible_hostname=="k8s-master01"
- name: download calico image
shell: |
{% for i in CALICO_VERSION.stdout_lines %}
docker pull registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
docker tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
docker rmi registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico-update]# vim tasks/install_calico.yml
- name: install calico
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f calico-etcd.yaml"
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico-update]# vim tasks/delete_master01_calico_container.yml
- name: down master01
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master01 |awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 30s
when:
- ansible_hostname=="k8s-master01"
- name: up master01
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
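#While this role runs you can watch the calico pod on each master get recreated with the new image (an optional manual check, not part of the role):
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide -w | grep calico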
[root@ansible-server calico-update]# vim tasks/delete_master02_calico_container.yml
- name: down master02
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master02 |awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 30s
when:
- ansible_hostname=="k8s-master01"
- name: up master02
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico-update]# vim tasks/delete_master03_calico_container.yml
- name: down master03
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep master03 |awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 30s
when:
- ansible_hostname=="k8s-master01"
- name: up master03
shell:
cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server calico-update]# vim tasks/main.yml
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml
- include: delete_master01_calico_container.yml
- include: delete_master02_calico_container.yml
- include: delete_master03_calico_container.yml
[root@ansible-server calico-update]# cd ../../
[root@ansible-server ansible]# tree roles/calico-update/
roles/calico-update/
├── tasks
│ ├── calico_file.yml
│ ├── config.yml
│ ├── delete_master01_calico_container.yml
│ ├── delete_master02_calico_container.yml
│ ├── delete_master03_calico_container.yml
│ ├── download_images.yml
│ ├── install_calico.yml
│ └── main.yml
├── templates
│ └── calico-etcd.yaml.j2
└── vars
└── main.yml
3 directories, 10 files
[root@ansible-server ansible]# vim calico_update_role.yml
---
- hosts: master
roles:
- role: calico-update
[root@ansible-server ansible]# ansible-playbook calico_update_role.yml
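#To verify the calico upgrade, check the image now referenced by the daemonset (assuming the manifest's default daemonset name, calico-node):
[root@k8s-master01 ~]# kubectl get ds calico-node -n kube-system -o yaml | grep "image:"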
19.3 node
19.3.1 Upgrade node
[root@ansible-server ansible]# mkdir -p roles/kubeadm-node-update/{tasks,vars}
[root@ansible-server ansible]# cd roles/kubeadm-node-update/
[root@ansible-server kubeadm-node-update]# ls
tasks vars
[root@ansible-server kubeadm-node-update]# vim vars/main.yml
KUBEADM_VERSION: 1.21.10
[root@ansible-server kubeadm-node-update]# vim tasks/upgrade_node01.yml
- name: drain node01
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node01.example.local --delete-emptydir-data --force --ignore-daemonsets
when:
- ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-node01"
- name: install Ubuntu kubeadm for node
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-node01"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-node01"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node01 |tail -n1|awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 60s
when:
- ansible_hostname=="k8s-master01"
- name: uncordon node01
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node01.example.local
when:
- ansible_hostname=="k8s-master01"
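#While a node is being drained you can confirm it is cordoned (an optional manual check, not part of the role); STATUS shows Ready,SchedulingDisabled until uncordon runs:
[root@k8s-master01 ~]# kubectl get node k8s-node01.example.local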
[root@ansible-server kubeadm-node-update]# vim tasks/upgrade_node02.yml
- name: drain node02
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node02.example.local --delete-emptydir-data --force --ignore-daemonsets
when:
- ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-node02"
- name: install Ubuntu kubeadm for node
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-node02"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-node02"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node02 |tail -n1|awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 60s
when:
- ansible_hostname=="k8s-master01"
- name: uncordon node02
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node02.example.local
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server kubeadm-node-update]# vim tasks/upgrade_node03.yml
- name: drain node03
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf drain k8s-node03.example.local --delete-emptydir-data --force --ignore-daemonsets
when:
- ansible_hostname=="k8s-master01"
- name: install CentOS or Rocky kubeadm for node
yum:
name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
when:
- (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- ansible_hostname=="k8s-node03"
- name: install Ubuntu kubeadm for node
apt:
name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
force: yes
when:
- ansible_distribution=="Ubuntu"
- ansible_hostname=="k8s-node03"
- name: restart kubelet
systemd:
name: kubelet
state: restarted
daemon_reload: yes
when:
- ansible_hostname=="k8s-node03"
- name: get calico container
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf get pod -n kube-system -o wide|grep calico |grep node03 |tail -n1|awk -F " " '{print $1}'
register: CALICO_CONTAINER
when:
- ansible_hostname=="k8s-master01"
- name: delete calico container
shell: |
kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
sleep 60s
when:
- ansible_hostname=="k8s-master01"
- name: uncordon node03
shell:
cmd: kubectl --kubeconfig=/etc/kubernetes/admin.conf uncordon k8s-node03.example.local
when:
- ansible_hostname=="k8s-master01"
[root@ansible-server kubeadm-node-update]# vim tasks/main.yml
- include: upgrade_node01.yml
- include: upgrade_node02.yml
- include: upgrade_node03.yml
[root@ansible-server kubeadm-node-update]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm-node-update/
roles/kubeadm-node-update/
├── tasks
│ ├── main.yml
│ ├── upgrade_node01.yml
│ ├── upgrade_node02.yml
│ └── upgrade_node03.yml
└── vars
└── main.yml
2 directories, 5 files
[root@ansible-server ansible]# vim kubeadm_node_update_role.yml
---
- hosts: k8s_cluster
roles:
- role: kubeadm-node-update
[root@ansible-server ansible]# ansible-playbook kubeadm_node_update_role.yml
19.3.2 Verify node
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 31m v1.21.10
k8s-master02.example.local Ready control-plane,master 27m v1.21.10
k8s-master03.example.local Ready control-plane,master 26m v1.21.10
k8s-node01.example.local Ready <none> 25m v1.21.10
k8s-node02.example.local Ready <none> 25m v1.21.10
k8s-node03.example.local Ready <none> 25m v1.21.10
19.4 metrics
19.4.1 Upgrade metrics
[root@ansible-server ansible]# mkdir -p roles/metrics-update/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics-update/
[root@ansible-server metrics-update]# ls
files tasks vars
[root@ansible-server metrics-update]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -P files/
#Set HARBOR_DOMAIN below to your own harbor domain address
[root@ansible-server metrics-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
[root@ansible-server metrics-update]# vim files/components.yaml
...
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
#Add the following lines
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
...
volumeMounts:
- mountPath: /tmp
name: tmp-dir
#Add the following lines
- name: ca-ssl
mountPath: /etc/kubernetes/pki
...
volumes:
- emptyDir: {}
name: tmp-dir
#Add the following lines
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
...
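#The flags added above skip verification of the kubelets' serving certificates (--kubelet-insecure-tls) and point metrics-server at the cluster's front-proxy CA for aggregation-layer authentication. After installation the aggregated API should report Available (an optional manual check, not part of the role):
[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io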
[root@ansible-server metrics-update]# vim tasks/metrics_file.yml
- name: copy components.yaml file
copy:
src: components.yaml
dest: /root/components.yaml
[root@ansible-server metrics-update]# vim tasks/config.yml
- name: Modify the "image:" line
replace:
path: /root/components.yaml
regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
[root@ansible-server metrics-update]# vim tasks/download_images.yml
- name: get metrics version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' components.yaml
register: METRICS_VERSION
- name: download metrics image
shell: |
{% for i in METRICS_VERSION.stdout_lines %}
docker pull registry.aliyuncs.com/google_containers/{{ i }}
docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
docker rmi registry.aliyuncs.com/google_containers/{{ i }}
docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
[root@ansible-server metrics-update]# vim tasks/install_metrics.yml
- name: install metrics
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f components.yaml"
[root@ansible-server metrics-update]# vim tasks/main.yml
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml
[root@ansible-server metrics-update]# cd ../../
[root@ansible-server ansible]# tree roles/metrics-update/
roles/metrics-update/
├── files
│ └── components.yaml
├── tasks
│ ├── config.yml
│ ├── download_images.yml
│ ├── install_metrics.yml
│ ├── main.yml
│ └── metrics_file.yml
└── vars
└── main.yml
3 directories, 7 files
[root@ansible-server ansible]# vim metrics_update_role.yml
---
- hosts: master01
roles:
- role: metrics-update
[root@ansible-server ansible]# ansible-playbook metrics_update_role.yml
19.4.2 Verify metrics
[root@k8s-master01 ~]# kubectl get pod -A|grep metrics-server
kube-system metrics-server-75c8898f9f-nfmwz 1/1 Running 0 45s
[root@k8s-master01 ~]# kubectl top node --use-protocol-buffers
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 172m 8% 1896Mi 49%
k8s-master02.example.local 143m 7% 1398Mi 36%
k8s-master03.example.local 142m 7% 1436Mi 37%
k8s-node01.example.local 80m 4% 1022Mi 26%
k8s-node02.example.local 81m 4% 1025Mi 26%
k8s-node03.example.local 69m 3% 947Mi 24%
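#If kubectl top reports that metrics are not yet available, the metrics-server logs are the first place to look (an optional manual check, not part of the role):
[root@k8s-master01 ~]# kubectl logs -n kube-system deploy/metrics-server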
19.5 dashboard
19.5.1 Upgrade dashboard
[root@ansible-server ansible]# mkdir -p roles/dashboard-update/{files,templates,vars,tasks}
[root@ansible-server ansible]# cd roles/dashboard-update/
[root@ansible-server dashboard-update]# ls
files tasks templates vars
[root@ansible-server dashboard-update]# vim files/admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
[root@ansible-server dashboard-update]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml -O templates/recommended.yaml.j2
[root@ansible-server dashboard-update]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort #add this line
ports:
- port: 443
targetPort: 8443
nodePort: {{ NODEPORT }} #add this line
selector:
k8s-app: kubernetes-dashboard
...
#Set HARBOR_DOMAIN below to your own harbor domain address
[root@ansible-server dashboard-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
NODEPORT: 30005
[root@ansible-server dashboard-update]# vim tasks/dashboard_file.yml
- name: copy recommended.yaml file
template:
src: recommended.yaml.j2
dest: /root/recommended.yaml
- name: copy admin.yaml file
copy:
src: admin.yaml
dest: /root/admin.yaml
[root@ansible-server dashboard-update]# vim tasks/config.yml
- name: Modify the "image:" line
replace:
path: /root/recommended.yaml
regexp: '(.*image:) kubernetesui(/.*)'
replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
[root@ansible-server dashboard-update]# vim tasks/download_images.yml
- name: get dashboard version
shell:
chdir: /root
cmd: awk -F "/" '/image:/{print $NF}' recommended.yaml
register: DASHBOARD_VERSION
- name: download dashboard image
shell: |
{% for i in DASHBOARD_VERSION.stdout_lines %}
docker pull kubernetesui/{{ i }}
docker tag kubernetesui/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
docker rmi kubernetesui/{{ i }}
docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
{% endfor %}
[root@ansible-server dashboard-update]# vim tasks/install_dashboard.yml
- name: install dashboard
shell:
chdir: /root
cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f recommended.yaml -f admin.yaml"
[root@ansible-server dashboard-update]# vim tasks/main.yml
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml
[root@ansible-server dashboard-update]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard-update/
roles/dashboard-update/
├── files
│ └── admin.yaml
├── tasks
│ ├── config.yml
│ ├── dashboard_file.yml
│ ├── download_images.yml
│ ├── install_dashboard.yml
│ └── main.yml
├── templates
│ └── recommended.yaml.j2
└── vars
└── main.yml
4 directories, 8 files
[root@ansible-server ansible]# vim dashboard_update_role.yml
---
- hosts: master01
roles:
- role: dashboard-update
[root@ansible-server ansible]# ansible-playbook dashboard_update_role.yml
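#Before logging in you can confirm the NodePort service (an optional manual check, not part of the role); PORT(S) should show 443:30005/TCP, matching the NODEPORT value above:
[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard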
19.5.2 Log in to dashboard
https://172.31.3.101:30005
[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-4cgk2
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 8ea51eb9-5c85-47f8-afef-d087162d6168
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1066 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImhlemE5UEhFc0w2VWZabzlUV2k5c1RaQzZZbmxzMThmZ05ldkpnbWZGeUkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTRjZ2syIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4ZWE1MWViOS01Yzg1LTQ3ZjgtYWZlZi1kMDg3MTYyZDYxNjgiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.YQVpRA-laaOkOPKnKcHRuOsTj7bNiu-1woJlOC-OWu6mrwOvHGnxC7ru5ugwXQxYDbJXgVX3CzQDbZ1j_6RI4SdvC2H28E2GNtUxVLfKaRMBIiQ1aWv6OmYlQMINqfjd6ZR7m9gwO0iLKkMSrwxZw19ydE0ZlymkImeboSjST0z4nXZvSsRcuZjR52mOLru75E3bYCUiPWAypRRI3z0ZHl_JZ9Lurq3jWlxD_ooYQ17_qVcK_ymc3FxkiGQ9NcMPo3WdQEa-YVOQa7K1_SWMkRBYnOEB-NNvi9SQerWmAglndd9DsBGB3n63BlI05gtfUiH0lUBuj7FIyP7tYYMPdA