Deploying a k8s cluster from binaries with ansible


Server environment


root@k8s-master1:~# uname -r
4.15.0-112-generic
root@k8s-master1:~# cat /etc/issue
Ubuntu 18.04.4 LTS \n \l

The base environment is the same as in "Deploying a Kubernetes Cluster with kubeadm".

ha-node1 and ha-node2 run keepalived and haproxy, with the virtual IP 172.16.1.188 serving as the apiserver VIP; ha-node1 is the master node and ha-node2 the backup node.

harbor-node1 runs the Harbor service, used to store images.

Detailed deployment of these services is not covered here; a minimal configuration sketch is shown below.
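
A minimal sketch of the keepalived and haproxy configuration, assuming eth0 as the interface name and the two masters 172.16.1.30/31 as backends (adjust to your environment; on ha-node2 use state BACKUP and a lower priority):

# /etc/keepalived/keepalived.conf on ha-node1
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    virtual_ipaddress {
        172.16.1.188
    }
}

# /etc/haproxy/haproxy.cfg fragment: TCP passthrough to the apiservers;
# set net.ipv4.ip_nonlocal_bind=1 so the backup node can bind the VIP
listen k8s-apiserver
    bind 172.16.1.188:6443
    mode tcp
    balance roundrobin
    server master1 172.16.1.30:6443 check
    server master2 172.16.1.31:6443 check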

Deploy ansible


k8s-master serves as the deployment server

Base environment configuration


  • Install python2.7

Install on the master, node, and etcd nodes


# apt-get install python2.7 -y
# ln -s /usr/bin/python2.7 /usr/bin/python
  • Install ansible

# apt install ansible
  • Configure passwordless login on the ansible control node

# ssh-keygen
  • Distribute the key

root@k8s-master1:~# ssh-copy-id 172.16.1.31

Distribute the key to the master, node, and etcd nodes.
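
Copying the key host by host is tedious; a loop like the one below covers every node in the inventory. This is a sketch assuming all nodes share one root password and that sshpass is installed for non-interactive password entry ('YourRootPassword' is a placeholder):

# apt-get install sshpass -y
# export SSHPASS='YourRootPassword'
# for ip in 172.16.1.30 172.16.1.31 172.16.1.33 172.16.1.34 172.16.1.39 172.16.1.40 172.16.1.41; do sshpass -e ssh-copy-id -o StrictHostKeyChecking=no root@${ip}; done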

Download the project


# export release=2.2.0
# curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/${release}/easzup
# vim easzup
export DOCKER_VER=19.03.12
export KUBEASZ_VER=2.2.0
# chmod +x easzup
# ./easzup -D

Prepare the hosts file



# cd /etc/ansible/
# cp example/hosts.multi-node ./hosts
# vim hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
172.16.1.39 NODE_NAME=etcd1
172.16.1.40 NODE_NAME=etcd2
172.16.1.41 NODE_NAME=etcd3

# master node(s)
[kube-master]
172.16.1.30
172.16.1.31

# work node(s)
[kube-node]
172.16.1.33
172.16.1.34

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with an existing one
# 'SELF_SIGNED_CERT': if 'no', you need to put certificate files named harbor.pem and harbor-key.pem in the 'down' directory
[harbor]
#172.16.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes

# [optional] loadbalance for accessing k8s from outside
# external load balancer, e.g. for forwarding traffic to services exposed via NodePort in a self-hosted environment
[ex-lb]
172.16.1.36 LB_ROLE=master EX_APISERVER_VIP=172.16.1.188 EX_APISERVER_PORT=6443
172.16.1.37 LB_ROLE=backup EX_APISERVER_VIP=172.16.1.188 EX_APISERVER_PORT=6443

# [optional] ntp server for the cluster
[chrony]
#172.16.1.1

[all:vars]
# --------- Main Variables ---------------
# Cluster container-runtime supported: docker, containerd
CONTAINER_RUNTIME="docker"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="flannel"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="172.20.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.20.0.0/16"

# NodePort Range
NODE_PORT_RANGE="30000-60000"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="kevin.local."

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/bin"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/ansible"

flannel is used as the network plugin here; it can be changed to calico.
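
Before running the playbooks, it is worth confirming that ansible can reach every host in this inventory (a quick connectivity check using the default inventory at /etc/ansible/hosts):

root@k8s-master1:/etc/ansible# ansible all -m ping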

Deploy step by step


Environment initialization


root@k8s-master1:/etc/ansible# vim 01.prepare.yml
# [optional] to synchronize system time of nodes with 'chrony'
- hosts:
  - kube-master
  - kube-node
  - etcd
#  - ex-lb
#  - chrony
  roles:
  - { role: chrony, when: "groups['chrony']|length > 0" }

# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
- hosts: localhost
  roles:
  - deploy

# prepare tasks for all nodes
- hosts:
  - kube-master
  - kube-node
  - etcd
  roles:
  - prepare
root@k8s-master1:/etc/ansible# apt install python-pip
root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml

Install etcd


root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml
  • Verify the etcd service on each etcd server

root@etc-node1:~# export NODE_IPS="172.16.1.39 172.16.1.40 172.16.1.41"
root@etc-node1:~# for ip in ${NODE_IPS};do /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done  
https://172.16.1.39:2379 is healthy: successfully committed proposal: took = 11.288725ms
https://172.16.1.40:2379 is healthy: successfully committed proposal: took = 14.079052ms
https://172.16.1.41:2379 is healthy: successfully committed proposal: took = 13.639416ms
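
Cluster membership can be confirmed as well, using etcdctl's member list subcommand with the same certificate flags:

root@etc-node1:~# /usr/bin/etcdctl --endpoints=https://172.16.1.39:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem member list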

Install docker

Docker deployment is the same as in "Deploying a Kubernetes Cluster with kubeadm".

Deploy master


root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
root@k8s-master1:/etc/ansible# kubectl get node
NAME         STATUS                     ROLES   AGE   VERSION
172.16.1.30   Ready,SchedulingDisabled   master   65s   v1.17.2
172.16.1.31   Ready,SchedulingDisabled   master   65s   v1.17.2
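
Before moving on, a quick health check of the control plane (kubectl get cs still works on v1.17, though componentstatus is deprecated in later releases):

root@k8s-master1:/etc/ansible# kubectl cluster-info
root@k8s-master1:/etc/ansible# kubectl get cs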

Deploy node

  • Prepare the pause image

# docker pull mirrorgooglecontainers/pause-amd64:3.1
# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor.kevin.com/base/pause-amd64:3.1
# docker push harbor.kevin.com/base/pause-amd64:3.1
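
If Harbor requires authentication, log in on each docker host before pulling from it; and if Harbor serves a self-signed certificate, either distribute its CA to the nodes or (less secure, shown here only as a sketch) mark the registry as insecure in /etc/docker/daemon.json and restart docker:

# docker login harbor.kevin.com
# cat /etc/docker/daemon.json
{
  "insecure-registries": ["harbor.kevin.com"]
}
# systemctl restart docker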
  • Update the image address

root@k8s-master1:/etc/ansible# vim roles/kube-node/defaults/main.yml
# base container image (pause)
SANDBOX_IMAGE: "harbor.kevin.com/base/pause-amd64:3.1"
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
root@k8s-master1:/etc/ansible# kubectl get node
NAME         STATUS                     ROLES   AGE   VERSION
172.16.1.30   Ready,SchedulingDisabled   master   16m   v1.17.2
172.16.1.31   Ready,SchedulingDisabled   master   16m   v1.17.2
172.16.1.33   Ready                      node     10s   v1.17.2
172.16.1.34   Ready                      node     10s   v1.17.2

Deploy the network plugin


root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml
  • Verify the network

root@k8s-master1:~# kubectl run net-test1 --image=alpine --replicas=3 sleep 360000
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/net-test1 created
root@k8s-master1:~# kubectl get pod -o wide
NAME                         READY   STATUS   RESTARTS   AGE   IP         NODE         NOMINATED NODE   READINESS GATES
net-test1-5fcc69db59-67gtx   1/1     Running   0         37s   10.20.2.3   172.16.1.33   <none>           <none>
net-test1-5fcc69db59-skpfl   1/1     Running   0         37s   10.20.2.2   172.16.1.33   <none>           <none>
net-test1-5fcc69db59-w4chj   1/1     Running   0         37s   10.20.3.2   172.16.1.34   <none>           <none>
root@k8s-master1:~# kubectl exec -it net-test1-5fcc69db59-67gtx sh
/ # ping 10.20.3.2
PING 10.20.3.2 (10.20.3.2): 56 data bytes
64 bytes from 10.20.3.2: seq=0 ttl=62 time=1.272 ms
64 bytes from 10.20.3.2: seq=1 ttl=62 time=0.449 ms
^C
--- 10.20.3.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.449/0.860/1.272 ms
/ # ping 8.8.8.8
PING 8.8.8.8 (8.8.8.8): 56 data bytes
64 bytes from 8.8.8.8: seq=0 ttl=127 time=112.847 ms
^C
--- 8.8.8.8 ping statistics ---
2 packets transmitted, 1 packets received, 50% packet loss
round-trip min/avg/max = 112.847/112.847/112.847 ms

Once cross-node pod networking and outbound access work, the Kubernetes foundation is in place.

Add a master node



root@k8s-master1:~# easzctl add-master 172.16.1.32
root@k8s-master1:~# kubectl get node
NAME         STATUS                     ROLES   AGE     VERSION
172.16.1.30   Ready,SchedulingDisabled   master   46h     v1.17.2
172.16.1.31   Ready,SchedulingDisabled   master   46h     v1.17.2
172.16.1.32   Ready,SchedulingDisabled   master   3m28s   v1.17.2
172.16.1.33   Ready                      node     45h     v1.17.2
172.16.1.34   Ready                      node     45h     v1.17.2

Add a node



root@k8s-master1:~# easzctl add-node 172.16.1.35
root@k8s-master1:~# kubectl get node
NAME         STATUS                     ROLES   AGE     VERSION
172.16.1.30   Ready,SchedulingDisabled   master   46h     v1.17.2
172.16.1.31   Ready,SchedulingDisabled   master   46h     v1.17.2
172.16.1.32   Ready,SchedulingDisabled   master   11m     v1.17.2
172.16.1.33   Ready                      node     46h     v1.17.2
172.16.1.34   Ready                      node     46h     v1.17.2
172.16.1.35   Ready                      node     3m44s   v1.17.2
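
easzctl also supports the reverse operations; based on the kubeasz 2.x documentation (verify against your version), a node can be removed with del-node:

root@k8s-master1:~# easzctl del-node 172.16.1.35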

DNS service


The two commonly used DNS add-ons are kube-dns and CoreDNS; CoreDNS is installed here.

Deploy CoreDNS


root@k8s-master1:~# mkdir /opt/dns/
root@k8s-master1:~# cd /opt/dns/
root@k8s-master1:/opt/dns# git clone https://github.com/coredns/deployment.git
root@k8s-master1:/opt/dns# cd deployment/kubernetes/
root@k8s-master1:/opt/dns/deployment/kubernetes# ./deploy.sh >/opt/dns/coredns.yml
root@k8s-master1:/opt/dns/deployment/kubernetes# cd /opt/dns/
root@k8s-master1:/opt/dns# docker pull coredns/coredns:1.7.0
root@k8s-master1:/opt/dns# docker tag coredns/coredns:1.7.0 harbor.kevin.com/base/coredns:1.7.0
root@k8s-master1:/opt/dns# docker push harbor.kevin.com/base/coredns:1.7.0
root@k8s-master1:/opt/dns# vim coredns.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes kevin.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . 223.6.6.6 {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: harbor.kevin.com/base/coredns:1.7.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 172.20.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
root@k8s-master1:/opt/dns# kubectl delete -f kube-dns.yaml   # remove any existing kube-dns deployment first
root@k8s-master1:/opt/dns# kubectl apply -f coredns.yml
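
To verify the deployment, check that the CoreDNS pods are running and that service names resolve; the test below assumes the node can pull busybox:1.28 (whose nslookup works reliably) and uses the cluster domain kevin.local configured above:

root@k8s-master1:/opt/dns# kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
root@k8s-master1:/opt/dns# kubectl run -it --rm --generator=run-pod/v1 dns-test --image=busybox:1.28 -- nslookup kubernetes.default.svc.kevin.local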

Deploy dashboard


Deployment is the same as in "Deploying a Kubernetes Cluster with kubeadm".


Reposted from blog.51cto.com/15127511/2657633