Setting Up Kubernetes by Pulling Images from Domestic (China) Mirrors

Install kubeadm

Run on all nodes

Install kubelet, kubeadm, and kubectl from the Aliyun mirror repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install version 1.12.2 of kubelet, kubeadm, and kubectl

[root@k8s-master ~]# yum install -y kubelet-1.12.2 kubectl-1.12.2 kubeadm-1.12.2
Enable and start the kubelet service
[root@k8s-master ~]# systemctl enable kubelet && systemctl start kubelet
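As a quick optional check, confirm that the expected 1.12.2 packages were actually installed before moving on:

# Optional: confirm the installed versions
kubeadm version -o short
kubectl version --client --short
rpm -q kubelet kubeadm kubectl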

Pull the images with the following script:

# Check which image versions need to be pulled
[root@k8s-master ~]# kubeadm config images list --kubernetes-version v1.12.2
k8s.gcr.io/kube-apiserver:v1.12.2
k8s.gcr.io/kube-controller-manager:v1.12.2
k8s.gcr.io/kube-scheduler:v1.12.2
k8s.gcr.io/kube-proxy:v1.12.2
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.2
[root@k8s-master ~]# 

# Download and run the script
[root@k8s-master ~]# wget https://raw.githubusercontent.com/zhwill/LinuxShell/master/pull_k8s_images.sh && bash pull_k8s_images.sh

Script contents:

#!/bin/bash
## Version information
K8S_VERSION=v1.12.2
ETCD_VERSION=3.2.24
DASHBOARD_VERSION=v1.10.0
FLANNEL_VERSION=v0.10.0-amd64
DNS_VERSION=1.2.2
PAUSE_VERSION=3.1

## Core components
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:$K8S_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION

### Networking
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$DNS_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/kubernetes_containers/flannel:$FLANNEL_VERSION
#docker pull quay.io/coreos/flannel:$FLANNEL_VERSION

### Dashboard (front end)
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION

## Retag to the k8s.gcr.io names
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:$K8S_VERSION k8s.gcr.io/kube-apiserver:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:$K8S_VERSION k8s.gcr.io/kube-controller-manager:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:$K8S_VERSION k8s.gcr.io/kube-scheduler:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:$K8S_VERSION k8s.gcr.io/kube-proxy:$K8S_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION k8s.gcr.io/etcd:$ETCD_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION k8s.gcr.io/pause:$PAUSE_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$DNS_VERSION k8s.gcr.io/coredns:$DNS_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/kubernetes_containers/flannel:$FLANNEL_VERSION quay.io/coreos/flannel:$FLANNEL_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION k8s.gcr.io/kubernetes-dashboard-amd64:$DASHBOARD_VERSION

## Remove the original mirror-registry tags
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:$K8S_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$DNS_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/kubernetes_containers/flannel:$FLANNEL_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:$DASHBOARD_VERSION
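
The pull/tag/delete sequence above can also be written more compactly as a loop. The sketch below uses the same Aliyun repository and versions and covers only the images that keep their name under k8s.gcr.io; flannel, which is retagged to quay.io/coreos, would still need its own lines:

#!/bin/bash
# Sketch: same pull/tag/delete flow as the script above, as a loop
ALIYUN=registry.cn-hangzhou.aliyuncs.com/google_containers
for image in kube-apiserver:v1.12.2 kube-controller-manager:v1.12.2 \
             kube-scheduler:v1.12.2 kube-proxy:v1.12.2 \
             etcd:3.2.24 pause:3.1 coredns:1.2.2 \
             kubernetes-dashboard-amd64:v1.10.0; do
    docker pull $ALIYUN/$image
    docker tag  $ALIYUN/$image k8s.gcr.io/$image
    docker rmi  $ALIYUN/$image
done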

Check the pulled images:

[root@k8s-master ~]# docker images
REPOSITORY                              TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                   v1.12.2             15e9da1ca195        4 weeks ago         96.5MB
k8s.gcr.io/kube-apiserver               v1.12.2             51a9c329b7c5        4 weeks ago         194MB
k8s.gcr.io/kube-controller-manager      v1.12.2             15548c720a70        4 weeks ago         164MB
k8s.gcr.io/kube-scheduler               v1.12.2             d6d57c76136c        4 weeks ago         58.3MB
k8s.gcr.io/etcd                         3.2.24              3cab8e1b9802        2 months ago        220MB
k8s.gcr.io/coredns                      1.2.2               367cdc8433a4        3 months ago        39.2MB
k8s.gcr.io/kubernetes-dashboard-amd64   v1.10.0             0dab2435c100        3 months ago        122MB
quay.io/coreos/flannel                  v0.10.0-amd64       50e7aa4dbbf8        6 months ago        44.6MB
k8s.gcr.io/pause                        3.1                 da86e6ba6ca1        11 months ago       742kB
[root@k8s-master ~]# 
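
To cross-check that nothing is missing, the local images can be compared against the list kubeadm expects (a small optional check):

# Optional: every image kubeadm lists should now exist locally
kubeadm config images list --kubernetes-version v1.12.2 | while read img; do
    docker image inspect "$img" >/dev/null 2>&1 && echo "OK      $img" || echo "MISSING $img"
done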

Run the initialization

Run on the master node

[root@k8s-master ~]# kubeadm init \
>     --apiserver-advertise-address=192.168.92.56 \
>     --pod-network-cidr=10.244.0.0/16 \
>     --service-cidr=10.233.0.0/16 \
>     --kubernetes-version=v1.12.2
[init] using Kubernetes version: v1.12.2
[preflight] running pre-flight checks
[preflight/images] Pulling images required for setting up a Kubernetes cluster
[preflight/images] This might take a minute or two, depending on the speed of your internet connection
[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[preflight] Activating the kubelet service
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [127.0.0.1 ::1]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.92.56 127.0.0.1 ::1]
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.233.0.1 192.168.92.56]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] valid certificates and keys now exist in "/etc/kubernetes/pki"
[certificates] Generated sa key and public key.
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" 
[init] this might take a minute or longer if the control plane images have to be pulled
[apiclient] All control plane components are healthy after 25.504091 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.12" in namespace kube-system with the configuration for the kubelets in the cluster
[markmaster] Marking the node k8s-master as master by adding the label "node-role.kubernetes.io/master=''"
[markmaster] Marking the node k8s-master as master by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master" as an annotation
[bootstraptoken] using token: g7mje8.ci220bm8og06eywm
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 192.168.92.56:6443 --token g7mje8.ci220bm8og06eywm --discovery-token-ca-cert-hash sha256:b4db23f6daeff746996c1f67d7328dc39ce56d5b7768e18035625329c2c9bb6c

[root@k8s-master ~]# 
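
The remaining steps are the ones printed by kubeadm init: copy the admin kubeconfig, deploy a pod network (flannel here, matching the image pulled earlier), and run the kubeadm join command on each worker node as root. A sketch of the master-side steps follows; the flannel manifest URL is the commonly used upstream one for the v0.10.0 era and should be verified against the image version you pulled:

# As the regular user that will run kubectl (same commands as in the kubeadm output above)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Deploy flannel; check that this manifest matches the v0.10.0 image pulled earlier
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Then, on each worker node, run the kubeadm join command printed above as root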

After the remaining steps are complete, check the node status:

[centos@k8s-master ~]$ kubectl get nodes  
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   17m   v1.12.2
k8s-node1    Ready    <none>   49s   v1.12.2
k8s-node2    Ready    <none>   32s   v1.12.2
[centos@k8s-master ~]$ kubectl get pod --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-576cbf47c7-4zq9t             1/1     Running   0          12m
kube-system   coredns-576cbf47c7-5g4xw             1/1     Running   0          12m
kube-system   etcd-k8s-master                      1/1     Running   0          3m20s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          3m20s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          3m21s
kube-system   kube-flannel-ds-amd64-8h6tg          1/1     Running   0          65s
kube-system   kube-flannel-ds-amd64-ggt7c          1/1     Running   0          3m28s
kube-system   kube-flannel-ds-amd64-k5sdb          1/1     Running   0          48s
kube-system   kube-proxy-cn8xx                     1/1     Running   0          65s
kube-system   kube-proxy-pf9w8                     1/1     Running   0          48s
kube-system   kube-proxy-tmxmx                     1/1     Running   0          17m
kube-system   kube-scheduler-k8s-master            1/1     Running   0          3m21s


Reposted from blog.csdn.net/networken/article/details/84571373