文章目录
一、资源清单
- 格式如下:
apiVersion: group/version //指明api资源属于哪个群组和版本,同一个组可以有多个版本
$ kubectl api-versions //查询命令
kind: //标记创建的资源类型,k8s主要支持以下资源类别
Pod,ReplicaSet,Deployment,StatefulSet,DaemonSet,Job,CronJob
metadata: //元数据
name: //对象名称
namespace: //对象属于哪个命名空间
labels: //指定资源标签,标签是一种键值数据
spec: //定义目标资源的期望状态
$ kubectl explain pod //查询帮助文档
1、资源清单内容与操作
1.1 生成模板
## 方法一:书写时候的查找方法,可以直接通过explain一步一步查找参数内容用法
[root@server2 ~]# kubectl explain pod ##查看pod所有参数,-required-带有required是必须存在的参数
[root@server2 ~]# kubectl explain pod.apiVersion ##查看api对应需要添加的参数
## 方法二: 直接打开一个已经有的deployment,然后生成相应的yaml文件,进行参考
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 3h51m
[root@server2 ~]# kubectl get pod demo -o yaml ##生成一个yaml文件
1.2 自主式Pod资源清单
[root@server2 ~]# kubectl get all
[root@server2 ~]# kubectl delete deployments.apps nginx
deployment.apps "nginx" deleted
[root@server2 ~]# kubectl delete svc nginx
service "nginx" deleted
[root@server2 ~]# kubectl get all #整理实验环境
NAME READY STATUS RESTARTS AGE
pod/demo 1/1 Running 0 3h38m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 27d
[root@server2 ~]# vim pod.yml
[root@server2 ~]# kubectl apply -f pod.yml
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl describe pod nginx
[root@server2 ~]# kubectl delete -f pod.yml
[root@server2 ~]# kubectl get pod
[root@server2 ~]# vim pod.yml ##编写yml文件,可用性较高
[root@server2 ~]# cat pod.yml
apiVersion: apps/v1
kind: Deployment ##设置为deployment
metadata:
name: nginx
namespace: default ##设置命名空间
spec:
replicas: 1
selector:
matchLabels:
run: nginx
template: ##模板
metadata:
labels: ##标签
run: nginx
spec:
#nodeSelector: ##指定运行节点的俩种方法(方法一)
# kubernetes.io/hostname: server4
#nodeName: server3 ##指定运行节点的俩种方法(方法二)
#hostNetwork: true ##可以通过直接访问节点ip的方式访问,[root@westos Desktop]# curl 172.25.13.4
containers:
- name: nginx
image: myapp:v1
imagePullPolicy: IfNotPresent
resources: ##资源限制
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 0.5
memory: 512Mi
#- name: busyboxplus
# image: busyboxplus
# imagePullPolicy: IfNotPresent ##本地有镜像,优先本地拉取,没有就需要联网拉取
# stdin: true ##stdin和tty是为了保证交互式,相当于执行参数-it
# tty: true
[root@server2 ~]# kubectl apply -f pod.yml ##deployment使用apply,会自动更新修改内容
## $ kubectl create -f demo.yaml ##不是deployment使用create,不会自动更新修改内容
deployment.apps/nginx created
运行效果测试,改变myapp版本。
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 4h49m
nginx-79cc587f-lq6k9 1/1 Running 0 4m15s
nginx-79cc587f-mdcz7 1/1 Running 0 4m10s
[root@server2 ~]# kubectl describe pod nginx-79cc587f-lq6k9 | less
留着旧的pod是为了回滚
##如果俩个镜像使用的是同一个端口,只有一个可以成功
[root@server2 ~]# vim pod.yml
- name: busyboxplus
image: busyboxplus
imagePullPolicy: IfNotPresent ##本地有镜像,优先本地拉取,没有就需要联网拉取
stdin: true ##stdin和tty是为了保证交互式,相当于执行参数-it
tty: true
[root@server2 ~]# kubectl apply -f pod.yml
deployment.apps/nginx configured
[root@server2 ~]# kubectl get pod
2、标签
## 1.修改节点标签
[root@server2 ~]# kubectl get node --show-labels ##查看节点标签
[root@server2 ~]# kubectl label nodes server3 app=nginx ##打标签
node/server3 labeled
[root@server2 ~]# kubectl get nodes server3 --show-labels ##查看节点server3的标签
NAME STATUS ROLES AGE VERSION LABELS
server3 Ready <none> 27d v1.20.2 app=nginx,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=server3,kubernetes.io/os=linux
[root@server2 ~]# kubectl label nodes server3 app=myapp --overwrite ##重新覆盖
node/server3 labeled
[root@server2 ~]# kubectl get nodes server3 --show-labels ##查看修改内容
NAME STATUS ROLES AGE VERSION LABELS
server3 Ready <none> 27d v1.20.2 app=myapp,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=server3,kubernetes.io/os=linux
[root@server2 ~]# kubectl label nodes server3 app-
node/server3 labeled
[root@server2 ~]# kubectl get nodes server3 --show-labels ##删除标签
NAME STATUS ROLES AGE VERSION LABELS
server3 Ready <none> 27d v1.20.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=server3,kubernetes.io/os=linux
## 2. 修改pod标签
[root@server2 ~]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
demo 1/1 Running 0 6h12m run=demo
nginx-79cc587f-cr7vs 1/1 Running 0 15m pod-template-hash=79cc587f,run=nginx
nginx-79cc587f-hc9lz 1/1 Running 0 15m pod-template-hash=79cc587f,run=nginx
[root@server2 ~]# kubectl label pod demo run=demo2 --overwrite
pod/demo labeled
[root@server2 ~]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
demo 1/1 Running 0 6h12m run=demo2
nginx-79cc587f-cr7vs 1/1 Running 0 15m pod-template-hash=79cc587f,run=nginx
nginx-79cc587f-hc9lz 1/1 Running 0 15m pod-template-hash=79cc587f,run=nginx
##3.查看有相应标签的项
[root@server2 ~]# kubectl get pod -L run
3、清理deployment
[root@server2 ~]# kubectl delete -f pod.yml
deployment.apps "nginx" deleted
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 6h14m
二、Pod生命周期
k8s官网 : https://kubernetes.io/zh/docs/home/
1、 livenessProbe
[root@server2 ~]# vim live.yml ##编辑示例文件
apiVersion: v1
kind: Pod
metadata:
labels:
test: liveness
name: liveness-exec
spec:
containers:
- name: liveness
image: busyboxplus
args:
- /bin/sh
- -c
- touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
livenessProbe:
exec:
command:
- cat
- /tmp/healthy
initialDelaySeconds: 2
periodSeconds: 3
[root@server2 ~]# kubectl create -f live.yml ##创建
[root@server2 ~]# kubectl get pod ##查看创建的pod,执行成功不会报错。执行错误会一直重启.
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 7h44m
liveness-exec 1/1 Running 0 12s
[root@server2 ~]# kubectl describe pod liveness-exec
[root@server2 ~]# kubectl delete -f live.yml
pod "liveness-exec" deleted
2、readinessProbe
文件存在
[root@server2 ~]# cat live.yml
apiVersion: v1
kind: Pod
metadata:
labels:
test: liveness
name: liveness-exec
spec:
containers:
- name: liveness
image: myapp:v1
livenessProbe:
tcpSocket:
port: 80
initialDelaySeconds: 2
periodSeconds: 3
readinessProbe:
httpGet:
path: /hostname.html #文件存在
port: 80
initialDelaySeconds: 3
periodSeconds: 3
[root@server2 ~]# kubectl create -f live.yml
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl describe pod liveness-exec ##查看详细信息,发现启动成功
文件不存在就一直请求服务
[root@server2 ~]# kubectl delete -f live.yml
pod "liveness-exec" deleted
[root@server2 ~]# vim live.yml
path: /test.html #没有这个文件就一直请求服务
[root@server2 ~]# kubectl create -f live.yml
pod/liveness-exec created
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 8h
liveness-exec 0/1 Running 0 33s
[root@server2 ~]# kubectl describe pod liveness-exec
[root@server2 ~]# kubectl exec -it liveness-exec -- sh
/ # cd usr/share/nginx/
/usr/share/nginx # ls
html
/usr/share/nginx # cd html/
/usr/share/nginx/html # ls
50x.html index.html
/usr/share/nginx/html # touch test.html
/usr/share/nginx/html # ls
50x.html index.html test.html
/usr/share/nginx/html # exit
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 0 8h
liveness-exec 1/1 Running 0 95s
3、使用init容器的情况initContainers
创建service
[root@server2 ~]# cat service.yml
apiVersion: v1
kind: Service
metadata:
name: myservice
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
[root@server2 ~]# kubectl create -f service.yml
[root@server2 ~]# kubectl get svc
[root@server2 ~]# kubectl describe svc myservice
##测试解析myservice
[root@server2 ~]# kubectl attach demo -it
/ # nslookup myservice ##可以解析到myservice
/ # nslookup mydb ##解析不到mydb
测试
[root@server2 ~]# vim init.yml
[root@server2 ~]# cat init.yml
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: myapp-container
image: myapp:v1
initContainers:
- name: init-myservice
image: busyboxplus
command: ['sh', '-c', "until nslookup myservice.default.svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
[root@server2 ~]# kubectl delete -f service.yml ##删除myservice服务
service "myservice" deleted
[root@server2 ~]# kubectl create -f init.yml ##创建pod
[root@server2 ~]# kubectl get pod ##查看pod,发现初始化失败
[root@server2 ~]# kubectl create -f service.yml
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl delete -f init.yml ##实验做完之后删除,保证环境的纯净
pod "myapp-pod" deleted
三、控制器
- Pod 的分类:
自主式 Pod:Pod 退出后不会被创建
控制器管理的 Pod:在控制器的生命周期里,始终要维持 Pod 的副本数目
1、ReplicaSet控制器
- Replication Controller和ReplicaSet
ReplicaSet 是下一代的 Replication Controller,官方推荐使用ReplicaSet。
ReplicaSet 和 Replication Controller 的唯一区别是选择器的支持,
ReplicaSet 支持新的基于集合的选择器需求。
ReplicaSet 确保任何时间都有指定数量的 Pod 副本在运行。
虽然 ReplicaSets 可以独立使用,但今天它主要被Deployments 用作协调 Pod 创建、删除和更新的机制。
[root@server2 ~]# vim rs.yml
[root@server2 ~]# kubectl apply -f rs.yml
replicaset.apps/replicaset-example created
[root@server2 ~]# kubectl get rs ##查看rs
NAME DESIRED CURRENT READY AGE
replicaset-example 3 3 1 5s
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 1 9h
liveness-exec 1/1 Running 0 63m
replicaset-example-4hdkg 1/1 Running 0 27s
replicaset-example-85rxq 1/1 Running 0 27s
replicaset-example-n7tp2 1/1 Running 0 27s
[root@server2 ~]# kubectl delete rs replicaset-example
replicaset.apps "replicaset-example" deleted
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 1 9h
liveness-exec 1/1 Running 0 64m
[root@server2 ~]# cat rs.yml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: replicaset-example
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: myapp:v1
2、Deployment控制器
Deployment 为 Pod 和 ReplicaSet 提供了一个申明式的定义方法。
典型的应用场景:
用来创建Pod和ReplicaSet
滚动更新和回滚
扩容和缩容
暂停与恢复
[root@server2 ~]# vim rs.yml
[root@server2 ~]# kubectl apply -f rs.yml
deployment.apps/deployment created
[root@server2 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demo 1/1 Running 1 9h
deployment-6456d7c676-856mr 1/1 Running 0 39s
deployment-6456d7c676-mbx72 1/1 Running 0 39s
deployment-6456d7c676-tt6ld 1/1 Running 0 39s
liveness-exec 1/1 Running 0 71m
[root@server2 ~]# kubectl get pod --show-labels ##显示所有标签
[root@server2 ~]# kubectl label pod deployment-6456d7c676-856mr app=myapp --overwrite
pod/deployment-6456d7c676-856mr labeled
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl get pod --show-labels #控制器副本定义的是3个,通过标签匹配3个副本,标签被改变后会重新创建副本。
[root@server2 ~]# kubectl get pod -L app
[root@server2 ~]# vim rs.yml
image: myapp:v2 #滚动更新
[root@server2 ~]# kubectl apply -f rs.yml
deployment.apps/deployment configured
[root@server2 ~]# kubectl get pod -L app
[root@server2 ~]# kubectl delete pod deployment-6456d7c676-856mr
pod "deployment-6456d7c676-856mr" deleted
[root@server2 ~]# kubectl get pod -L app
[root@server2 ~]# kubectl get rs
NAME DESIRED CURRENT READY AGE
deployment-6456d7c676 0 0 0 10m
deployment-6d4f5bf58f 3 3 3 2m21s
[root@server2 ~]# vim rs.yml
image: myapp:v1
[root@server2 ~]# kubectl apply -f rs.yml
deployment.apps/deployment configured
[root@server2 ~]# kubectl get rs
NAME DESIRED CURRENT READY AGE
deployment-6456d7c676 3 3 3 12m
deployment-6d4f5bf58f 0 0 0 4m38s
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 27d
myservice ClusterIP 10.109.37.8 <none> 80/TCP 73m
[root@server2 ~]# kubectl describe svc myservice
[root@server2 ~]# kubectl delete -f service.yml
service "myservice" deleted
[root@server2 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 27d
[root@server2 ~]# kubectl expose deployment deployment --port=80 ##暴露80端口
service/deployment exposed
[root@server2 ~]# kubectl get svc
[root@server2 ~]# kubectl describe svc deployment ##查看详细信息,有三个pod节点ip
[root@server2 ~]# kubectl get svc deployment -o yaml | less
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl get pod -L app
[root@server2 ~]# kubectl label pod deployment-6456d7c676-cbj6j app=myapp --overwrite
pod/deployment-6456d7c676-cbj6j labeled
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl get pod --show-labels #控制器副本定义的是3个,通过标签匹配3个副本,标签被改变后会重新创建副本。
[root@server2 ~]# kubectl get pod -L app
[root@server2 ~]# kubectl edit svc deployment
app: myapp
[root@server2 ~]# kubectl describe svc deployment #根据标签选择受控主机
[root@server2 ~]# kubectl delete -f rs.yml
[root@server2 ~]# kubectl delete svc deployment
[root@server2 ~]# kubectl get all #查看是否清理干净
[root@server2 ~]# kubectl delete pod/deployment-6456d7c676-cbj6j ##不属于deployment:nginx
3、DaemonSet控制器
DaemonSet 确保全部(或者某些)节点上运行一个 Pod 的副本。当有节点加入集群时, 也会为他们新增一个 Pod 。当有节点从集群移除时,这些 Pod 也会被回收。删除 DaemonSet 将会删除它创建的所有 Pod。
DaemonSet 的典型用法:
在每个节点上运行集群存储 DaemonSet,例如 glusterd、ceph。
在每个节点上运行日志收集 DaemonSet,例如 fluentd、logstash。
在每个节点上运行监控 DaemonSet,例如 Prometheus Node Exporter、zabbix agent等
一个简单的用法是在所有的节点上都启动一个 DaemonSet,将被作为每种类型的 daemon 使用。
一个稍微复杂的用法是单独对每种 daemon 类型使用多个 DaemonSet,但具有不同的标志, 并且对不同硬件类型具有不同的内存、CPU 要求。
[root@server1 harbor]# docker search zabbix-agent
[root@server1 harbor]# docker pull zabbix/zabbix-agent
[root@server1 harbor]# docker tag zabbix/zabbix-agent:latest reg.westos.org/library/zabbix-agent:latest
[root@server1 harbor]# docker push reg.westos.org/library/zabbix-agent:latest
The push refers to repository [reg.westos.org/library/zabbix-agent]
[root@server2 ~]# vim daemonset.yml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: daemonset-example
labels:
k8s-app: zabbix-agent
spec:
selector:
matchLabels:
name: zabbix-agent
template:
metadata:
labels:
name: zabbix-agent
spec:
containers:
- name: zabbix-agent
image: zabbix-agent
[root@server2 ~]# kubectl apply -f daemonset.yml ##执行之后会在每一个节点上都运行一个zabbix
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl get pod -o wide ##发现server3和server4上都运行有zabbix
[root@server2 ~]# kubectl delete -f daemonset.yml
4、Job
执行批处理任务,仅执行一次任务,保证任务的一个或多个Pod成功结束
[root@server1 harbor]# docker pull perl
[root@server1 harbor]# docker tag perl:latest reg.westos.org/library/perl:latest
[root@server1 harbor]# docker push reg.westos.org/library/perl:latest
[root@server2 ~]# vim job.yml
apiVersion: batch/v1
kind: Job
metadata:
name: pi
spec:
template:
spec:
containers:
- name: pi
image: perl ##server1上拉取上传到仓库
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] ##显示pi后面2000位小数
restartPolicy: Never ##不会重启
backoffLimit: 4 ##出错次数限制为4次
[root@server2 ~]# kubectl create -f job.yml
job.batch/pi created
[root@server2 ~]# kubectl get pod
[root@server2 ~]# kubectl describe pod pi-hjkcl
[root@server2 ~]# kubectl logs pi-hjkcl ##查看生成的日志信息
[root@server2 ~]# kubectl delete -f job.yml
5、CronJob
Cron Job 创建基于时间调度的 Jobs。
一个 CronJob 对象就像 crontab (cron table) 文件中的一行,它用 Cron 格式进行编写,并周期性地在给定的调度时间执行 Job。
[root@server2 ~]# vim cronjob.yml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: hello
spec:
schedule: "*/1 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busyboxplus
imagePullPolicy: IfNotPresent
args:
- /bin/sh
- -c
- date; echo Hello from the Kubernetes cluster
restartPolicy: OnFailure
[root@server2 ~]# kubectl create -f cronjob.yml
cronjob.batch/hello created
[root@server2 ~]# kubectl get all
[root@server2 ~]# kubectl get pod ##发生了调度
[root@server2 ~]# kubectl get all
[root@server2 ~]# kubectl logs hello-1614693060-dmcfw
Tue Mar 2 13:51:04 UTC 2021
Hello from the Kubernetes cluster
[root@server2 ~]# kubectl delete -f cronjob.yml ##也可以通过文件删除
cronjob.batch "hello" deleted