BlueXIII's Blog

热爱技术,持续学习

0%

K8S常用API对象

参考文档

https://github.com/chronolaw/k8s_study

Pod

ngx-pod.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Minimal Pod: one nginx container listening on port 80.
apiVersion: v1
kind: Pod
metadata:
  name: ngx-pod
  labels:
    env: demo
    owner: chrono

spec:
  containers:
  - image: nginx:alpine
    name: ngx
    ports:
    - containerPort: 80
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# Create the Pod
kubectl apply -f ngx-pod.yml

# Copy a local file into the Pod
kubectl cp a.txt ngx-pod:/tmp

# Open an interactive shell inside the Pod
kubectl exec -it ngx-pod -- sh

# View the Pod's logs (fixed: referenced busy-pod, which is never created here)
kubectl logs ngx-pod

# Delete the Pod — by manifest or by name (fixed: busy-pod -> ngx-pod)
kubectl delete -f ngx-pod.yml
kubectl delete pod ngx-pod

Job

job.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# One-shot Job: runs busybox once, echoing "hello world", restarting on failure.
apiVersion: batch/v1
kind: Job
metadata:
  name: echo-job

spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - image: busybox
        name: echo-job
        imagePullPolicy: IfNotPresent
        command: ["/bin/echo"]
        args: ["hello", "world"]
1
2
3
4
5
6
# Create the Job
kubectl apply -f job.yml

# Inspect the Pods spawned by the Job, and the Job itself
kubectl get pod -o wide
kubectl get jobs -o wide

CronJob

cronjob.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# CronJob: runs the echo Job every minute (cron schedule "*/1 * * * *").
apiVersion: batch/v1
kind: CronJob
metadata:
  name: echo-cj

spec:
  schedule: '*/1 * * * *'
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - image: busybox
            name: echo-cj
            imagePullPolicy: IfNotPresent
            command: ["/bin/echo"]
            args: ["hello", "world"]
1
2
3
4
5
# Create the CronJob
kubectl apply -f cronjob.yml

# List CronJobs (cj is the short name for cronjob)
kubectl get cj

ConfigMap&Secret

cm.yml

1
2
3
4
5
6
7
8
9
10
11
# ConfigMap with plain key/value data; values are quoted so that '10' and
# 'on' stay strings instead of being parsed as int/bool.
apiVersion: v1
kind: ConfigMap
metadata:
  name: info

data:
  count: '10'
  debug: 'on'
  path: '/etc/systemd'
  greeting: |
    say hello to kubernetes.

secret.yml

1
2
3
4
5
6
7
8
9
# Secret: values under data must be base64-encoded
# (cm9vdA== -> root, MTIzNDU2 -> 123456, bXlzcWw= -> mysql).
apiVersion: v1
kind: Secret
metadata:
  name: user

data:
  name: cm9vdA==
  pwd: MTIzNDU2
  db: bXlzcWw=

env-pod.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
# Pod that injects ConfigMap keys and Secret keys as environment variables.
apiVersion: v1
kind: Pod
metadata:
  name: env-pod

spec:
  containers:
  - env:
    - name: COUNT
      valueFrom:
        configMapKeyRef:
          name: info
          key: count
    - name: GREETING
      valueFrom:
        configMapKeyRef:
          name: info
          key: greeting
    - name: USERNAME
      valueFrom:
        secretKeyRef:
          name: user
          key: name
    - name: PASSWORD
      valueFrom:
        secretKeyRef:
          name: user
          key: pwd

    image: busybox
    name: busy
    imagePullPolicy: IfNotPresent
    # keep the container alive for 5 minutes so `kubectl exec` can inspect env
    command: ["/bin/sleep", "300"]
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# ConfigMap: create and inspect
kubectl apply -f cm.yml
kubectl get configmap
kubectl describe cm info

# Secret: create and inspect
kubectl apply -f secret.yml
kubectl get secret
kubectl describe secret user

# Pod: create it, then check the injected environment variables from inside
kubectl apply -f env-pod.yml
kubectl exec -it env-pod -- sh
env

Deployment

deploy.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
# ConfigMap holding an nginx server config, mounted into the Deployment below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ngx-conf

data:
  default.conf: |
    server {
      listen 80;
      location / {
        default_type text/plain;
        return 200
          'srv : $server_addr:$server_port\nhost: $hostname\nuri : $request_method $host $request_uri\ndate: $time_iso8601\n';
      }
    }

---

# Deployment: 2 nginx replicas, each mounting the ConfigMap as
# /etc/nginx/conf.d so the config above replaces the default site.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  labels:
    app: ngx-dep

spec:
  replicas: 2
  selector:
    matchLabels:
      app: ngx-dep

  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      volumes:
      - name: ngx-conf-vol
        configMap:
          name: ngx-conf

      containers:
      - image: nginx:alpine
        name: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - mountPath: /etc/nginx/conf.d
          name: ngx-conf-vol
1
2
3
4
5
6
7
8
# Create the ConfigMap + Deployment
kubectl apply -f deploy.yml
# Scale to 3 replicas
kubectl scale --replicas=3 deploy ngx-dep
# Inspect; the Pods carry label app=ngx-dep
# (fixed: `-l app=nginx` matched nothing — the template label is app=ngx-dep)
kubectl get pod -o wide
kubectl get pod -l app=ngx-dep
kubectl get pod -l 'app in (ngx, nginx, ngx-dep)'

DaemonSet

ds.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# DaemonSet: one redis Pod per node; the toleration lets it also run on
# master/control-plane nodes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: redis-ds
  labels:
    app: redis-ds

spec:
  selector:
    matchLabels:
      name: redis-ds

  template:
    metadata:
      labels:
        name: redis-ds

    spec:
      containers:
      - name: redis5
        image: redis:5-alpine
        ports:
        - containerPort: 6379

      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
1
2
3
4
5
6
7
# Create the DaemonSet
kubectl apply -f ds.yml
# Inspect the master node (check its taints)
kubectl describe node master
# Removing the master taint instead of tolerating it (not recommended);
# the trailing '-' on the second command is what removes the taint
# kubectl taint node master node-role.kubernetes.io/master:NoSchedule
# kubectl taint node master node-role.kubernetes.io/master:NoSchedule-

Service

svc.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# NodePort Service fronting the ngx-dep Pods (selected by label app=ngx-dep).
apiVersion: v1
kind: Service
metadata:
  name: ngx-svc

spec:
  selector:
    app: ngx-dep

  ports:
  - port: 80
    protocol: TCP
    targetPort: 80

  #type: ClusterIP
  type: NodePort
1
2
3
4
# Create the Service
kubectl apply -f svc.yml
# Inspect it (endpoints, NodePort assignment)
kubectl describe svc ngx-svc

PV&PVC

host-path-pv.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# 10 MiB hostPath PersistentVolume; the directory must exist on the node.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-10m-pv

spec:
  storageClassName: host-test

  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Mi

  # mkdir -p /tmp/host-10m-pv/
  hostPath:
    path: /tmp/host-10m-pv/

host-path-pvc.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# Claim for 5 Mi from storage class host-test; binds to the 10 Mi PV above
# (smallest PV that satisfies the request).
apiVersion: v1
kind: PersistentVolumeClaim

metadata:
  name: host-5m-pvc

spec:

  storageClassName: host-test

  accessModes:
  - ReadWriteOnce

  resources:
    requests:
      storage: 5Mi

host-path-pod.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# Pod mounting the host-5m-pvc claim at /tmp.
apiVersion: v1
kind: Pod
metadata:
  name: host-pvc-pod
spec:
  volumes:
  - name: host-pvc-vol
    persistentVolumeClaim:
      claimName: host-5m-pvc
  containers:
  - name: ngx-pvc-pod
    image: nginx:alpine
    ports:
    - containerPort: 80
    volumeMounts:
    - name: host-pvc-vol
      mountPath: /tmp
1
2
3
4
5
6
7
8
9
# PV: create the PersistentVolume, then check its status (should be Available)
kubectl apply -f host-path-pv.yml
kubectl get pv
# PVC: create the claim; it should bind to the PV above
kubectl apply -f host-path-pvc.yml
kubectl get pvc
# POD: create the Pod that mounts the claim
kubectl apply -f host-path-pod.yml
kubectl get pod -o wide

NFS

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# Server side: install and enable the NFS kernel server
sudo apt -y install nfs-kernel-server
sudo systemctl start nfs-server
sudo systemctl enable nfs-server
sudo systemctl status nfs-server

mkdir -p /tmp/nfs

# Export /tmp/nfs to the 192.168.101.0/24 subnet, then reload and verify
sudo vi /etc/exports
/tmp/nfs 192.168.101.0/24(rw,sync,no_subtree_check,no_root_squash,insecure)
sudo exportfs -ra
sudo exportfs -v
showmount -e 127.0.0.1


# Client side: install the NFS client tools and test-mount the export
sudo apt -y install nfs-common
mkdir -p /tmp/test
sudo mount -t nfs 192.168.101.242:/tmp/nfs /tmp/test

nfs-static-pv.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# Statically provisioned NFS PV (1 Gi, ReadWriteMany).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-1g-pv

spec:

  storageClassName: nfs

  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Gi

  # you must write the right path
  # in nfs server
  # mkdir -p /tmp/nfs/1g-pv
  nfs:
    path: /tmp/nfs/1g-pv
    server: 192.168.101.242

---

# pvc
# try to find the most suitable pv
# capacity/accessModes
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-static-pvc

spec:

  storageClassName: nfs

  accessModes:
  - ReadWriteMany

  resources:
    requests:
      storage: 1Gi
      #storage: 100Mi

---

# pod
apiVersion: v1
kind: Pod
metadata:
  name: nfs-static-pod

spec:

  volumes:
  - name: nfs-pvc-vol
    persistentVolumeClaim:
      claimName: nfs-static-pvc

  containers:
  - name: nfs-pvc-test
    image: nginx:alpine
    ports:
    - containerPort: 80

    volumeMounts:
    - name: nfs-pvc-vol
      mountPath: /tmp
1
2
3
4
# Create PV, PVC and Pod in one shot, then verify the PVC bound to the PV
kubectl apply -f nfs-static-pv.yml
kubectl get pv
kubectl get pvc
kubectl get pod

NFS Provisioner

安装参考: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

1
2
3
4
5
6
7
8


# Install the NFS subdir external provisioner via Helm, pointing it at the
# NFS server/export configured above
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
--set nfs.server=192.168.101.242 \
--set nfs.path=/tmp/nfs

# Uninstall when no longer needed
helm uninstall nfs-subdir-external-provisioner

nfs-dynamic-pv.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# Dynamically provisioned PVC: storage class nfs-client is served by the
# NFS subdir external provisioner, which creates the PV on demand.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-dyn-10m-pvc

spec:

  # in class.yaml
  storageClassName: nfs-client

  accessModes:
  - ReadWriteMany

  resources:
    requests:
      storage: 10Mi

---

apiVersion: v1
kind: Pod
metadata:
  name: nfs-dyn-pod

spec:

  volumes:
  - name: nfs-dyn-10m-vol
    persistentVolumeClaim:
      claimName: nfs-dyn-10m-pvc

  containers:
  - name: nfs-dyn-test
    image: nginx:alpine
    ports:
    - containerPort: 80

    volumeMounts:
    - name: nfs-dyn-10m-vol
      mountPath: /tmp
1
2
3
4
# Create the PVC and Pod; a PV should be provisioned automatically
kubectl apply -f nfs-dynamic-pv.yml
kubectl get pv
kubectl get pvc
kubectl get pod

StatefulSet

redis-pv-sts.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
# StatefulSet: 2 redis replicas with stable identities; each Pod gets its
# own PVC from volumeClaimTemplates (dynamically provisioned via nfs-client).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-pv-sts

spec:
  # headless svc
  serviceName: redis-pv-svc

  # pvc
  volumeClaimTemplates:
  - metadata:
      name: redis-100m-pvc
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 100Mi

  replicas: 2
  selector:
    matchLabels:
      app: redis-pv-sts

  template:
    metadata:
      labels:
        app: redis-pv-sts
    spec:
      containers:
      - image: redis:5-alpine
        name: redis
        ports:
        - containerPort: 6379

        volumeMounts:
        - name: redis-100m-pvc
          mountPath: /data

---

# Headless Service (clusterIP: None) giving each Pod a stable DNS name.
apiVersion: v1
kind: Service
metadata:
  name: redis-pv-svc

spec:
  selector:
    app: redis-pv-sts

  # headless
  clusterIP: None

  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
1
2
3
4
5
6
7
8
9
10
11
12
# Create the StatefulSet; PVCs are created from volumeClaimTemplates
kubectl apply -f redis-pv-sts.yml
kubectl get pvc
kubectl get sts
kubectl get pod

# Write some data inside pod 0 via redis-cli
kubectl exec -it redis-pv-sts-0 -- redis-cli
set a 111
set b 222
keys *
quit
# Delete the pod; the StatefulSet recreates it and the data should survive
kubectl delete pod redis-pv-sts-0

Version

ngx-v1.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# V1 stack: ConfigMap (nginx config reporting its version) + Deployment
# pinned to nginx:1.21 + Service. Updating the image in a V2 manifest
# demonstrates rolling updates.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ngx-conf

data:
  default.conf: |
    server {
      listen 80;
      location / {
        default_type text/plain;
        return 200
          'ver : $nginx_version\nsrv : $server_addr:$server_port\nhost: $hostname\n';
      }
    }

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  annotations:
    # recorded in `kubectl rollout history`; quoted because it contains a comma
    kubernetes.io/change-cause: "v1, ngx=1.21"

spec:
  #minReadySeconds: 5

  replicas: 4
  selector:
    matchLabels:
      app: ngx-dep

  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      volumes:
      - name: ngx-conf-vol
        configMap:
          name: ngx-conf

      containers:
      - image: nginx:1.21-alpine
        name: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - mountPath: /etc/nginx/conf.d
          name: ngx-conf-vol

---

apiVersion: v1
kind: Service
metadata:
  name: ngx-svc

spec:
  selector:
    app: ngx-dep

  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# Deploy V1
kubectl apply -f ngx-v1.yml
kubectl get deployment
kubectl get pod -o wide
# Smoke-test through the Service
kubectl port-forward svc/ngx-svc 8080:80 &
curl 127.1:8080
# Roll out V2
kubectl apply -f ngx-v2.yml
# Watch the rollout progress
kubectl rollout status deployment ngx-dep
# Inspect events
kubectl describe deploy ngx-dep
# Rollout history (fixed: --revision needs the deployment name)
kubectl rollout history deploy ngx-dep
kubectl rollout history deploy ngx-dep --revision=2
# Roll back to the previous revision
kubectl rollout undo deploy ngx-dep

NameSpace

test-ns.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# Namespace plus a Pod created inside it.
apiVersion: v1
kind: Namespace
metadata:
  name: test-ns

---

apiVersion: v1
kind: Pod
metadata:
  name: ngx
  namespace: test-ns
spec:
  containers:
  - image: nginx:alpine
    name: ngx
1
2
3
4
5
# Create the namespace imperatively (test-ns.yml below also declares it)
kubectl create ns test-ns 
kubectl get ns

# Create the Pod in the namespace; list it with -n
kubectl apply -f test-ns.yml
kubectl get pod -n test-ns

ResourceQuota&LimitRange

quota-ns.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
# Namespace with a ResourceQuota (hard caps on resources and object counts)
# and a LimitRange (per-container defaults, per-Pod maximums).
apiVersion: v1
kind: Namespace
metadata:
  name: dev-ns

---

apiVersion: v1
kind: ResourceQuota
metadata:
  name: dev-qt
  namespace: dev-ns

spec:
  hard:
    requests.cpu: 10
    requests.memory: 10Gi
    limits.cpu: 10
    limits.memory: 20Gi

    requests.storage: 100Gi
    persistentvolumeclaims: 100

    pods: 100
    configmaps: 100
    secrets: 100
    services: 10
    services.nodeports: 5

    count/jobs.batch: 1
    count/cronjobs.batch: 1
    count/deployments.apps: 1

---

apiVersion: v1
kind: LimitRange
metadata:
  name: dev-limits
  namespace: dev-ns

spec:
  limits:
  # applied to containers that omit requests/limits
  - type: Container
    defaultRequest:
      cpu: 200m
      memory: 50Mi
    default:
      cpu: 500m
      memory: 100Mi
  # upper bound across all containers in a Pod
  - type: Pod
    max:
      cpu: 800m
      memory: 200Mi
1
2
3
4
5
6
7
# Create namespace, quota and limit range
kubectl apply -f quota-ns.yml

# Inspect the quota usage in dev-ns
kubectl get quota -n dev-ns
kubectl describe quota -n dev-ns

# Inspect the limit range in dev-ns
kubectl get limitranges -n dev-ns
kubectl describe limitranges -n dev-ns