BlueXIII's Blog

Passionate about technology, always learning


References

Deploying via Helm

Installing cert-manager

Image list:

quay.io/jetstack/cert-manager-acmesolver:v1.11.0
quay.io/jetstack/cert-manager-cainjector:v1.11.0
quay.io/jetstack/cert-manager-controller:v1.11.0
quay.io/jetstack/cert-manager-ctl:v1.11.0
quay.io/jetstack/cert-manager-webhook:v1.11.0

Install:

# Install the CRDs
wget https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.crds.yaml
kubectl apply -f cert-manager.crds.yaml

# Install the chart
helm repo add jetstack https://charts.jetstack.io
helm pull jetstack/cert-manager --version=v1.11.0
helm install cert-manager ./cert-manager --namespace cert-manager --create-namespace
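
A quick sanity check before moving on to Rancher, a minimal sketch (pod names will differ per cluster):

# Verify the cert-manager controller, cainjector and webhook are Running
kubectl get pods -n cert-manager
kubectl get crd | grep cert-manager.io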

Installing Rancher

Image list:

rancher/rancher:v2.7.3
rancher/shell:v0.1.19
rancher/gitjob:v0.1.37
rancher/rancher-webhook:v0.3.3
rancher/fleet:v0.6.0
rancher/fleet-agent:v0.6.0

Install:

# Install with Helm
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm search repo rancher-stable/rancher --versions # 2.7.3
helm pull rancher-stable/rancher --version=2.7.3
helm install rancher ./rancher \
--namespace cattle-system --create-namespace \
--set hostname=rancher.dubhe \
--set bootstrapPassword=admin

# View the initial password
echo https://rancher.dubhe/dashboard/?setup=$(kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}')
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ "\n" }}'
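
Before opening the dashboard it is worth waiting for the deployment to finish rolling out; a minimal check:

# Wait for the Rancher deployment to become ready
kubectl -n cattle-system rollout status deploy/rancher
kubectl -n cattle-system get pods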

MySQL

https://artifacthub.io/packages/helm/bitnami/mysql

# Add the repo
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# Pull the chart (optional)
helm pull bitnami/mysql --version 9.9.1

# Offline install
dockemon chart bitnami mysql 9.9.1
helm install mysql ./mysql --namespace mysql --create-namespace
helm status mysql -n mysql

# DNS
mysql.mysql.svc.cluster.local:3306
# View the root password
MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace mysql mysql -o jsonpath="{.data.mysql-root-password}" | base64 -d)
echo $MYSQL_ROOT_PASSWORD # rVrmiKcXOo
# Run a client
kubectl run mysql-client --rm --tty -i --restart='Never' --image docker.io/bitnami/mysql:8.0.33-debian-11-r7 --namespace mysql --env MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD --command -- bash
mysql -h mysql.mysql.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"
# Local port-forward
kubectl port-forward --namespace mysql svc/mysql 3306:3306
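
With the port-forward running, the database can also be reached from the local machine; a small sketch assuming a local mysql client is installed:

# Connect through the local port-forward
mysql -h 127.0.0.1 -P 3306 -uroot -p"$MYSQL_ROOT_PASSWORD"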

Redis

https://artifacthub.io/packages/helm/bitnami/redis

# Add the repo
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# List all versions
helm search repo bitnami/redis --versions --devel
# Pull the chart (optional)
helm pull bitnami/redis --version 17.10.3

# Offline install
dockemon chart bitnami redis 17.10.3
helm install redis ./redis --namespace redis --create-namespace
helm status redis -n redis

# DNS
redis-master.redis.svc.cluster.local for read/write operations (port 6379)
redis-replicas.redis.svc.cluster.local for read-only operations (port 6379)
# View the password
export REDIS_PASSWORD=$(kubectl get secret --namespace redis redis -o jsonpath="{.data.redis-password}" | base64 -d)
echo $REDIS_PASSWORD # 4BEkvq5vdW
# Run a client
kubectl run --namespace redis redis-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image docker.io/bitnami/redis:7.0.11-debian-11-r7 --command -- sleep infinity
kubectl exec --tty -i redis-client --namespace redis -- bash
REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h redis-master
# Local port-forward
kubectl port-forward --namespace redis svc/redis-master 6379:6379
REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h 127.0.0.1 -p 6379

Zookeeper

https://artifacthub.io/packages/helm/bitnami/zookeeper

# Add the repo
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# List all versions
helm search repo bitnami/zookeeper --versions --devel
# Pull the chart (optional)
helm pull bitnami/zookeeper --version 11.3.2

# Offline install
dockemon chart bitnami zookeeper 11.3.2
helm install zookeeper ./zookeeper --namespace zookeeper --create-namespace
helm status zookeeper -n zookeeper

# DNS
zookeeper.zookeeper.svc.cluster.local
# Client
export POD_NAME=$(kubectl get pods --namespace zookeeper -l "app.kubernetes.io/name=zookeeper,app.kubernetes.io/instance=zookeeper,app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
echo $POD_NAME
kubectl exec -it --namespace zookeeper $POD_NAME -- zkCli.sh
# Local port-forward
kubectl port-forward --namespace zookeeper svc/zookeeper 2181:2181
zkCli.sh 127.0.0.1:2181

Minio

https://helm.min.io/

# Add the repo
helm repo add minio https://helm.min.io/
helm repo update
# List all versions
helm search repo minio/minio --versions --devel
# Pull the chart (optional)
helm pull minio/minio --version 8.0.10

# Offline install
dockemon chart minio minio 8.0.10
helm install minio ./minio --namespace minio --create-namespace
helm status minio -n minio

# Local port-forward
export POD_NAME=$(kubectl get pods --namespace minio -l "release=minio" -o jsonpath="{.items[0].metadata.name}")
echo $POD_NAME
kubectl port-forward $POD_NAME 9000 --namespace minio
# View the access_key/secret_key
ACCESS_KEY=$(kubectl get secret minio --namespace minio -o jsonpath="{.data.accesskey}" | base64 --decode)
SECRET_KEY=$(kubectl get secret minio --namespace minio -o jsonpath="{.data.secretkey}" | base64 --decode)
echo $ACCESS_KEY $SECRET_KEY
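
A quick smoke test with the MinIO client, a sketch assuming mc is installed locally and the port-forward above is running (older mc releases use "mc config host add" instead of "mc alias set"):

# Point mc at the port-forwarded service and create a test bucket
mc alias set local http://127.0.0.1:9000 "$ACCESS_KEY" "$SECRET_KEY"
mc mb local/test
mc ls local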

Nginx

https://artifacthub.io/packages/helm/bitnami/nginx

# Add the repo
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# List all versions
helm search repo bitnami/nginx --versions --devel
# Pull the chart (optional)
helm pull bitnami/nginx --version 14.2.1

# Offline install
dockemon chart bitnami nginx 14.2.1
helm install nginx ./nginx --namespace nginx --create-namespace
helm status nginx -n nginx


export SERVICE_PORT=$(kubectl get --namespace nginx -o jsonpath="{.spec.ports[0].port}" services nginx)
export SERVICE_IP=$(kubectl get svc --namespace nginx nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo "http://${SERVICE_IP}:${SERVICE_PORT}"

Neo4j

https://github.com/neo4j-contrib/neo4j-helm
https://artifacthub.io/packages/helm/neo4j-helm-charts/neo4j

# Edit the image list
dockemon edit
neo4j:4.4.10-enterprise
gcr.io/neo4j-helm/restore:4.1.0-1

# Push the offline images
dockemon image

# Manually download the chart
https://github.com/neo4j-contrib/neo4j-helm/releases
tar -zxvf neo4j-4.4.10.1.tgz && cd neo4j
# Note: edit values.yaml and reduce pvc.size

# Install the chart
helm install neo4j ./neo4j --set core.standalone=true --set acceptLicenseAgreement=yes --set neo4jPassword=abcd1234 --namespace neo4j --create-namespace
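
A minimal verification sketch; the exact pod and service names depend on the chart and release name:

# Check that the core pod comes up and list the services exposed by the chart
kubectl get pods,svc -n neo4j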

References

Version selection

1.4.1

Image list

longhornio/backing-image-manager:v1.4.2
longhornio/csi-attacher:v3.4.0
longhornio/csi-node-driver-registrar:v2.7.0
longhornio/csi-provisioner:v2.1.2
longhornio/csi-provisioner:v3.4.1
longhornio/csi-resizer:v1.3.0
longhornio/csi-snapshotter:v5.0.1
longhornio/livenessprobe:v2.8.0
longhornio/livenessprobe:v2.9.0
longhornio/longhorn-engine:v1.4.2
longhornio/longhorn-instance-manager:v1.4.2
longhornio/longhorn-manager:v1.4.2
longhornio/longhorn-share-manager:v1.4.2
longhornio/longhorn-ui:v1.4.2
longhornio/support-bundle-kit:v0.0.24

Install

# Download the chart
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm pull longhorn/longhorn

# Adjust parameters
defaultClassReplicaCount: 1
defaultDataLocality: best-effort # or disabled

# Install
helm install longhorn ./longhorn_chart --namespace longhorn --create-namespace
# Upgrade
helm upgrade longhorn ./longhorn_chart --namespace longhorn --create-namespace
# Uninstall
helm uninstall longhorn -n longhorn

Add a custom StorageClass

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-retain
  annotations:
    storageclass.kubernetes.io/is-default-class: 'true'
    storageclass.kubesphere.io/allow-clone: 'true'
    storageclass.kubesphere.io/allow-snapshot: 'true'
provisioner: driver.longhorn.io
parameters:
  dataLocality: best-effort
  fromBackup: ''
  fsType: ext4
  numberOfReplicas: '2'
  staleReplicaTimeout: '30'
reclaimPolicy: Retain
allowVolumeExpansion: true
volumeBindingMode: Immediate
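
Assuming the manifest above is saved as longhorn-retain-sc.yaml (the file name here is arbitrary), apply it and confirm which class is now the default:

kubectl apply -f longhorn-retain-sc.yaml
kubectl get storageclass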

Reference docs

https://kubesphere.io/zh/docs/v3.3/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/

Version selection

  • Kubernetes: v1.24.13+k3s1
  • KubeSphere: v3.3.1

Import images

Use the dockemon tool to import the images into Harbor offline.

kubesphere/ks-installer:v3.3.1
kubesphere/ks-apiserver:v3.3.1
kubesphere/ks-console:v3.3.1
kubesphere/ks-controller-manager:v3.3.1
kubesphere/ks-upgrade:v3.3.1
kubesphere/kubectl:v1.22.0
kubesphere/kubectl:v1.21.0
kubesphere/kubectl:v1.20.0
kubesphere/kubefed:v0.8.1
kubesphere/tower:v0.2.0
minio/minio:RELEASE.2019-08-07T01-59-21Z
minio/mc:RELEASE.2019-08-07T23-14-43Z
csiplugin/snapshot-controller:v4.0.0
kubesphere/nginx-ingress-controller:v1.1.0
mirrorgooglecontainers/defaultbackend-amd64:1.4
kubesphere/metrics-server:v0.4.2
redis:5.0.14-alpine
haproxy:2.0.25-alpine
alpine:3.14
osixia/openldap:1.3.0
kubesphere/netshoot:v1.0
jimmidyson/configmap-reload:v0.5.0
prom/prometheus:v2.34.0
kubesphere/prometheus-config-reloader:v0.55.1
kubesphere/prometheus-operator:v0.55.1
kubesphere/kube-rbac-proxy:v0.11.0
kubesphere/kube-state-metrics:v2.5.0
prom/node-exporter:v1.3.1
prom/alertmanager:v0.23.0
thanosio/thanos:v0.25.2
grafana/grafana:8.3.3
kubesphere/kube-rbac-proxy:v0.8.0
kubesphere/notification-manager-operator:v1.4.0
kubesphere/notification-manager:v1.4.0
kubesphere/notification-tenant-sidecar:v3.2.0
kubesphere/elasticsearch-curator:v5.7.6
kubesphere/elasticsearch-oss:6.8.22
kubesphere/fluentbit-operator:v0.13.0
docker:19.03
kubesphere/fluent-bit:v1.8.11
kubesphere/log-sidecar-injector:1.1
elastic/filebeat:6.7.0
kubesphere/kube-events-operator:v0.4.0
kubesphere/kube-events-exporter:v0.4.0
kubesphere/kube-events-ruler:v0.4.0
kubesphere/kube-auditing-operator:v0.2.0
kubesphere/kube-auditing-webhook:v0.2.0
kubesphere/devops-tools:v3.3.1
kubesphere/devops-controller:v3.3.1
kubesphere/devops-apiserver:v3.3.1
kubesphere/ks-jenkins:v3.3.0-2.319.1
kubesphere/s2ioperator:v3.2.1
redis:6.2.6-alpine
quay.io/argoproj/argocd:v2.3.3
quay.io/argoproj/argocd-applicationset:v0.4.1
ghcr.io/dexidp/dex:v2.30.2
kubesphere/builder-nodejs:v3.2.0-podman
kubesphere/builder-maven:v3.2.0-podman
jenkins/inbound-agent:4.10-2
harbor.dubhe:30002/library/alpine:3.14
harbor.dubhe:30002/library/redis:5.0.14-alpine
harbor.dubhe:30002/kubesphere/ks-console:v3.3.1
harbor.dubhe:30002/kubesphere/ks-controller-manager:v3.3.1
harbor.dubhe:30002/kubesphere/ks-installer:v3.3.1
harbor.dubhe:30002/kubesphere/ks-apiserver:v3.3.1
harbor.dubhe:30002/kubesphere/ks-upgrade:v3.3.1
harbor.dubhe:30002/kubesphere/kube-state-metrics:v2.5.0
harbor.dubhe:30002/kubesphere/fluent-bit:v1.8.11
harbor.dubhe:30002/kubesphere/prometheus-config-reloader:v0.55.1
harbor.dubhe:30002/kubesphere/prometheus-operator:v0.55.1
harbor.dubhe:30002/thanosio/thanos:v0.25.2
harbor.dubhe:30002/prom/prometheus:v2.34.0
harbor.dubhe:30002/kubesphere/fluentbit-operator:v0.13.0
harbor.dubhe:30002/kubesphere/kube-events-ruler:v0.4.0
harbor.dubhe:30002/kubesphere/kube-events-operator:v0.4.0
harbor.dubhe:30002/kubesphere/kube-events-exporter:v0.4.0
harbor.dubhe:30002/kubesphere/elasticsearch-oss:6.8.22
harbor.dubhe:30002/grafana/grafana:8.3.3
harbor.dubhe:30002/prom/node-exporter:v1.3.1
harbor.dubhe:30002/library/haproxy:2.0.25-alpine
harbor.dubhe:30002/kubesphere/nginx-ingress-controller:v1.1.0
harbor.dubhe:30002/kubesphere/kubectl:v1.22.0
harbor.dubhe:30002/kubesphere/kubectl:v1.21.0
harbor.dubhe:30002/kubesphere/notification-manager:v1.4.0
harbor.dubhe:30002/kubesphere/notification-tenant-sidecar:v3.2.0
harbor.dubhe:30002/kubesphere/notification-manager-operator:v1.4.0
harbor.dubhe:30002/kubesphere/kubefed:v0.8.1
harbor.dubhe:30002/prom/alertmanager:v0.23.0
harbor.dubhe:30002/kubesphere/kube-auditing-operator:v0.2.0
harbor.dubhe:30002/kubesphere/kube-auditing-webhook:v0.2.0
harbor.dubhe:30002/kubesphere/kube-rbac-proxy:v0.11.0
harbor.dubhe:30002/kubesphere/kubectl:v1.20.0
harbor.dubhe:30002/kubesphere/tower:v0.2.0
harbor.dubhe:30002/library/docker:19.03
harbor.dubhe:30002/kubesphere/metrics-server:v0.4.2
harbor.dubhe:30002/jimmidyson/configmap-reload:v0.5.0
harbor.dubhe:30002/csiplugin/snapshot-controller:v4.0.0
harbor.dubhe:30002/kubesphere/kube-rbac-proxy:v0.8.0
harbor.dubhe:30002/kubesphere/log-sidecar-injector:1.1
harbor.dubhe:30002/osixia/openldap:1.3.0
harbor.dubhe:30002/kubesphere/elasticsearch-curator:v5.7.6
harbor.dubhe:30002/minio/mc:RELEASE.2019-08-07T23-14-43Z
harbor.dubhe:30002/minio/minio:RELEASE.2019-08-07T01-59-21Z
harbor.dubhe:30002/elastic/filebeat:6.7.0
harbor.dubhe:30002/kubesphere/netshoot:v1.0
harbor.dubhe:30002/mirrorgooglecontainers/defaultbackend-amd64:1.4

Deploy

# Download the installation tool
curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/offline-installation-tool.sh

# Download the deployment manifests
curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/cluster-configuration.yaml
curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/kubesphere-installer.yaml

# Switch to the local registry (optional)
vi cluster-configuration.yaml
spec.local_registry: "harbor.dubhe:30002"
sed -i "s#^\s*image: kubesphere.*/ks-installer:.*# image: harbor.dubhe:30002/kubesphere/ks-installer:v3.3.1#" kubesphere-installer.yaml

# Start the installation
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
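
Installation takes a while; progress can be followed from the ks-installer pod logs (the label selector may differ slightly between versions):

# Tail the installer logs until "Welcome to KubeSphere" appears
kubectl logs -n kubesphere-system -l app=ks-installer -f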

References

https://docs.pingcap.com/zh/tidb-in-kubernetes/v1.5/prerequisites

Image list

alpine:3.16.0
bitnami/kubectl:latest
busybox:1.26.2
grafana/grafana:6.0.1
grafana/grafana:7.5.7
k8s.gcr.io/kube-scheduler:v1.24.13
pingcap/advanced-statefulset:v0.3.3
pingcap/advanced-statefulset:v0.4.0
pingcap/ng-monitoring:v6.5.0
pingcap/pd:v6.5.0
pingcap/ticdc:v6.5.0
pingcap/tidb-backup-manager:v1.5.0-beta.1
pingcap/tidb-binlog:v6.5.0
pingcap/tidb-dashboard:v6.5.0
pingcap/tidb-monitor-initializer:v6.5.0
pingcap/tidb-monitor-reloader:v1.0.1
pingcap/tidb-operator:v1.5.0-beta.1
pingcap/tidb:v6.5.0
pingcap/tiflash:v6.5.0
pingcap/tikv:v6.5.0
prom/prometheus:v2.18.1
quay.io/prometheus-operator/prometheus-config-reloader:v0.49.0
tnir/mysqlclient:latest

Note: handle the kube-scheduler image manually by moving it into the default project, and update the tidb-operator values.yaml accordingly:

nerdctl tag harbor.dubhe:30002/k8s.gcr.io/kube-scheduler:v1.24.13 harbor.dubhe:30002/library/kube-scheduler:v1.24.13
nerdctl push harbor.dubhe:30002/library/kube-scheduler:v1.24.13

Installing tidb-operator with Helm

wget http://charts.pingcap.org/tidb-operator-v1.5.0-beta.1.tgz # Download the chart package
tar -zxvf tidb-operator-v1.5.0-beta.1.tgz

helm install tidb-operator ./tidb-operator --namespace=tidb-admin --create-namespace
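
Verify that the operator components are running before creating the cluster:

kubectl get pods -n tidb-admin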

Deploy

Example YAML:
https://github.com/pingcap/tidb-operator/blob/master/examples/advanced/tidb-cluster.yaml

# Create the CRDs
kubectl create -f ./crd.yaml

# Edit tidb-cluster.yaml
name: tidb-dubhe
namespace: default # comment this out
requests: # limit CPU/MEM, optional
storageClassName: # leave empty for the default, or set to "local-path"
externalTrafficPolicy: # change Local to Cluster, optional
mysqlNodePort: 30550 # uncomment

# Deploy TiDB
kubectl create namespace tidb
kubectl apply -f tidb-cluster.yaml -n tidb

Initialization

Example YAML:
https://github.com/pingcap/tidb-operator/blob/master/manifests/initializer/tidb-initializer.yaml

# Edit tidb-initializer.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbInitializer
metadata:
  name: tidb-dubhe-init
  # namespace: demo
spec:
  image: tnir/mysqlclient
  cluster:
    # namespace: demo
    name: tidb-dubhe
  initSql: |-
    create database app;
  passwordSecret: tidb-secret

# Create the secret
kubectl create secret generic tidb-secret --from-literal=root=yourpass --namespace=tidb

# Initialize TiDB
kubectl apply -f tidb-initializer.yaml -n tidb

# Access TiDB
kubectl get svc -n tidb
mysql://10.193.35.11:30550
root/yourpass
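
A connection sketch using any MySQL client against the NodePort exposed above (host IP and password are the values used in this environment):

mysql -h 10.193.35.11 -P 30550 -uroot -p'yourpass'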

Install monitoring

Example YAML:

kubectl apply -f tidb-monitor.yaml -n tidb

# Console
http://10.193.35.11:30552

Install the dashboard

kubectl apply -f tidb-dashboard.yaml -n tidb
kubectl apply -f tidb-ng-monitoring.yaml -n tidb

# Console
http://10.193.35.11:30551/dashboard

Introduction

DOCKEMON is a Docker image toolkit that simplifies batch image operations (pull, tag, push, export and import) in offline Kubernetes environments.

It also provides shortcuts such as one-command chart installation and one-command image pushing.

GitHub download

https://github.com/xiiiblue/dockemon

System requirements

  • bash or zsh on macOS or Linux is recommended; MobaXterm on Windows is untested
  • Image-related operations require Docker Desktop or nerdctl
  • Chart-related operations require Helm

Installation

Download the dockemon-installer.sh script to your machine or server and run it:

chmod +x dockemon-installer.sh && ./dockemon-installer.sh

After installation, run dockemon from any directory to start the program.

Configuration

  1. Run dockemon help to view the help
  2. Run dockemon conf to set common parameters such as the Harbor address (optional)
  3. Run dockemon edit to maintain the image list to process, and dockemon show to view it (optional)

Common features

One-command chart install

Installs a chart in one command. Steps: download the chart -> analyze its image list -> pull images -> tag -> create repositories -> push images

dockemon chart [repo] [chart] [version]

Example:

dockemon chart bitnami redis 17.10.3

One-command image push

Pushes an image in one command. Steps: pull -> tag -> create repository -> push. If no image name is given, the whole image list is processed in batch.

dockemon image [image]

Example:

dockemon image nginx:alpine

Other batch operations

dockemon pull  # Pull images
dockemon tag # Tag images
dockemon tag_group [repo] # Tag into a single repository; a repo name is required
dockemon push # Push images
dockemon save # Export images
dockemon load # Import images
dockemon login # Log in to Harbor
dockemon repo # Create Harbor repositories
dockemon values # Analyze a Helm values file and generate an image list

Notes

  1. Helm operations are executed remotely from an internet-connected local machine; make sure ~/.kube/config is correct and kubectl get nodes can reach the cluster

  2. Add and update the repository before installing a chart, for example:

    helm repo add bitnami https://charts.bitnami.com/bitnami
    helm repo update
  3. If the Harbor registry is accessed by domain name, configure the entry in /etc/hosts first

  4. If the offline cluster cannot be reached over VPN (for example, it sits behind a bastion host), images have to be uploaded step by step, roughly as follows (see the sketch after this list):

    pull -> tag -> save -> manual upload -> load
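
A rough sketch of that flow with dockemon, using only the commands listed above; the copy step depends on whatever transfer channel is available:

# On the internet-connected machine
dockemon pull
dockemon tag
dockemon save
# ...copy the exported tarballs across the air gap by hand...
# On the offline side
dockemon load
dockemon push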

Docker offline install (static binaries)

wget https://download.docker.com/linux/static/stable/x86_64/docker-23.0.4.tgz
tar -zxvf docker-23.0.4.tgz
cp docker/* /usr/bin/
dockerd &

Docker online install on Ubuntu

sudo apt install -y docker.io
sudo gpasswd -a $USER docker
newgrp docker
docker version
docker info

Docker Compose offline install

# Download
wget https://github.com/docker/compose/releases/download/v2.17.3/docker-compose-linux-x86_64
sudo cp docker-compose-linux-x86_64 /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
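
A quick check that the binary works:

docker-compose version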

References

containerd configuration under Docker

# address
/var/run/docker/containerd/containerd.sock
# namespace
moby
# Manually configured config path
/etc/docker
# Auto-generated config path
/var/run/docker/containerd/containerd.toml
# Access with ctr
ctr --address /var/run/docker/containerd/containerd.sock --namespace moby c ls

containerd configuration under k3s

# address
/run/k3s/containerd/containerd.sock
# namespace
k8s.io
# Manually configured config path
cat /etc/rancher/k3s/registries.yaml
# Auto-generated config path
cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml
# Access with ctr
ctr --address /run/k3s/containerd/containerd.sock --namespace k8s.io c ls

Common ctr commands

# ctr alias for k3s
alias ctr="ctr --address /run/k3s/containerd/containerd.sock --namespace k8s.io"

# List namespaces
ctr ns ls

# List images
ctr i ls
# Pull an image
ctr i pull docker.io/library/nginx:1.21
# Import an image
ctr i import image.tar
# Remove an image
ctr i remove docker.io/library/nginx:1.21

# Run (essentially container create + task start)
ctr run --rm -t docker.io/library/debian:latest cont1

# Create a container
ctr c create -t docker.io/library/nginx:latest nginx_1
# List containers
ctr c ls
# Start a task
ctr t start -d nginx_1
# List tasks
ctr t ls
# Exec a process inside the container
ctr t exec -t --exec-id bash_1 nginx_1 bash

# Kill the task
ctr t kill -s 9 nginx_1
ctr t rm -f nginx_1
# Remove the container
ctr c rm nginx_1

Common crictl commands

# List pods
crictl pods
# List images
crictl images
# List containers
crictl ps -a
# Exec in a container
crictl exec -it 21c8007a90eea ls
# View container logs
crictl logs -f 21c8007a90eea
# Pull an image
crictl pull busybox

Common nerdctl commands

# nerdctl alias for k3s
alias nerdctl='nerdctl --host=/run/k3s/containerd/containerd.sock --namespace k8s.io --insecure-registry'

# Mostly compatible with the docker CLI; run nerdctl --help for details

Reference docs

https://helm.sh/zh/docs/intro/install/

Central chart repository

https://artifacthub.io/

Offline install

https://github.com/helm/helm/releases

tar -zxvf helm-v3.11.3-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm

Common operations

# Add a repository
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

# List repositories
helm repo list

# Search Artifact Hub
helm search hub wordpress

# List all versions
helm search repo bitnami/redis --versions --devel

# Add a third-party repo and search it
helm repo add brigade https://brigadecore.github.io/charts
helm search repo brigade

# Install
## Online install
helm install happy-panda bitnami/wordpress
## Local install into a specific namespace
helm install redis ./redis --namespace redis --create-namespace

# Check release status
helm status happy-panda
helm status redis --namespace redis

# Upgrade
helm upgrade -f panda.yaml happy-panda bitnami/wordpress
helm upgrade redis ./redis

# Roll back
helm rollback redis --namespace redis

# Uninstall
helm uninstall redis --namespace redis

Pushing with the cm-push plugin (Helm < 3.8)

# Install the cm-push plugin; Helm 3.8+ ships a built-in push command, but it cannot push to an HTTP chart repository
helm plugin install https://github.com/chartmuseum/helm-push

# Add the Harbor Helm repository
helm repo add dubhe http://harbor.dubhe:30002/chartrepo/dubhe
helm repo update

# Upload the chart
helm cm-push ./gogs.tgz dubhe --username admin --password yourpass

Pushing directly via OCI (Helm 3.8+)

https://helm.sh/zh/docs/helm/helm_push/
https://helm.sh/zh/docs/topics/registries/
https://github.com/helm/helm/pull/11599
https://github.com/helm/helm/issues/11683
https://github.com/helm/helm/issues/6324

helm registry login -u admin harbor.dubhe:30002 --insecure
helm push dubhe-register.tgz oci://harbor.dubhe:30003/dubhe-chart # this method does not support plain HTTP

Reference docs

https://github.com/chronolaw/k8s_study

Pod

ngx-pod.yml

apiVersion: v1
kind: Pod
metadata:
  name: ngx-pod
  labels:
    env: demo
    owner: chrono

spec:
  containers:
  - image: nginx:alpine
    name: ngx
    ports:
    - containerPort: 80
# Create the Pod
kubectl apply -f ngx-pod.yml

# Copy a file into the Pod
kubectl cp a.txt ngx-pod:/tmp

# Open a shell in the Pod
kubectl exec -it ngx-pod -- sh

# View logs
kubectl logs busy-pod

# Delete
kubectl delete -f busy-pod.yaml
kubectl delete pod busy-pod

Job

job.yml

apiVersion: batch/v1
kind: Job
metadata:
  name: echo-job

spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - image: busybox
        name: echo-job
        imagePullPolicy: IfNotPresent
        command: ["/bin/echo"]
        args: ["hello", "world"]
# Create the Job
kubectl apply -f job.yml

# View Pods and Jobs
kubectl get pod -o wide
kubectl get jobs -o wide

CronJob

cronjob.yml

apiVersion: batch/v1
kind: CronJob
metadata:
  name: echo-cj

spec:
  schedule: '*/1 * * * *'
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - image: busybox
            name: echo-cj
            imagePullPolicy: IfNotPresent
            command: ["/bin/echo"]
            args: ["hello", "world"]
# Create the CronJob
kubectl apply -f cronjob.yml

# View CronJobs
kubectl get cj

ConfigMap&Secret

cm.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: info

data:
  count: '10'
  debug: 'on'
  path: '/etc/systemd'
  greeting: |
    say hello to kubernetes.

secret.yml

apiVersion: v1
kind: Secret
metadata:
  name: user

data:
  name: cm9vdA==
  pwd: MTIzNDU2
  db: bXlzcWw=

env-pod.yml

apiVersion: v1
kind: Pod
metadata:
  name: env-pod

spec:
  containers:
  - env:
    - name: COUNT
      valueFrom:
        configMapKeyRef:
          name: info
          key: count
    - name: GREETING
      valueFrom:
        configMapKeyRef:
          name: info
          key: greeting
    - name: USERNAME
      valueFrom:
        secretKeyRef:
          name: user
          key: name
    - name: PASSWORD
      valueFrom:
        secretKeyRef:
          name: user
          key: pwd

    image: busybox
    name: busy
    imagePullPolicy: IfNotPresent
    command: ["/bin/sleep", "300"]
# ConfigMap
kubectl apply -f cm.yml
kubectl get configmap
kubectl describe cm info

# Secret
kubectl apply -f secret.yml
kubectl get secret
kubectl describe secret user

# Pod
kubectl apply -f env-pod.yml
kubectl exec -it env-pod -- sh
env

Deployment

deploy.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: ngx-conf

data:
  default.conf: |
    server {
      listen 80;
      location / {
        default_type text/plain;
        return 200
          'srv : $server_addr:$server_port\nhost: $hostname\nuri : $request_method $host $request_uri\ndate: $time_iso8601\n';
      }
    }

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  labels:
    app: ngx-dep

spec:
  replicas: 2
  selector:
    matchLabels:
      app: ngx-dep

  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      volumes:
      - name: ngx-conf-vol
        configMap:
          name: ngx-conf

      containers:
      - image: nginx:alpine
        name: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - mountPath: /etc/nginx/conf.d
          name: ngx-conf-vol
# Create
kubectl apply -f deploy.yml
# Scale
kubectl scale --replicas=3 deploy ngx-dep
# View
kubectl get pod -o wide
kubectl get pod -l app=nginx
kubectl get pod -l 'app in (ngx, nginx, ngx-dep)'

DaemonSet

ds.yml

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: redis-ds
  labels:
    app: redis-ds

spec:
  selector:
    matchLabels:
      name: redis-ds

  template:
    metadata:
      labels:
        name: redis-ds

    spec:
      containers:
      - name: redis5
        image: redis:5-alpine
        ports:
        - containerPort: 6379

      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
# Create
kubectl apply -f ds.yml
# Inspect the master node
kubectl describe node master
# Taint / untaint the master node (not recommended)
# kubectl taint node master node-role.kubernetes.io/master:NoSchedule
# kubectl taint node master node-role.kubernetes.io/master:NoSchedule-

Service

svc.yml

apiVersion: v1
kind: Service
metadata:
  name: ngx-svc

spec:
  selector:
    app: ngx-dep

  ports:
  - port: 80
    protocol: TCP
    targetPort: 80

  #type: ClusterIP
  type: NodePort
# Create
kubectl apply -f svc.yml
# Inspect
kubectl describe svc ngx-svc

PV&PVC

host-path-pv.yml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-10m-pv

spec:
  storageClassName: host-test

  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Mi

  # mkdir -p /tmp/host-10m-pv/
  hostPath:
    path: /tmp/host-10m-pv/

host-path-pvc.yml

apiVersion: v1
kind: PersistentVolumeClaim

metadata:
  name: host-5m-pvc

spec:

  storageClassName: host-test

  accessModes:
  - ReadWriteOnce

  resources:
    requests:
      storage: 5Mi

host-path-pod.yml

apiVersion: v1
kind: Pod
metadata:
  name: host-pvc-pod
spec:
  volumes:
  - name: host-pvc-vol
    persistentVolumeClaim:
      claimName: host-5m-pvc
  containers:
  - name: ngx-pvc-pod
    image: nginx:alpine
    ports:
    - containerPort: 80
    volumeMounts:
    - name: host-pvc-vol
      mountPath: /tmp
# PV
kubectl apply -f host-path-pv.yml
kubectl get pv
# PVC
kubectl apply -f host-path-pvc.yml
kubectl get pvc
# POD
kubectl apply -f host-path-pod.yml
kubectl get pod -o wide

NFS

# Server side
sudo apt -y install nfs-kernel-server
sudo systemctl start nfs-server
sudo systemctl enable nfs-server
sudo systemctl status nfs-server

mkdir -p /tmp/nfs

sudo vi /etc/exports
/tmp/nfs 192.168.101.0/24(rw,sync,no_subtree_check,no_root_squash,insecure)
sudo exportfs -ra
sudo exportfs -v
showmount -e 127.0.0.1


# Client side
sudo apt -y install nfs-common
mkdir -p /tmp/test
sudo mount -t nfs 192.168.101.242:/tmp/nfs /tmp/test
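
A quick read/write check across the mount (paths as configured above):

# Write on the client, then confirm the file shows up under /tmp/nfs on the server
echo hello > /tmp/test/hello.txt
df -h /tmp/test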

nfs-static-pv.yml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-1g-pv

spec:

  storageClassName: nfs

  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Gi

  # you must write the right path
  # in nfs server
  # mkdir -p /tmp/nfs/1g-pv
  nfs:
    path: /tmp/nfs/1g-pv
    server: 192.168.101.242

---

# pvc
# try to find the most suitable pv
# capacity/accessModes
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-static-pvc

spec:

  storageClassName: nfs

  accessModes:
  - ReadWriteMany

  resources:
    requests:
      storage: 1Gi
      #storage: 100Mi

---

# pod
apiVersion: v1
kind: Pod
metadata:
  name: nfs-static-pod

spec:

  volumes:
  - name: nfs-pvc-vol
    persistentVolumeClaim:
      claimName: nfs-static-pvc

  containers:
  - name: nfs-pvc-test
    image: nginx:alpine
    ports:
    - containerPort: 80

    volumeMounts:
    - name: nfs-pvc-vol
      mountPath: /tmp
kubectl apply -f nfs-static-pv.yml
kubectl get pv
kubectl get pvc
kubectl get pod

NFS Provisioner

Installation reference: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner



helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
--set nfs.server=192.168.101.242 \
--set nfs.path=/tmp/nfs

helm uninstall nfs-subdir-external-provisioner
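
After the chart is installed, the dynamic StorageClass (nfs-client by default) and the provisioner pod should be visible; a small check:

kubectl get storageclass
kubectl get pods | grep nfs-subdir-external-provisioner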

nfs-dynamic-pv.yml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-dyn-10m-pvc

spec:

  # in class.yaml
  storageClassName: nfs-client

  accessModes:
  - ReadWriteMany

  resources:
    requests:
      storage: 10Mi

---

apiVersion: v1
kind: Pod
metadata:
  name: nfs-dyn-pod

spec:

  volumes:
  - name: nfs-dyn-10m-vol
    persistentVolumeClaim:
      claimName: nfs-dyn-10m-pvc

  containers:
  - name: nfs-dyn-test
    image: nginx:alpine
    ports:
    - containerPort: 80

    volumeMounts:
    - name: nfs-dyn-10m-vol
      mountPath: /tmp
kubectl apply -f nfs-dynamic-pv.yml
kubectl get pv
kubectl get pvc
kubectl get pod

StatefulSet

redis-pv-sts.yml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-pv-sts

spec:
  # headless svc
  serviceName: redis-pv-svc

  # pvc
  volumeClaimTemplates:
  - metadata:
      name: redis-100m-pvc
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 100Mi

  replicas: 2
  selector:
    matchLabels:
      app: redis-pv-sts

  template:
    metadata:
      labels:
        app: redis-pv-sts
    spec:
      containers:
      - image: redis:5-alpine
        name: redis
        ports:
        - containerPort: 6379

        volumeMounts:
        - name: redis-100m-pvc
          mountPath: /data

---

apiVersion: v1
kind: Service
metadata:
  name: redis-pv-svc

spec:
  selector:
    app: redis-pv-sts

  # headless
  clusterIP: None

  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
kubectl apply -f redis-pv-sts.yml
kubectl get pvc
kubectl get sts
kubectl get pod

kubectl exec -it redis-pv-sts-0 -- redis-cli
set a 111
set b 222
keys *
quit
kubectl delete pod redis-pv-sts-0
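
Once the StatefulSet recreates the pod, the keys written earlier should still be there, since /data is backed by the PVC:

kubectl get pod redis-pv-sts-0
kubectl exec -it redis-pv-sts-0 -- redis-cli keys '*'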

Version

ngx-v1.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: ngx-conf

data:
  default.conf: |
    server {
      listen 80;
      location / {
        default_type text/plain;
        return 200
          'ver : $nginx_version\nsrv : $server_addr:$server_port\nhost: $hostname\n';
      }
    }

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  annotations:
    kubernetes.io/change-cause: v1, ngx=1.21

spec:
  #minReadySeconds: 5

  replicas: 4
  selector:
    matchLabels:
      app: ngx-dep

  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      volumes:
      - name: ngx-conf-vol
        configMap:
          name: ngx-conf

      containers:
      - image: nginx:1.21-alpine
        name: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - mountPath: /etc/nginx/conf.d
          name: ngx-conf-vol

---

apiVersion: v1
kind: Service
metadata:
  name: ngx-svc

spec:
  selector:
    app: ngx-dep

  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
# Deploy v1
kubectl apply -f ngx-v1.yml
kubectl get deployment
kubectl get pod -o wide
# Test
kubectl port-forward svc/ngx-svc 8080:80 &
curl 127.1:8080
# Update to v2
kubectl apply -f ngx-v2.yml
# Watch rollout status
kubectl rollout status deployment ngx-dep
# Inspect details
kubectl describe deploy ngx-dep
# View history
kubectl rollout history deploy ngx-dep
kubectl rollout history deploy ngx-dep --revision=2
# Roll back
kubectl rollout undo deploy ngx-dep

NameSpace

test-ns.yml

apiVersion: v1
kind: Namespace
metadata:
  name: test-ns

---

apiVersion: v1
kind: Pod
metadata:
  name: ngx
  namespace: test-ns
spec:
  containers:
  - image: nginx:alpine
    name: ngx
kubectl create ns test-ns 
kubectl get ns

kubectl apply -f test-ns.yml
kubectl get pod -n test-ns

ResourceQuota&LimitRange

quota-ns.yml

apiVersion: v1
kind: Namespace
metadata:
  name: dev-ns

---

apiVersion: v1
kind: ResourceQuota
metadata:
  name: dev-qt
  namespace: dev-ns

spec:
  hard:
    requests.cpu: 10
    requests.memory: 10Gi
    limits.cpu: 10
    limits.memory: 20Gi

    requests.storage: 100Gi
    persistentvolumeclaims: 100

    pods: 100
    configmaps: 100
    secrets: 100
    services: 10
    services.nodeports: 5

    count/jobs.batch: 1
    count/cronjobs.batch: 1
    count/deployments.apps: 1

---

apiVersion: v1
kind: LimitRange
metadata:
  name: dev-limits
  namespace: dev-ns

spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: 200m
      memory: 50Mi
    default:
      cpu: 500m
      memory: 100Mi
  - type: Pod
    max:
      cpu: 800m
      memory: 200Mi
kubectl apply -f quota-ns.yml

kubectl get quota -n dev-ns
kubectl describe quota -n dev-ns

kubectl get limitranges -n dev-ns
kubectl describe limitranges -n dev-ns