Overview
This document records the YAML file templates I use most often.
Kubernetes
RBAC
---
apiVersion: v1 # This template is independent of template-deployment and template-pv
kind: ServiceAccount # Creates a ServiceAccount; every namespace also gets a default ServiceAccount automatically when it is created
metadata:
name: prometheus # Name of the ServiceAccount; a StatefulSet or Deployment references it via serviceAccountName: prometheus (at the same level as containers)
namespace: basic # Namespace the ServiceAccount lives in
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus # A ClusterRole is not namespaced; only a Role is namespaced
rules:
- apiGroups:
- "" # "" denotes the core API group
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses # Read Ingress resources in the extensions group; verify with kubectl describe clusterrole prometheus
verbs:
- get
- list
- watch # get/list/watch together amount to read access
- nonResourceURLs: # URLs that do not map to any Kubernetes resource
- "/metrics" # Allows reading http://serviceIP/metrics of other Services; this is the standard path Prometheus scrapes for target metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1 # This binding lets anything using the ServiceAccount named prometheus read the resources above in all namespaces
kind: ClusterRoleBinding # Binds the ServiceAccount to the ClusterRole, so containers running under that ServiceAccount get the permissions the ClusterRole defines
metadata:
name: prometheus
namespace: basic # A RoleBinding's namespace scopes where access is granted; a ClusterRoleBinding is cluster-scoped, so this field has no effect here
roleRef: # The Role or ClusterRole being bound to the subjects
kind: ClusterRole # Must be Role or ClusterRole
name: prometheus # Must match the name of the Role or ClusterRole being bound
apiGroup: rbac.authorization.k8s.io
subjects: # Who receives the permissions; more than one subject may be listed
- kind: ServiceAccount
name: prometheus # Grants the permissions to the ServiceAccount named prometheus (case-sensitive)
namespace: basic # The ServiceAccount lives in the basic namespace
apiGroup: "" # For ServiceAccount subjects the apiGroup must be "" (empty); rbac.authorization.k8s.io applies only to User and Group subjects
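To verify the binding, impersonate the ServiceAccount with kubectl auth can-i (a quick check, assuming the three manifests above were applied unchanged):
[root@k8s-master ~]# kubectl auth can-i list pods --as=system:serviceaccount:basic:prometheus # should print yes
[root@k8s-master ~]# kubectl auth can-i get /metrics --as=system:serviceaccount:basic:prometheus # non-resource URL check, should also print yes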
PV & PVC
---
apiVersion: v1
kind: PersistentVolume # Defines a PV; the PV provides the underlying storage
metadata:
name: nfspv10
spec:
capacity:
storage: 10Gi # Capacity of the PV
accessModes: # Access modes
- ReadWriteMany
persistentVolumeReclaimPolicy: Recycle # Reclaim policy; Recycle is deprecated, so prefer Retain or Delete
storageClassName: eboxtest # Storage class of this PV; only a PVC requesting the same storageClassName can bind to it
nfs: # Backed by NFS; run showmount -e on 192.168.254.29 to confirm the export. The NFS service on that host must be configured, running, and enabled at boot
path: /usr/local/wlhiot/mount/nfsdata/nfspv10 # Directory on 192.168.254.29 where the data is actually stored
server: 192.168.254.29
---
apiVersion: v1
kind: PersistentVolumeClaim # Keep the PV and PVC in the same YAML file so neither is deleted by mistake; the PVC is the storage abstraction that Pods consume
metadata:
name: eboxtest-pvc
namespace: test
spec: # The PVC binds to a concrete PV that provides the storage
accessModes:
- ReadWriteMany
resources: # Requested resources
requests:
storage: 10Gi # Requested storage size
storageClassName: eboxtest # Bind only to a PV whose storageClassName is eboxtest
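Once both objects exist, the claim binds automatically; kubectl should show STATUS Bound on both (names and namespace as in the template above):
[root@k8s-master ~]# kubectl get pv nfspv10
[root@k8s-master ~]# kubectl get pvc eboxtest-pvc -n test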
Deployment & Service
This template deploys the workload and exposes it externally through node ports, which suits small self-hosted services; a quick verification check follows the manifest.
---
apiVersion: v1
kind: Service
metadata:
name: test
namespace: test # The Service should live in the same namespace as the Deployment
spec:
type: NodePort # I usually use NodePort, but it does not scale well when many services must be exposed (use an Ingress instead)
selector:
app: test # All Pods labeled app=test are served by this Service
ports:
- name: eboxtest-svr
port: 8080 # Port exposed on the Service (cluster-internal)
targetPort: 8080 # Port the container listens on
nodePort: 31112 # Port opened on every node; traffic to any node's IP on this port is load-balanced to the Pods
- name: eboxtest-vue
port: 80
targetPort: 80
nodePort: 31111
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ebox-ingrees
namespace: test # The Ingress must live in the same namespace as its backend Service
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
rules:
- host: ebox.test.wlhiot.com
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: test
port:
number: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test
namespace: test
spec:
replicas: 2
selector:
matchLabels: # spec.selector.matchLabels must match the labels defined in spec.template.metadata.labels
app: test # Manages all Pods labeled app=test
template: # Pod template; every Pod created by this Deployment gets the attributes defined below
metadata:
annotations: # Annotations are arbitrary metadata; unlike labels they are not used for selection, though tools such as Prometheus read them
timestamp: "1"
branch: develop
repository: harbor.wlhiot.com:8080
prometheus.io/scrape: "true" # Enable Prometheus scraping
prometheus.io/path: "/metrics" # Metrics path is /metrics
prometheus.io/port: "9113" # Metrics port is 9113
labels:
app: test
app.kubernetes.io/name: test # Recommended shared labels; the entries below follow the standard set and can be reused when creating new resources
app.kubernetes.io/instance: test-abcxzy
app.kubernetes.io/random: '20598' # Random value refreshed on every build to trigger a rolling update
app.kubernetes.io/component: project
app.kubernetes.io/part-of: ebox
app.kubernetes.io/managed-by: kubernetes
app.kubernetes.io/created-by: controller-manager
spec:
restartPolicy: Always # Restart policy: always restart containers automatically
affinity: # Affinity settings
podAntiAffinity: # Three affinity types are available: nodeAffinity, podAffinity, podAntiAffinity
preferredDuringSchedulingIgnoredDuringExecution: # Soft affinity; requiredDuringSchedulingIgnoredDuringExecution would be the hard variant
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions: # Pods prefer not to be scheduled alongside Pods labeled app=test; since the replicas created here carry that label, they spread across nodes for higher availability
- key: app
operator: In # Matches Pods whose app label value is in the list below (with nodeAffinity it would match Nodes instead)
values:
- test
topologyKey: kubernetes.io/hostname # Built-in node label whose value is the node's hostname, so it is unique per node
containers:
- name: eboxtest-svr
image: 192.168.254.29:8080/test/ebox_svr
imagePullPolicy: Always # Always pull a fresh image; see my Youdao note day86_kubernetes for the personal template
ports:
- containerPort: 8080
- containerPort: 8221
livenessProbe: # Liveness probe: if it fails, the kubelet kills the container, which then follows the restart policy
tcpSocket: # TCP check
port: 8080
initialDelaySeconds: 10 # Wait 10s before the first probe (default 0s)
periodSeconds: 10 # Probe every 10s (default 10s, minimum 1s)
failureThreshold: 5 # Consecutive failures before the probe is considered failed; for a liveness probe that means the container is restarted
successThreshold: 1 # Minimum consecutive successes after a failure to be considered successful again (default 1; may be omitted)
timeoutSeconds: 2 # Probe timeout of 2s (default 1s)
resources: # Resource management
requests: # The scheduler only places the Pod on a Node that can satisfy these requests
cpu: 250m
memory: 750Mi # Mi is the binary unit (mebibytes); the decimal unit M (megabytes) is also accepted
volumeMounts: # Mounts the eboxtest-pv volume at /mount inside the container
- mountPath: /mount
name: eboxtest-pv
- name: eboxtest-vue
image: 192.168.254.29:8080/test/ebox_vue
imagePullPolicy: Always # Already the default here, since untagged images default to Always
ports:
- containerPort: 80
readinessProbe: # Readiness probe
httpGet: # HTTP GET request
path: /vehiclemap # Probes /vehiclemap on the container; any status code >= 200 and < 400 counts as success, anything else as failure
port: 80
scheme: HTTP # Protocol used to connect to the host (default HTTP); remove this field if it causes problems
httpHeaders: # Custom HTTP request headers; duplicate headers are allowed
- name: Custom-Header
value: Awesome
initialDelaySeconds: 10
periodSeconds: 10
failureThreshold: 5
successThreshold: 1 # Minimum consecutive successes after a failure; a failing readiness probe only marks the Pod NotReady, it does not restart the container
timeoutSeconds: 2
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 2
resources:
requests:
cpu: 250m
memory: 750Mi
- name: nginx-exporter
image: fish/nginx-exporter
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 9113
initContainers: # Init container: runs to completion before the app containers start, commonly used to prepare the Pod's initial environment
- name: init-sleep
image: busybox
command: ['sh', '-c', 'echo The app is running! && sleep 5'] # sleep 5s
volumes: # Volume definitions
- name: eboxtest-pv # Volume name referenced by volumeMounts above
persistentVolumeClaim:
claimName: eboxtest-pvc # Mounts the PVC named eboxtest-pvc into the Pod; see template-pv.yaml for how the PVC is created
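A quick check after applying the manifest; <node-ip> is a placeholder, and any node in the cluster works because a NodePort listens on every node:
[root@k8s-master ~]# kubectl rollout status deployment/test -n test
[root@k8s-master ~]# curl -I http://<node-ip>:31111/ # frontend via the eboxtest-vue NodePort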
Service & Ingress
For externally facing services, exposing them through a domain name is recommended. This manifest exposes a single service only; a curl check follows it.
---
apiVersion: v1
kind: Service
metadata:
name: test-service
namespace: test # The Service should live in the same namespace as the Deployment
spec:
type: NodePort # I usually use NodePort
selector:
app: test # All Pods labeled app=test are served by this Service
ports:
- name: eboxtest-svr
port: 8080 # Port exposed on the Service (cluster-internal)
targetPort: 8080 # Port the container listens on
nodePort: 31112 # Port opened on every node; traffic to any node's IP on this port is load-balanced to the Pods
- name: eboxtest-vue
port: 80
targetPort: 80
nodePort: 31111
---
apiVersion: networking.k8s.io/v1
kind: Ingress # An ingress-nginx-controller must be deployed before Ingress resources can be used
metadata:
name: test-ingrees
namespace: test # The Ingress must live in the same namespace as its backend Service
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx # The ingress class this Ingress uses; nginx refers to the ingress-nginx controller
rules:
- host: test.wlhiot.cn # Hostname; append the ingress controller's NodePort when accessing, e.g. test.wlhiot.cn:30080
http:
paths:
- pathType: Prefix # Prefix matching
path: / # / is the recommended default; with path /v1 the URL becomes test.wlhiot.cn:30080/v1
backend: # The resulting endpoints can be inspected with kubectl get endpoints
service:
name: test-service
port:
number: 80 # The Service port, not the externally exposed NodePort
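The rule can be tested without touching DNS by sending the Host header directly to the ingress controller's NodePort (30080, see the ingress-nginx-controller Service further below; <node-ip> is a placeholder):
[root@k8s-master ~]# curl -H "Host: test.wlhiot.cn" http://<node-ip>:30080/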
Mix Project
Combines a Deployment (frontend only), a Service, and an Ingress so that each frontend is reachable through its own domain name.
[root@k8s-master ~]# cat /usr/local/wlhiot/container/kubernetes/project/mix/mix.yaml
---
apiVersion: v1
kind: Service
metadata:
name: mix
namespace: default
spec:
type: NodePort
selector:
app: mix
ports:
- name: fire-vue
port: 9811
targetPort: 9811
nodePort: 30911
- name: fuser-vue
port: 9813
targetPort: 9813
nodePort: 30913
- name: wiot-vue
port: 9815
targetPort: 9815
nodePort: 30915
- name: ebox-vue
port: 9821
targetPort: 9821
nodePort: 30921
- name: euser-vue
port: 9823
targetPort: 9823
nodePort: 30923
- name: umu-vue
port: 9825
targetPort: 9825
nodePort: 30925
- name: gov-vue
port: 9827
targetPort: 9827
nodePort: 30927
- name: report-vue
port: 9829
targetPort: 9829
nodePort: 30929
- name: lops-vue
port: 9841
targetPort: 9841
nodePort: 30941
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mix-ingrees
namespace: default
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
rules:
- host: fire.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9811
- host: fuser.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9813
- host: wiot.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9815
- host: ebox.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9821
- host: euser.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9823
- host: umu.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9825
- host: gov.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9827
- host: report.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9829
- host: lops.wlhiot.cn
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: mix
port:
number: 9841
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mix
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: mix
template:
metadata:
annotations:
timestamp: "1"
branch: develop
repository: harbor.wlhiot.com:8080
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9113"
labels:
app: mix
app.kubernetes.io/name: mix
app.kubernetes.io/random: '97' # Changes on every CI/CD run
app.kubernetes.io/jobname: 'lops_vue_test_k8s'
app.kubernetes.io/component: project
app.kubernetes.io/part-of: mix
app.kubernetes.io/managed-by: kubernetes
app.kubernetes.io/created-by: controller-manager
spec:
restartPolicy: Always
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mix
topologyKey: kubernetes.io/hostname
containers:
- name: mix-vue
image: 192.168.254.29:8080/test/mix_vue # All frontend projects are bundled into this image
imagePullPolicy: Always
ports:
- containerPort: 9811
- containerPort: 9813
- containerPort: 9815
- containerPort: 9821
- containerPort: 9823
- containerPort: 9825
- containerPort: 9827
- containerPort: 9829
- containerPort: 9841
readinessProbe:
httpGet:
path: /
port: 80
httpHeaders:
- name: Custom-Header
value: Awesome
initialDelaySeconds: 10
periodSeconds: 10
failureThreshold: 5
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 2
resources:
requests:
cpu: 250m
memory: 750Mi
- name: nginx-exporter
image: fish/nginx-exporter
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 9113
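The whole file is applied in one shot; the Service, Ingress, and Deployment are created or updated together:
[root@k8s-master ~]# kubectl apply -f /usr/local/wlhiot/container/kubernetes/project/mix/mix.yaml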
Final Deployment Result
The mix deployment bundles the frontends of every project; the other deployments contain only the Java backends.
[root@k8s-master ~]# kubectl get pod -o wide -n default
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
apigateway-7448f5b4f4-8wmfb 1/1 Running 0 43h 10.20.3.110 k8s-node03 <none> <none>
apigateway-7448f5b4f4-mn5vf 1/1 Running 0 46h 10.20.2.150 k8s-node02 <none> <none>
ebox-b9d657b84-9lfnf 1/1 Running 0 14d 10.20.1.7 k8s-node01 <none> <none>
ebox-b9d657b84-z2gzm 1/1 Running 0 44h 10.20.3.109 k8s-node03 <none> <none>
euser-b875bf774-t42q2 1/1 Running 0 14d 10.20.1.6 k8s-node01 <none> <none>
euser-b875bf774-x8mm2 1/1 Running 0 43h 10.20.7.223 k8s-node07 <none> <none>
lops-5549948497-q6gkr 1/1 Running 0 46h 10.20.7.218 k8s-node07 <none> <none>
mix-85c8848b9f-22xts 2/2 Running 0 42h 10.20.5.79 k8s-node05 <none> <none>
mix-85c8848b9f-dzn8h 2/2 Running 0 46h 10.20.7.217 k8s-node07 <none> <none>
newfire-5f5478f5b5-wjs6r 1/1 Running 0 43h 10.20.6.116 k8s-node06 <none> <none>
newfire-5f5478f5b5-x7lqv 1/1 Running 0 44h 10.20.3.108 k8s-node03 <none> <none>
nfs-client-provisioner-ccfcc5df7-jhps4 1/1 Running 0 42h 10.20.5.80 k8s-node05 <none> <none>
old-wiot-6c95947d99-twpq5 1/1 Running 0 44h 10.20.3.107 k8s-node03 <none> <none>
old-wiot-6c95947d99-wvj8m 1/1 Running 0 42h 10.20.5.77 k8s-node05 <none> <none>
report-659d5d47c4-kdjvz 1/1 Running 0 44h 10.20.7.222 k8s-node07 <none> <none>
report-659d5d47c4-t9mpw 1/1 Running 0 42h 10.20.5.78 k8s-node05 <none> <none>
signal-675f6d6d8b-6vzr5 1/1 Running 0 44h 10.20.2.153 k8s-node02 <none> <none>
signal-675f6d6d8b-cb5zt 1/1 Running 0 43h 10.20.6.117 k8s-node06 <none> <none>
uniaccfire-66699fb5cb-bst65 1/1 Running 0 42h 10.20.5.75 k8s-node05 <none> <none>
uniaccfire-66699fb5cb-hfwjm 1/1 Running 0 46h 10.20.2.148 k8s-node02 <none> <none>
wiot-86dd94776b-jbrxb 1/1 Running 0 42h 10.20.5.74 k8s-node05 <none> <none>
wiot-86dd94776b-kp8rq 1/1 Running 0 46h 10.20.7.220 k8s-node07 <none> <none>
[root@k8s-master ~]# kubectl get svc -o wide -n default
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
apigateway NodePort 10.254.49.236 <none> 8080:30002/TCP 229d app=apigateway
ebox NodePort 10.254.17.255 <none> 30822:30822/TCP,8221:30221/TCP 29d app=ebox
euser NodePort 10.254.95.32 <none> 8080:30622/TCP 100d app=euser
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 392d <none>
lops NodePort 10.254.42.202 <none> 8892:30722/TCP 47h app=lops
mix NodePort 10.254.92.87 <none> 9811:30911/TCP,9813:30913/TCP,9815:30915/TCP,9821:30921/TCP,9823:30923/TCP,9825:30925/TCP,9827:30927/TCP,9829:30929/TCP,9841:30941/TCP 36d app=mix
newfire NodePort 10.254.103.195 <none> 8080:30812/TCP 36d app=newfire
old-wiot NodePort 10.254.79.134 <none> 8080:30816/TCP,9211:30211/TCP 30d app=old-wiot
report NodePort 10.254.33.172 <none> 30826:30826/TCP 29d app=report
signal NodePort 10.254.109.174 <none> 30624:30624/TCP 127d app=signal
uniaccfire NodePort 10.254.138.246 <none> 8080:30814/TCP 58d app=uniaccfire
wiot NodePort 10.254.245.230 <none> 8080:30830/TCP 86d app=wiot
[root@k8s-master ~]# kubectl get svc -o wide -n ingress-nginx ingress-nginx-controller # the ingress controller
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
ingress-nginx-controller NodePort 10.254.21.242 <none> 80:30080/TCP,443:30443/TCP 105d app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingressnginx,app.kubernetes.io/name=ingress-nginx
[root@k8s-master ~]# kubectl describe ingress mix-ingrees -n default
Name: mix-ingrees
Namespace: default
Address: 10.254.21.242
Default backend: default-http-backend:80 (<none>)
Rules:
Host Path Backends
---- ---- --------
fire.wlhiot.cn
/ mix:9811 (10.20.5.79:9811,10.20.7.217:9811)
fuser.wlhiot.cn
/ mix:9813 (10.20.5.79:9813,10.20.7.217:9813)
wiot.wlhiot.cn
/ mix:9815 (10.20.5.79:9815,10.20.7.217:9815)
ebox.wlhiot.cn
/ mix:9821 (10.20.5.79:9821,10.20.7.217:9821)
euser.wlhiot.cn
/ mix:9823 (10.20.5.79:9823,10.20.7.217:9823)
umu.wlhiot.cn
/ mix:9825 (10.20.5.79:9825,10.20.7.217:9825)
gov.wlhiot.cn
/ mix:9827 (10.20.5.79:9827,10.20.7.217:9827)
report.wlhiot.cn
/ mix:9829 (10.20.5.79:9829,10.20.7.217:9829)
lops.wlhiot.cn
/ mix:9841 (10.20.5.79:9841,10.20.7.217:9841)
Annotations: nginx.ingress.kubernetes.io/rewrite-target: /
Events: <none>
Final Result
Each service is reached through its assigned domain name plus the ingress controller's external port.
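If the *.wlhiot.cn domains do not resolve from the client machine, pointing them at any cluster node in the client's /etc/hosts is enough for testing (the node IP is a placeholder; 30080 is the controller's HTTP NodePort from the output above):
<node-ip> ebox.wlhiot.cn fire.wlhiot.cn lops.wlhiot.cn
Each frontend is then reachable in a browser at http://<domain>:30080/, for example http://ebox.wlhiot.cn:30080/.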