I. Preface
1. Overview
nodeSelector provides a way to pin Pods to nodes via node labels, achieving fixed scheduling, but the mechanism is rather rigid: a Pod either matches the labels exactly or is not scheduled at all. A minimal example is shown below.
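For reference, a minimal nodeSelector sketch (the Pod name and the disktype=ssd label are hypothetical, not part of the test environment used later):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-selector          # hypothetical name, for illustration only
spec:
  containers:
  - name: nginx
    image: nginx:latest
  nodeSelector:
    disktype: ssd               # Pod may only run on nodes labeled disktype=ssd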
2. Types of affinity and anti-affinity
nodeAffinity (node affinity)
Node affinity targets node labels. Like nodeSelector, it constrains Pod scheduling through the labels on nodes.
podAffinity (Pod affinity) and podAntiAffinity (Pod anti-affinity)
Pod affinity and anti-affinity target Pod labels instead. They constrain scheduling through the labels of Pods that are already running, but they need one extra condition: a topologyKey (a topology domain; all nodes that carry a label with the same key and the same value belong to the same domain). The Pod label selector works together with the topologyKey to decide where a Pod may run. In short, Pod affinity and anti-affinity schedule against topology domains, not against individual nodes.
3. Scheduling strategies
requiredDuringSchedulingIgnoredDuringExecution (hard; the condition must be met): the Pod is only scheduled onto nodes that satisfy the rule; if none do, the Pod stays in the Pending state.
preferredDuringSchedulingIgnoredDuringExecution (soft; matching nodes are preferred): the scheduler favors nodes that satisfy the rule but still schedules the Pod elsewhere if none match. A structural sketch follows this list.
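To show where these fields sit relative to each other, here is a skeletal sketch of the affinity stanza in a Pod spec (the field names are the actual Kubernetes API fields; the empty selectors are placeholders to be filled in as in the examples below):

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:     # hard: must match
      nodeSelectorTerms:
      - matchExpressions: []                            # node-label rules go here
    preferredDuringSchedulingIgnoredDuringExecution:    # soft: scored by weight
    - weight: 1
      preference:
        matchExpressions: []
  podAffinity:                                          # podAntiAffinity has the same shape
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions: []                            # Pod-label rules go here
      topologyKey: kubernetes.io/hostname               # defines the topology domain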
4. Operators supported by affinity
In: the label's value is in the given list
NotIn: the label's value is not in the given list
Exists: the label key exists on the node
DoesNotExist: the label key does not exist on the node
Gt: the label's value is greater than the given value
Lt: the label's value is less than the given value
Exists and DoesNotExist take no values field, as the sketch below shows.
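A sketch of the value-less operators (the gpu and maintenance label keys are made up for illustration):

- matchExpressions:
  - key: gpu
    operator: Exists          # matches any node carrying a "gpu" label, whatever its value
  - key: maintenance
    operator: DoesNotExist    # and that does not carry a "maintenance" label at all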
II. nodeAffinity (node affinity)
1. Test environment preparation
Label the nodes:
[root@master ~]# kubectl label node node1 type=test1
node/node1 labeled
[root@master ~]# kubectl label node node2 type=test2
node/node2 labeled
[root@master ~]# kubectl label node node1 num=1
node/node1 labeled
[root@master ~]# kubectl label node node2 num=3
node/node2 labeled
[root@master ~]# kubectl get node --show-labels
NAME     STATUS   ROLES    AGE   VERSION   LABELS
master   Ready    master   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/master=
node1    Ready    <none>   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux,num=1,type=test1
node2    Ready    <none>   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux,num=3,type=test2
node1 now carries type=test1 and num=1; node2 carries type=test2 and num=3.
2. Node hard affinity
[root@master ~]# vi affinity_node.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx-pod
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
      affinity:
        nodeAffinity:                                       # node affinity
          requiredDuringSchedulingIgnoredDuringExecution:   # hard node affinity
            nodeSelectorTerms:                              # node label selector terms
            - matchExpressions:
              - key: type
                operator: In        # operator
                values:             # with several values, matching any one is enough
                - test1
                - test2
              - key: num
                operator: Gt        # greater than
                values:
                - "2"
Putting this together, the Pods will be scheduled onto node2: node2 satisfies both expressions (type=test2 is in the list, and num=3 > 2), while node1 fails num > 2.
Notes:
1. If the nodeSelectorTerms key is written more than once, only the last occurrence takes effect, because a YAML mapping cannot contain duplicate keys.
2. When nodeSelectorTerms contains multiple matchExpressions entries, satisfying any one of them is enough for the Pod to be scheduled (the entries are ORed); see the sketch after these notes.
3. When a single matchExpressions entry contains multiple keys, all of them must be satisfied (they are ANDed); otherwise the Pod stays Pending.
4. Once a Pod has been scheduled onto a node by an affinity rule, deleting that node's label does not evict the Pod; that is what IgnoredDuringExecution means.
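A sketch of the OR/AND semantics from notes 2 and 3, reusing the labels of the test environment:

nodeSelectorTerms:              # entries are ORed: matching either block is enough
- matchExpressions:             # expressions inside one entry are ANDed
  - key: type
    operator: In
    values:
    - test1
  - key: num                    # AND: the same node must also have num > 2
    operator: Gt
    values:
    - "2"
- matchExpressions:             # OR: a node matching only this entry also qualifies
  - key: type
    operator: In
    values:
    - test2

Under these terms node1 fails both entries (num=1 is not greater than 2 in the first, and type=test1 does not match the second), while node2 qualifies through the second entry alone.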
3. Node soft affinity
[root@master ~]# vi affinity_node.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx-pod
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:  # soft node affinity
          - weight: 20          # weight, range 1-100
            preference:
              matchExpressions:
              - key: type
                operator: In
                values:
                - test1
                - test2
              - key: num
                operator: Gt
                values:
                - "2"
          - weight: 30
            preference:
              matchExpressions:
              - key: type
                operator: In
                values:
                - test1
                - test2
              - key: num
                operator: Lt
                values:
                - "2"
Putting this together, the Pods will preferentially be scheduled onto node1: node1 matches the weight-30 preference (num=1 < 2) while node2 only matches the weight-20 preference (num=3 > 2), and the higher score wins.
Note: nodes matching higher-weight preferences are preferred; if no node matches any preference, the Pods are still scheduled elsewhere, because the rule is soft. For each candidate node the scheduler sums the weights of all the preferences that node matches and compares the totals.
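Because weights are summed, a node that matches several preferences outscores a node that matches only one. A minimal sketch, reusing the test labels above (node1 would score 20 + 30 = 50, node2 only 20):

preferredDuringSchedulingIgnoredDuringExecution:
- weight: 20
  preference:
    matchExpressions:
    - key: type               # both node1 and node2 match: type is in the list
      operator: In
      values:
      - test1
      - test2
- weight: 30
  preference:
    matchExpressions:
    - key: num                # only node1 matches: num=1 < 2
      operator: Lt
      values:
      - "2"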
III. podAffinity (Pod affinity) and podAntiAffinity (Pod anti-affinity)
1. Test environment preparation
Label the nodes; the labels applied above stay in place, and the following are added:
[root@master ~]# kubectl label node master key=a
[root@master ~]# kubectl label node node2 key=a
[root@master ~]# kubectl get node --show-labels
NAME     STATUS   ROLES    AGE   VERSION   LABELS
master   Ready    master   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,key=a,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/master=
node1    Ready    <none>   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux,num=1,type=test1
node2    Ready    <none>   12d   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,key=a,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux,num=3,type=test2
master now carries key=a; node1 carries type=test1 and num=1; node2 carries key=a, type=test2, and num=3.
Since nodes holding a label with the same key and value belong to the same topology domain, master and node2 form one topology domain under key=a. If we used the default beta.kubernetes.io/arch label instead, all three nodes would fall into a single topology domain.
Before writing the Pod affinity rules, run a Pod for testing first:
[root@master ~]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
myapp 1/1 Running 0 19s app=myapp
2. Pod affinity
[root@master ~]# vi affinity_pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx-pod
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:  # hard affinity
          - labelSelector:
              matchExpressions:
              - key: app        # label of the already-running Pod
                operator: In
                values:
                - myapp
            topologyKey: key    # key of the topology-domain label
Putting this together, the Pods will only be scheduled onto node2 or master; node1 will not be considered.
What this manifest says: within the topology domains defined by the key label, if a Pod labeled app=myapp is already running, schedule the new Pods anywhere inside that same topology domain; if no such Pod exists, they stay Pending.
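The choice of topologyKey controls how tightly Pods are packed. As a sketch of a common variant (not part of the original test): with topologyKey: kubernetes.io/hostname every node is its own topology domain, so the same rule would force the new Pods onto the exact node where the myapp Pod runs.

podAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
  - labelSelector:
      matchExpressions:
      - key: app
        operator: In
        values:
        - myapp
    topologyKey: kubernetes.io/hostname   # one node per domain: co-locate on the same node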
3. Pod anti-affinity
[root@master ~]# vi affinity_pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx-pod
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:  # soft anti-affinity
          - weight: 100         # weight
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - myapp
              topologyKey: key  # topology-domain label key
Putting this together, the Pods will preferentially be scheduled onto node1, because master and node2 share a topology domain (key=a) in which a Pod labeled app=myapp is already running.
Pod anti-affinity also works against topology domains: if the matching Pod is found inside a topology domain, the scheduler avoids placing new Pods anywhere in that domain and prefers nodes outside it.
In short, anti-affinity schedules Pods onto nodes outside the topology domain the rule identifies. A common application is sketched below.
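For instance, a Deployment can point the anti-affinity rule at its own Pod label so that replicas spread across nodes. A sketch, assuming the app=nginx label from the manifests above:

affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions:
        - key: app
          operator: In
          values:
          - nginx                            # the Deployment's own Pod label
      topologyKey: kubernetes.io/hostname    # allow at most one such Pod per node

With the hard form, replicas beyond the number of schedulable nodes stay Pending; switching to preferredDuringSchedulingIgnoredDuringExecution lets the extra replicas double up instead.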
