Browse Source

Remove outdated CriticalAddonsOnly toleration and critical-pod annotation (#6202)

pull/6249/head
Florian Ruynat 4 years ago
committed by GitHub
parent
commit
101686c665
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 0 additions and 46 deletions
  1. 2
      roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
  2. 3
      roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2
  3. 2
      roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2
  4. 3
      roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2
  5. 3
      roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2
  6. 2
      roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2
  7. 2
      roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
  8. 3
      roles/network_plugin/canal/templates/canal-node.yaml.j2
  9. 5
      roles/network_plugin/cilium/templates/cilium-ds.yml.j2
  10. 3
      roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
  11. 3
      roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
  12. 3
      roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
  13. 3
      roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
  14. 3
      roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
  15. 3
      roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
  16. 3
      roles/network_plugin/kube-router/templates/kube-router.yml.j2

2
roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2

@@ -31,8 +31,6 @@ spec:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
{% if dns_extra_tolerations | default(None) %} {% if dns_extra_tolerations | default(None) %}
{{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }} {{ dns_extra_tolerations | list | to_nice_yaml(indent=2) | indent(8) }}
{% endif %} {% endif %}

3
roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2

@@ -30,7 +30,6 @@ spec:
labels: labels:
k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }} k8s-app: dns-autoscaler{{ coredns_ordinal_suffix }}
annotations: annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec: spec:
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
@@ -43,8 +42,6 @@ spec:
- effect: NoSchedule - effect: NoSchedule
operator: Equal operator: Equal
key: node-role.kubernetes.io/master key: node-role.kubernetes.io/master
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity: affinity:
podAntiAffinity: podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution: requiredDuringSchedulingIgnoredDuringExecution:

2
roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2

@@ -27,8 +27,6 @@ spec:
operator: "Exists" operator: "Exists"
- effect: NoExecute - effect: NoExecute
operator: "Exists" operator: "Exists"
- key: "CriticalAddonsOnly"
operator: "Exists"
containers: containers:
- name: node-cache - name: node-cache
image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}" image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"

3
roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-controllerservice.yml.j2

@@ -20,9 +20,6 @@ spec:
kubernetes.io/os: linux kubernetes.io/os: linux
serviceAccount: ebs-csi-controller-sa serviceAccount: ebs-csi-controller-sa
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
containers: containers:
- name: ebs-plugin - name: ebs-plugin
image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }} image: {{ aws_ebs_csi_plugin_image_repo }}:{{ aws_ebs_csi_plugin_image_tag }}

3
roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-nodeservice.yml.j2

@@ -20,9 +20,6 @@ spec:
kubernetes.io/os: linux kubernetes.io/os: linux
hostNetwork: true hostNetwork: true
priorityClassName: system-node-critical priorityClassName: system-node-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
containers: containers:
- name: ebs-plugin - name: ebs-plugin
securityContext: securityContext:

2
roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2

@@ -120,8 +120,6 @@ spec:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
{% endif %} {% endif %}
affinity: affinity:
nodeAffinity: nodeAffinity:

2
roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2

@@ -24,8 +24,6 @@ spec:
hostNetwork: true hostNetwork: true
serviceAccountName: calico-kube-controllers serviceAccountName: calico-kube-controllers
tolerations: tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical

3
roles/network_plugin/canal/templates/canal-node.yaml.j2

@@ -21,9 +21,6 @@ spec:
serviceAccountName: canal serviceAccountName: canal
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
# Used by calico/node. # Used by calico/node.
- name: lib-modules - name: lib-modules

5
roles/network_plugin/cilium/templates/cilium-ds.yml.j2

@@ -16,11 +16,6 @@ spec:
prometheus.io/port: "9090" prometheus.io/port: "9090"
prometheus.io/scrape: "true" prometheus.io/scrape: "true"
{% endif %} {% endif %}
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium to be a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
labels: labels:
k8s-app: cilium k8s-app: cilium

3
roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2

@@ -29,9 +29,6 @@ spec:
node-role.kubernetes.io/master: "" node-role.kubernetes.io/master: ""
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netmaster serviceAccountName: contiv-netmaster
containers: containers:
- name: contiv-api-proxy - name: contiv-api-proxy

3
roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2

@@ -21,9 +21,6 @@ spec:
hostPID: true hostPID: true
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netplugin serviceAccountName: contiv-netplugin
containers: containers:
- name: contiv-ovs-cleanup - name: contiv-ovs-cleanup

3
roles/network_plugin/contiv/templates/contiv-etcd.yml.j2

@@ -23,9 +23,6 @@ spec:
node-role.kubernetes.io/master: "" node-role.kubernetes.io/master: ""
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
initContainers: initContainers:
- name: contiv-etcd-init - name: contiv-etcd-init
image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }} image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}

3
roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2

@@ -29,9 +29,6 @@ spec:
node-role.kubernetes.io/master: "" node-role.kubernetes.io/master: ""
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netmaster serviceAccountName: contiv-netmaster
containers: containers:
- name: contiv-netmaster - name: contiv-netmaster

3
roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2

@@ -26,9 +26,6 @@ spec:
hostPID: true hostPID: true
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
serviceAccountName: contiv-netplugin serviceAccountName: contiv-netplugin
initContainers: initContainers:
- name: contiv-netplugin-init - name: contiv-netplugin-init

3
roles/network_plugin/contiv/templates/contiv-ovs.yml.j2

@@ -23,9 +23,6 @@ spec:
hostPID: true hostPID: true
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
containers: containers:
# Runs ovs containers on each Kubernetes node. # Runs ovs containers on each Kubernetes node.
- name: contiv-ovsdb-server - name: contiv-ovsdb-server

3
roles/network_plugin/kube-router/templates/kube-router.yml.j2

@@ -112,9 +112,6 @@ spec:
{% endif %} {% endif %}
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
{% if kube_router_enable_dsr %} {% if kube_router_enable_dsr %}
- name: docker-socket - name: docker-socket

Loading…
Cancel
Save