Browse Source

ensure there is pin priority for docker package to avoid upgrade of docker to incompatible version

ensure there is pin priority for docker package to avoid upgrade of docker to incompatible version

remove empty when line

ensure there is pin priority for docker package to avoid upgrade of docker to incompatible version

force kubeadm upgrade due to failure without --force flag

ensure there is pin priority for docker package to avoid upgrade of docker to incompatible version

added nodeSelector to have compatibility with hybrid cluster with win nodes, also fix for download with missing container type

fixes in syntax and LF for newline in files

fix on yamllint check

ensure there is pin priority for docker package to avoid upgrade of docker to incompatible version

some cleanup for unnecessary lines

remove conditions for nodeselector
pull/3200/head
Pablo Estigarribia 6 years ago
parent
commit
7cbe3c2171
19 changed files with 85 additions and 1 deletions
  1. 1
      cluster.yml
  2. 3
      roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2
  3. 3
      roles/dnsmasq/templates/dnsmasq-deploy.yml.j2
  4. 9
      roles/docker/tasks/main.yml
  5. 3
      roles/docker/templates/apt_preferences.d/debian_docker.j2
  6. 1
      roles/etcd/tasks/main.yml
  7. 3
      roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
  8. 3
      roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
  9. 3
      roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
  10. 3
      roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
  11. 3
      roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
  12. 3
      roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2
  13. 1
      roles/kubernetes/master/tasks/kubeadm-setup.yml
  14. 3
      roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
  15. 3
      roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
  16. 3
      roles/network_plugin/flannel/templates/cni-flannel.yml.j2
  17. 3
      roles/win_nodes/kubernetes_patch/defaults/main.yml
  18. 1
      roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json
  19. 34
      roles/win_nodes/kubernetes_patch/tasks/main.yml

1
cluster.yml

@@ -93,6 +93,7 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: win_nodes/kubernetes_patch, tags: win_nodes, when: "kubeadm_enabled" }
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

3
roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2

@@ -52,3 +52,6 @@ spec:
- --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
- --logtostderr=true
- --v={{ kube_log_level }}
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux

3
roles/dnsmasq/templates/dnsmasq-deploy.yml.j2

@@ -24,6 +24,9 @@ spec:
tolerations:
- effect: NoSchedule
operator: Exists
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: dnsmasq
image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"

9
roles/docker/tasks/main.yml

@@ -136,6 +136,15 @@
msg: "{{available_packages}}"
when: docker_task_result|failed
# This is required to ensure any apt upgrade will not break kubernetes
- name: Set docker pin priority to apt_preferences on Debian family
template:
src: "apt_preferences.d/debian_docker.j2"
dest: "/etc/apt/preferences.d/docker"
owner: "root"
mode: 0644
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse"] or is_atomic)
- name: ensure service is started if docker packages are already present
service:
name: docker

3
roles/docker/templates/apt_preferences.d/debian_docker.j2

@@ -0,0 +1,3 @@
Package: docker-ce
Pin: version {{ docker_version }}.*
Pin-Priority: 1001

1
roles/etcd/tasks/main.yml

@@ -6,7 +6,6 @@
- facts
- include_tasks: "gen_certs_{{ cert_management }}.yml"
when:
tags:
- etcd-secrets

3
roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2

@@ -28,6 +28,9 @@ spec:
labels:
k8s-app: kubedns-autoscaler
spec:
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
operator: Equal

3
roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2

@@ -27,6 +27,9 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"

3
roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2

@@ -15,6 +15,9 @@ spec:
tolerations:
- effect: NoSchedule
operator: Exists
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: netchecker-agent
image: "{{ agent_img }}"

3
roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2

@@ -13,6 +13,9 @@ spec:
app: netchecker-agent-hostnet
spec:
hostNetwork: True
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}

3
roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2

@@ -29,6 +29,9 @@ spec:
spec:
priorityClassName: system-node-critical
serviceAccountName: efk
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"

3
roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/deploy-default-backend.yml.j2

@@ -42,3 +42,6 @@ spec:
requests:
cpu: 10m
memory: 20Mi
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux

1
roles/kubernetes/master/tasks/kubeadm-setup.yml

@@ -121,6 +121,7 @@
--ignore-preflight-errors=all
--allow-experimental-upgrades
--allow-release-candidate-upgrades
--force
register: kubeadm_upgrade
# Retry is because upload config sometimes fails
retries: 3

3
roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2

@@ -12,6 +12,9 @@ spec:
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirst
{% endif %}
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: kube-proxy
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}

3
roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2

@@ -7,6 +7,9 @@ metadata:
k8s-app: kube-nginx
spec:
hostNetwork: true
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: nginx-proxy
image: {{ nginx_image_repo }}:{{ nginx_image_tag }}

3
roles/network_plugin/flannel/templates/cni-flannel.yml.j2

@@ -53,6 +53,9 @@ spec:
k8s-app: flannel
spec:
serviceAccountName: flannel
# When having win nodes in cluster without this patch, this pod could try to be created in windows
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag }}

3
roles/win_nodes/kubernetes_patch/defaults/main.yml

@@ -0,0 +1,3 @@
---
kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests"

1
roles/win_nodes/kubernetes_patch/files/nodeselector-os-linux-patch.json

@@ -0,0 +1 @@
{"spec":{"template":{"spec":{"nodeSelector":{"beta.kubernetes.io/os":"linux"}}}}}

34
roles/win_nodes/kubernetes_patch/tasks/main.yml

@@ -0,0 +1,34 @@
---
- name: Ensure that user manifests directory exists
file:
path: "{{ kubernetes_user_manifests_path }}/kubernetes"
state: directory
recurse: yes
tags: [init, cni]
- name: Apply kube-proxy nodeselector
block:
- name: Copy kube-proxy daemonset nodeselector patch
copy:
src: nodeselector-os-linux-patch.json
dest: "{{ kubernetes_user_manifests_path }}/nodeselector-os-linux-patch.json"
# Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
- name: Check current nodeselector for kube-proxy daemonset
shell: kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta\.kubernetes\.io/os}'
register: current_kube_proxy_state
- name: Apply nodeselector patch for kube-proxy daemonset
shell: kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p "$(cat nodeselector-os-linux-patch.json)"
args:
chdir: "{{ kubernetes_user_manifests_path }}"
register: patch_kube_proxy_state
when: current_kube_proxy_state.stdout | trim | lower != "linux"
- debug: msg={{ patch_kube_proxy_state.stdout_lines }}
when: patch_kube_proxy_state is not skipped
- debug: msg={{ patch_kube_proxy_state.stderr_lines }}
when: patch_kube_proxy_state is not skipped
tags: init
Loading…
Cancel
Save