Browse Source

fix apply for netchecker upgrade (#1659)

* fix apply for netchecker upgrade and graceful upgrade

* Speed up daemonset upgrades. Make check wait for ds upgrades.
pull/1665/head
Matthew Mosesohn 7 years ago
committed by GitHub
parent
commit
b294db5aed
9 changed files with 21 additions and 21 deletions
  1. 1
      roles/kubernetes-apps/ansible/tasks/netchecker.yml
  2. 3
      roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2
  3. 2
      roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2
  4. 10
      roles/kubernetes/master/tasks/pre-upgrade.yml
  5. 2
      roles/network_plugin/calico/templates/calico-node.yml.j2
  6. 2
      roles/network_plugin/canal/templates/canal-node.yaml.j2
  7. 4
      roles/network_plugin/flannel/templates/cni-flannel.yml.j2
  8. 2
      roles/network_plugin/weave/templates/weave-net.yml.j2
  9. 16
      tests/testcases/030_check-network.yml

1
roles/kubernetes-apps/ansible/tasks/netchecker.yml

@@ -10,6 +10,7 @@
kube:
name: "netchecker-server"
namespace: "{{ netcheck_namespace }}"
filename: "{{ netchecker_server_manifest.stat.path }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "deploy"
state: latest

3
roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2

@@ -42,6 +42,5 @@ spec:
memory: {{ netchecker_agent_memory_requests }}
updateStrategy:
rollingUpdate:
maxUnavailable: 1
maxUnavailable: 100%
type: RollingUpdate

2
roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2

@@ -46,5 +46,5 @@ spec:
memory: {{ netchecker_agent_memory_requests }}
updateStrategy:
rollingUpdate:
maxUnavailable: 1
maxUnavailable: 100%
type: RollingUpdate

10
roles/kubernetes/master/tasks/pre-upgrade.yml

@@ -13,22 +13,18 @@
kube_apiserver_storage_backend: "etcd2"
when: old_data_exists.rc == 0 and not force_etcd3|bool
- name: "Pre-upgrade | Delete master manifests on all kube-masters"
- name: "Pre-upgrade | Delete master manifests"
file:
path: "/etc/kubernetes/manifests/{{item[1]}}.manifest"
path: "/etc/kubernetes/manifests/{{item}}.manifest"
state: absent
delegate_to: "{{item[0]}}"
with_nested:
- "{{groups['kube-master']}}"
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
register: kube_apiserver_manifest_replaced
when: (secret_changed|default(false) or etcd_secret_changed|default(false))
- name: "Pre-upgrade | Delete master containers forcefully on all kube-masters"
- name: "Pre-upgrade | Delete master containers forcefully"
shell: "docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f"
delegate_to: "{{item[0]}}"
with_nested:
- "{{groups['kube-master']}}"
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
run_once: true

2
roles/network_plugin/calico/templates/calico-node.yml.j2

@@ -161,6 +161,6 @@ spec:
path: "{{ calico_cert_dir }}"
updateStrategy:
rollingUpdate:
maxUnavailable: 1
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

2
roles/network_plugin/canal/templates/canal-node.yaml.j2

@@ -190,5 +190,5 @@ spec:
readOnly: true
updateStrategy:
rollingUpdate:
maxUnavailable: 1
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

4
roles/network_plugin/flannel/templates/cni-flannel.yml.j2

@@ -121,5 +121,5 @@ spec:
path: /opt/cni/bin
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

2
roles/network_plugin/weave/templates/weave-net.yml.j2

@@ -156,6 +156,6 @@ items:
path: /lib/modules
updateStrategy:
rollingUpdate:
maxUnavailable: 1
maxUnavailable: {{ serial | default('20%') }}
type: RollingUpdate

16
tests/testcases/030_check-network.yml

@@ -12,16 +12,11 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- name: Check kubectl output
shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
register: get_pods
- debug: msg="{{get_pods.stdout.split('\n')}}"
- name: Get pod names
shell: "{{bin_dir}}/kubectl get pods -o json"
register: pods
until: '"ContainerCreating" not in pods.stdout'
until: '"ContainerCreating" not in pods.stdout and "Terminating" not in pods.stdout'
retries: 60
delay: 2
no_log: true
@@ -30,11 +25,20 @@
command: "{{bin_dir}}/kubectl get pods -o
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: hostnet_pods
no_log: true
- name: Get running pods
command: "{{bin_dir}}/kubectl get pods -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: running_pods
no_log: true
- name: Check kubectl output
shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
register: get_pods
no_log: true
- debug: msg="{{get_pods.stdout.split('\n')}}"
- set_fact:
kube_pods_subnet: 10.233.64.0/18

Loading…
Cancel
Save