
Refactor "multi" handlers to use listen (#10542)

* containerd: refactor handlers to use 'listen'

* cri-dockerd: refactor handlers to use 'listen'

* cri-o: refactor handlers to use 'listen'

* docker: refactor handlers to use 'listen'

* etcd: refactor handlers to use 'listen'

* control-plane: refactor handlers to use 'listen'

* kubeadm: refactor handlers to use 'listen'

* node: refactor handlers to use 'listen'

* preinstall: refactor handlers to use 'listen'

* calico: refactor handlers to use 'listen'

* kube-router: refactor handlers to use 'listen'

* macvlan: refactor handlers to use 'listen'
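
The pattern throughout is the same: the old "multi" handlers were no-op `command: /bin/true` tasks that fanned out to the real handlers via `notify`; this refactor deletes those meta-handlers and has each real handler subscribe to the topic name with `listen` instead. A condensed before/after sketch, taken from the containerd diff below:

Before — a /bin/true meta-handler chains the real handlers:

  - name: Restart containerd
    command: /bin/true
    notify:
      - Containerd | restart containerd

  - name: Containerd | restart containerd
    systemd:
      name: containerd
      state: restarted

After — the real handler listens on the topic, and the meta-handler is gone:

  - name: Containerd | restart containerd
    systemd:
      name: containerd
      state: restarted
    listen: Restart containerd

Callers still `notify: Restart containerd` unchanged; note that listening handlers run in the order they are defined in the handlers file, not in the order of the old notify lists.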
Max Gautier authored 1 year ago, committed by GitHub
commit 8ebeb88e57 (pull/10606/head)
14 changed files with 85 additions and 158 deletions

  1. roles/container-engine/containerd/handlers/main.yml (8 changes)
  2. roles/container-engine/cri-dockerd/handlers/main.yml (14 changes)
  3. roles/container-engine/cri-o/handlers/main.yml (8 changes)
  4. roles/container-engine/docker/handlers/main.yml (14 changes)
  5. roles/etcd/handlers/backup.yml (25 changes)
  6. roles/etcd/handlers/backup_cleanup.yml (8 changes)
  7. roles/etcd/handlers/main.yml (23 changes)
  8. roles/kubernetes/control-plane/handlers/main.yml (52 changes)
  9. roles/kubernetes/kubeadm/handlers/main.yml (8 changes)
  10. roles/kubernetes/node/handlers/main.yml (8 changes)
  11. roles/kubernetes/preinstall/handlers/main.yml (40 changes)
  12. roles/network_plugin/calico/handlers/main.yml (20 changes)
  13. roles/network_plugin/kube-router/handlers/main.yml (8 changes)
  14. roles/network_plugin/macvlan/handlers/main.yml (7 changes)

roles/container-engine/containerd/handlers/main.yml (8 changes)

@@ -1,10 +1,4 @@
---
- name: Restart containerd
command: /bin/true
notify:
- Containerd | restart containerd
- Containerd | wait for containerd
- name: Containerd | restart containerd
systemd:
name: containerd
@@ -12,6 +6,7 @@
enabled: yes
daemon-reload: yes
masked: no
listen: Restart containerd
- name: Containerd | wait for containerd
command: "{{ containerd_bin_dir }}/ctr images ls -q"
@@ -19,3 +14,4 @@
retries: 8
delay: 4
until: containerd_ready.rc == 0
listen: Restart containerd

roles/container-engine/cri-dockerd/handlers/main.yml (14 changes)

@@ -1,35 +1,31 @@
---
- name: Restart and enable cri-dockerd
command: /bin/true
notify:
- Cri-dockerd | reload systemd
- Cri-dockerd | restart docker.service
- Cri-dockerd | reload cri-dockerd.socket
- Cri-dockerd | reload cri-dockerd.service
- Cri-dockerd | enable cri-dockerd service
- name: Cri-dockerd | reload systemd
systemd:
name: cri-dockerd
daemon_reload: true
masked: no
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | restart docker.service
service:
name: docker.service
state: restarted
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | reload cri-dockerd.socket
service:
name: cri-dockerd.socket
state: restarted
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | reload cri-dockerd.service
service:
name: cri-dockerd.service
state: restarted
listen: Restart and enable cri-dockerd
- name: Cri-dockerd | enable cri-dockerd service
service:
name: cri-dockerd.service
enabled: yes
listen: Restart and enable cri-dockerd

roles/container-engine/cri-o/handlers/main.yml (8 changes)

@@ -1,16 +1,12 @@
---
- name: Restart crio
command: /bin/true
notify:
- CRI-O | reload systemd
- CRI-O | reload crio
- name: CRI-O | reload systemd
systemd:
daemon_reload: true
listen: Restart crio
- name: CRI-O | reload crio
service:
name: crio
state: restarted
enabled: yes
listen: Restart crio

roles/container-engine/docker/handlers/main.yml (14 changes)

@@ -1,28 +1,25 @@
---
- name: Restart docker
command: /bin/true
notify:
- Docker | reload systemd
- Docker | reload docker.socket
- Docker | reload docker
- Docker | wait for docker
- name: Docker | reload systemd
systemd:
name: docker
daemon_reload: true
masked: no
listen: Restart docker
- name: Docker | reload docker.socket
service:
name: docker.socket
state: restarted
when: ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] or is_fedora_coreos
listen: Restart docker
- name: Docker | reload docker
service:
name: docker
state: restarted
listen: Restart docker
- name: Docker | wait for docker
command: "{{ docker_bin_dir }}/docker images"
@@ -30,3 +27,4 @@
retries: 20
delay: 1
until: docker_ready.rc == 0
listen: Restart docker

roles/etcd/handlers/backup.yml (25 changes)

@@ -1,22 +1,14 @@
---
- name: Backup etcd data
command: /bin/true
notify:
- Refresh Time Fact
- Set Backup Directory
- Create Backup Directory
- Stat etcd v2 data directory
- Backup etcd v2 data
- Backup etcd v3 data
when: etcd_cluster_is_healthy.rc == 0
- name: Refresh Time Fact
setup:
filter: ansible_date_time
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0
- name: Set Backup Directory
set_fact:
etcd_backup_directory: "{{ etcd_backup_prefix }}/etcd-{{ ansible_date_time.date }}_{{ ansible_date_time.time }}"
listen: Restart etcd
- name: Create Backup Directory
file:
@@ -25,6 +17,8 @@
owner: root
group: root
mode: 0600
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0
- name: Stat etcd v2 data directory
stat:
@@ -33,9 +27,13 @@
get_checksum: no
get_mime: no
register: etcd_data_dir_member
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0
- name: Backup etcd v2 data
when: etcd_data_dir_member.stat.exists
when:
- etcd_data_dir_member.stat.exists
- etcd_cluster_is_healthy.rc == 0
command: >-
{{ bin_dir }}/etcdctl backup
--data-dir {{ etcd_data_dir }}
@@ -46,6 +44,7 @@
register: backup_v2_command
until: backup_v2_command.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
listen: Restart etcd
- name: Backup etcd v3 data
command: >-
@@ -61,3 +60,5 @@
register: etcd_backup_v3_command
until: etcd_backup_v3_command.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
listen: Restart etcd
when: etcd_cluster_is_healthy.rc == 0

roles/etcd/handlers/backup_cleanup.yml (8 changes)

@@ -1,10 +1,4 @@
---
- name: Cleanup etcd backups
command: /bin/true
notify:
- Find old etcd backups
- Remove old etcd backups
- name: Find old etcd backups
ansible.builtin.find:
file_type: directory
@@ -13,6 +7,7 @@
patterns: "etcd-*"
register: _etcd_backups
when: etcd_backup_retention_count >= 0
listen: Restart etcd
- name: Remove old etcd backups
ansible.builtin.file:
@@ -20,3 +15,4 @@
path: "{{ item }}"
loop: "{{ (_etcd_backups.files | sort(attribute='ctime', reverse=True))[etcd_backup_retention_count:] | map(attribute='path') }}"
when: etcd_backup_retention_count >= 0
listen: Restart etcd

roles/etcd/handlers/main.yml (23 changes)

@@ -1,38 +1,27 @@
---
- name: Restart etcd
command: /bin/true
notify:
- Backup etcd data
- Etcd | reload systemd
- Reload etcd
- Wait for etcd up
- Cleanup etcd backups
- name: Restart etcd-events
command: /bin/true
notify:
- Etcd | reload systemd
- Reload etcd-events
- Wait for etcd-events up
- name: Backup etcd
import_tasks: backup.yml
- name: Etcd | reload systemd
systemd:
daemon_reload: true
listen:
- Restart etcd
- Restart etcd-events
- name: Reload etcd
service:
name: etcd
state: restarted
when: is_etcd_master
listen: Restart etcd
- name: Reload etcd-events
service:
name: etcd-events
state: restarted
when: is_etcd_master
listen: Restart etcd-events
- name: Wait for etcd up
uri:
@@ -44,6 +33,7 @@
until: result.status is defined and result.status == 200
retries: 60
delay: 1
listen: Restart etcd
- name: Cleanup etcd backups
import_tasks: backup_cleanup.yml
@@ -58,6 +48,7 @@
until: result.status is defined and result.status == 200
retries: 60
delay: 1
listen: Restart etcd-events
- name: Set etcd_secret_changed
set_fact:
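
Because `listen` accepts a list as well as a single string, one handler can subscribe to several topics at once; the etcd diff above uses this so a single daemon_reload serves both restart flows. Condensed from the diff:

  - name: Etcd | reload systemd
    systemd:
      daemon_reload: true
    listen:
      - Restart etcd
      - Restart etcd-events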

roles/kubernetes/control-plane/handlers/main.yml (52 changes)

@@ -1,47 +1,14 @@
---
- name: Master | restart kubelet
command: /bin/true
notify:
- Master | reload systemd
- Master | reload kubelet
- Master | wait for master static pods
- name: Master | wait for master static pods
command: /bin/true
notify:
- Master | wait for the apiserver to be running
- Master | wait for kube-scheduler
- Master | wait for kube-controller-manager
- name: Master | Restart apiserver
command: /bin/true
notify:
- Master | Remove apiserver container docker
- Master | Remove apiserver container containerd/crio
- Master | wait for the apiserver to be running
- name: Master | Restart kube-scheduler
command: /bin/true
notify:
- Master | Remove scheduler container docker
- Master | Remove scheduler container containerd/crio
- Master | wait for kube-scheduler
- name: Master | Restart kube-controller-manager
command: /bin/true
notify:
- Master | Remove controller manager container docker
- Master | Remove controller manager container containerd/crio
- Master | wait for kube-controller-manager
- name: Master | reload systemd
systemd:
daemon_reload: true
listen: Master | restart kubelet
- name: Master | reload kubelet
service:
name: kubelet
state: restarted
listen: Master | restart kubelet
- name: Master | Remove apiserver container docker
shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
@@ -52,6 +19,7 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart apiserver
- name: Master | Remove apiserver container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -62,6 +30,7 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart apiserver
- name: Master | Remove scheduler container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -72,6 +41,7 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-scheduler
- name: Master | Remove scheduler container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -82,6 +52,7 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-scheduler
- name: Master | Remove controller manager container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -92,6 +63,7 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-controller-manager
- name: Master | Remove controller manager container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -102,6 +74,7 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-controller-manager
- name: Master | wait for kube-scheduler
vars:
@@ -113,6 +86,9 @@
until: scheduler_result.status == 200
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-scheduler
- name: Master | wait for kube-controller-manager
vars:
@@ -124,6 +100,9 @@
until: controller_manager_result.status == 200
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-controller-manager
- name: Master | wait for the apiserver to be running
uri:
@@ -133,3 +112,6 @@
until: result.status == 200
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart apiserver

roles/kubernetes/kubeadm/handlers/main.yml (8 changes)

@@ -1,15 +1,11 @@
---
- name: Kubeadm | restart kubelet
command: /bin/true
notify:
- Kubeadm | reload systemd
- Kubeadm | reload kubelet
- name: Kubeadm | reload systemd
systemd:
daemon_reload: true
listen: Kubeadm | restart kubelet
- name: Kubeadm | reload kubelet
service:
name: kubelet
state: restarted
listen: Kubeadm | restart kubelet

roles/kubernetes/node/handlers/main.yml (8 changes)

@@ -1,15 +1,11 @@
---
- name: Node | restart kubelet
command: /bin/true
notify:
- Kubelet | reload systemd
- Kubelet | restart kubelet
- name: Kubelet | reload systemd
systemd:
daemon_reload: true
listen: Node | restart kubelet
- name: Kubelet | restart kubelet
service:
name: kubelet
state: restarted
listen: Node | restart kubelet

roles/kubernetes/preinstall/handlers/main.yml (40 changes)

@@ -1,38 +1,14 @@
---
- name: Preinstall | propagate resolvconf to k8s components
command: /bin/true
notify:
- Preinstall | reload kubelet
- Preinstall | kube-controller configured
- Preinstall | kube-apiserver configured
- Preinstall | restart kube-controller-manager docker
- Preinstall | restart kube-controller-manager crio/containerd
- Preinstall | restart kube-apiserver docker
- Preinstall | restart kube-apiserver crio/containerd
- Preinstall | wait for the apiserver to be running
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
- name: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
command: /bin/true
notify:
- Preinstall | apply resolvconf cloud-init
- Preinstall | reload kubelet
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Preinstall | apply resolvconf cloud-init
command: /usr/bin/coreos-cloudinit --from-file {{ resolveconf_cloud_init_conf }}
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Preinstall | update resolvconf for networkmanager
command: /bin/true
notify:
- Preinstall | reload NetworkManager
- Preinstall | reload kubelet
listen: Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
- name: Preinstall | reload NetworkManager
service:
name: NetworkManager.service
state: restarted
listen: Preinstall | update resolvconf for networkmanager
- name: Preinstall | reload kubelet
service:
@@ -46,6 +22,10 @@
- Preinstall | restart kube-apiserver docker
- Preinstall | restart kube-apiserver crio/containerd
when: not dns_early | bool
listen:
- Preinstall | propagate resolvconf to k8s components
- Preinstall | update resolvconf for Flatcar Container Linux by Kinvolk
- Preinstall | update resolvconf for networkmanager
# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-apiserver configured
@@ -56,6 +36,7 @@
get_mime: no
register: kube_apiserver_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
# FIXME(mattymo): Also restart for kubeadm mode
- name: Preinstall | kube-controller configured
@@ -66,6 +47,7 @@
get_mime: no
register: kube_controller_set
when: inventory_hostname in groups['kube_control_plane'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | restart kube-controller-manager docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -77,6 +59,7 @@
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | restart kube-controller-manager crio/containerd
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -92,6 +75,7 @@
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_controller_set.stat.exists
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | restart kube-apiserver docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -f name=k8s_POD_kube-apiserver* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -103,6 +87,7 @@
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_apiserver_set.stat.exists
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | restart kube-apiserver crio/containerd
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
@@ -118,6 +103,7 @@
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- kube_apiserver_set.stat.exists
listen: Preinstall | propagate resolvconf to k8s components
# When running this as the last phase ensure we wait for kube-apiserver to come up
- name: Preinstall | wait for the apiserver to be running
@@ -133,6 +119,8 @@
- inventory_hostname in groups['kube_control_plane']
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and not is_fedora_coreos
listen: Preinstall | propagate resolvconf to k8s components
- name: Preinstall | Restart systemd-resolved
service:

roles/network_plugin/calico/handlers/main.yml (20 changes)

@@ -1,16 +1,10 @@
---
- name: Reset_calico_cni
command: /bin/true
when: calico_cni_config is defined
notify:
- Delete 10-calico.conflist
- Calico | delete calico-node docker containers
- Calico | delete calico-node crio/containerd containers
- name: Delete 10-calico.conflist
file:
path: /etc/cni/net.d/10-calico.conflist
state: absent
listen: Reset_calico_cni
when: calico_cni_config is defined
- name: Calico | delete calico-node docker containers
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
@@ -19,7 +13,10 @@
register: docker_calico_node_remove
until: docker_calico_node_remove is succeeded
retries: 5
when: container_manager in ["docker"]
when:
- container_manager in ["docker"]
- calico_cni_config is defined
listen: Reset_calico_cni
- name: Calico | delete calico-node crio/containerd containers
shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name calico-node-* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
@@ -28,4 +25,7 @@
register: crictl_calico_node_remove
until: crictl_calico_node_remove is succeeded
retries: 5
when: container_manager in ["crio", "containerd"]
when:
- container_manager in ["crio", "containerd"]
- calico_cni_config is defined
listen: Reset_calico_cni
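
One consequence visible in the calico diff above: a `when` guard that used to sit once on the /bin/true meta-handler must now be repeated on every handler that listens, since there is no longer a single gating task. Condensed from the diff:

  - name: Delete 10-calico.conflist
    file:
      path: /etc/cni/net.d/10-calico.conflist
      state: absent
    when: calico_cni_config is defined
    listen: Reset_calico_cni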

roles/network_plugin/kube-router/handlers/main.yml (8 changes)

@@ -1,10 +1,4 @@
---
- name: Reset_kube_router
command: /bin/true
notify:
- Kube-router | delete kube-router docker containers
- Kube-router | delete kube-router crio/containerd containers
- name: Kube-router | delete kube-router docker containers
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_POD_kube-router* -q | xargs --no-run-if-empty docker rm -f"
args:
@@ -13,6 +7,7 @@
until: docker_kube_router_remove is succeeded
retries: 5
when: container_manager in ["docker"]
listen: Reset_kube_router
- name: Kube-router | delete kube-router crio/containerd containers
shell: 'set -o pipefail && {{ bin_dir }}/crictl pods --name kube-router* -q | xargs -I% --no-run-if-empty bash -c "{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %"'
@@ -22,3 +17,4 @@
until: crictl_kube_router_remove is succeeded
retries: 5
when: container_manager in ["crio", "containerd"]
listen: Reset_kube_router

roles/network_plugin/macvlan/handlers/main.yml (7 changes)

@@ -1,10 +1,4 @@
---
- name: Macvlan | restart network
command: /bin/true
notify:
- Macvlan | reload network
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
- name: Macvlan | reload network
service:
# noqa: jinja[spacing]
@@ -18,3 +12,4 @@
{%- endif %}
state: restarted
when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"] and kube_network_plugin not in ['calico']
listen: Macvlan | restart network