
Fold kubernetes-apps/network_plugin into network_plugin (#12506)

From what I can see, there is no reason for the split, and it makes
things confusing.
Max Gautier authored 1 month ago · committed by GitHub
commit 84504d156f (pull/12512/head)
12 changed files with 66 additions and 97 deletions
  1. playbooks/cluster.yml (1 deletion)
  2. playbooks/upgrade_cluster.yml (1 deletion)
  3. roles/kubernetes-apps/network_plugin/calico/tasks/main.yml (2 deletions)
  4. roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml (17 deletions)
  5. roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml (9 deletions)
  6. roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml (23 deletions)
  7. roles/kubernetes-apps/network_plugin/meta/main.yml (26 deletions)
  8. roles/kubernetes-apps/network_plugin/multus/tasks/main.yml (18 deletions)
  9. roles/network_plugin/flannel/tasks/main.yml (17 additions)
  10. roles/network_plugin/kube-ovn/tasks/main.yml (9 additions)
  11. roles/network_plugin/kube-router/tasks/main.yml (22 additions)
  12. roles/network_plugin/multus/tasks/main.yml (18 additions)

playbooks/cluster.yml (1 deletion)

@@ -81,7 +81,6 @@
   roles:
     - { role: kubespray_defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
-    - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

playbooks/upgrade_cluster.yml (1 deletion)

@@ -73,7 +73,6 @@
     - { role: kubespray_defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
     - { role: network_plugin, tags: network }
-    - { role: kubernetes-apps/network_plugin, tags: network }
     - { role: kubernetes-apps/policy_controller, tags: policy-controller }

 - name: Finally handle worker upgrades, based on given batch size
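
With these two lines gone, each playbook touches the network plugin exactly once: cluster.yml already runs the node-level network_plugin role in an earlier play (which is why nothing replaces the deleted line there), and in upgrade_cluster.yml the role that remains now also applies the plugin manifests. A sketch of the resulting roles block of the control-plane upgrade play, reconstructed from the unchanged context lines above (surrounding lines elided):

  roles:
    - { role: kubespray_defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: network_plugin, tags: network }   # now also starts the plugin resources
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }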

roles/kubernetes-apps/network_plugin/calico/tasks/main.yml (2 deletions)

@@ -1,2 +0,0 @@
----
-# TODO: Handle Calico etcd -> kdd migration

roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml (17 deletions)

@@ -1,17 +0,0 @@
----
-- name: Flannel | Start Resources
-  kube:
-    name: "{{ item.item.name }}"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item.item.type }}"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  with_items: "{{ flannel_node_manifests.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
-
-- name: Flannel | Wait for flannel subnet.env file presence
-  wait_for:
-    path: /run/flannel/subnet.env
-    delay: 5
-    timeout: 600

roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml (9 deletions)

@@ -1,9 +0,0 @@
----
-- name: Kube-OVN | Start Resources
-  kube:
-    name: "{{ item.item.name }}"
-    kubectl: "{{ bin_dir }}/kubectl"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  with_items: "{{ kube_ovn_node_manifests.results }}"
-  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml (23 deletions)

@@ -1,23 +0,0 @@
----
-- name: Kube-router | Start Resources
-  kube:
-    name: "kube-router"
-    kubectl: "{{ bin_dir }}/kubectl"
-    filename: "{{ kube_config_dir }}/kube-router.yml"
-    resource: "ds"
-    namespace: "kube-system"
-    state: "latest"
-  delegate_to: "{{ groups['kube_control_plane'] | first }}"
-  run_once: true
-
-- name: Kube-router | Wait for kube-router pods to be ready
-  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa ignore-errors
-  register: pods_not_ready
-  until: pods_not_ready.stdout.find("kube-router")==-1
-  retries: 30
-  delay: 10
-  ignore_errors: true
-  delegate_to: "{{ groups['kube_control_plane'] | first }}"
-  run_once: true
-  changed_when: false

roles/kubernetes-apps/network_plugin/meta/main.yml (26 deletions)

@@ -1,26 +0,0 @@
----
-dependencies:
-  - role: kubernetes-apps/network_plugin/calico
-    when: kube_network_plugin == 'calico'
-    tags:
-      - calico
-
-  - role: kubernetes-apps/network_plugin/flannel
-    when: kube_network_plugin == 'flannel'
-    tags:
-      - flannel
-
-  - role: kubernetes-apps/network_plugin/kube-ovn
-    when: kube_network_plugin == 'kube-ovn'
-    tags:
-      - kube-ovn
-
-  - role: kubernetes-apps/network_plugin/kube-router
-    when: kube_network_plugin == 'kube-router'
-    tags:
-      - kube-router
-
-  - role: kubernetes-apps/network_plugin/multus
-    when: kube_network_plugin_multus
-    tags:
-      - multus
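
The deleted file above was a pure dispatcher: a meta role whose only job was to pull in one sub-role depending on kube_network_plugin (plus Multus on top). The node-level network_plugin role performs the same dispatch in its own meta/main.yml, so after the fold the selection logic exists in only one place. For reference, the surviving dispatcher follows the same pattern (illustrative excerpt, assuming the current upstream layout; the real file lists more plugins):

# roles/network_plugin/meta/main.yml (illustrative excerpt, not part of this diff)
dependencies:
  - role: network_plugin/flannel
    when: kube_network_plugin == 'flannel'
    tags:
      - flannel

  - role: network_plugin/multus
    when: kube_network_plugin_multus
    tags:
      - multus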

roles/kubernetes-apps/network_plugin/multus/tasks/main.yml (18 deletions)

@@ -1,18 +0,0 @@
----
-- name: Multus | Start resources
-  kube:
-    name: "{{ item.item.name }}"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item.item.type }}"
-    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
-    state: "latest"
-  delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  run_once: true
-  with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}"
-  loop_control:
-    label: "{{ item.item.name if item != None else 'skipped' }}"
-  vars:
-    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
-  when:
-    - not item is skipped

roles/network_plugin/flannel/tasks/main.yml (17 additions)

@@ -19,3 +19,20 @@
   register: flannel_node_manifests
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
+
+- name: Flannel | Start Resources
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ flannel_node_manifests.results }}"
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
+
+- name: Flannel | Wait for flannel subnet.env file presence
+  wait_for:
+    path: /run/flannel/subnet.env
+    delay: 5
+    timeout: 600
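
The two appended tasks are the ones deleted from roles/kubernetes-apps/network_plugin/flannel above, now sitting directly after the task that registers flannel_node_manifests. Because that variable is the registered result of a looped task, each loop element here is itself a result object: item.item points back to the original manifest descriptor, and `not item is skipped` drops manifests that were never generated. A sketch of the shape the loop relies on (field values are invented for illustration, not taken from a real run):

# one entry of flannel_node_manifests.results (sketch, assumed values)
- changed: true
  item:                    # the original loop element of the templating task
    name: kube-flannel     # -> item.item.name
    type: ds               # -> item.item.type
    file: cni-flannel.yml  # -> item.item.file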

roles/network_plugin/kube-ovn/tasks/main.yml (9 additions)

@@ -15,3 +15,12 @@
     - {name: ovn, file: cni-ovn.yml}
     - {name: kube-ovn, file: cni-kube-ovn.yml}
   register: kube_ovn_node_manifests
+
+- name: Kube-OVN | Start Resources
+  kube:
+    name: "{{ item.item.name }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ kube_ovn_node_manifests.results }}"
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

roles/network_plugin/kube-router/tasks/main.yml (22 additions)

@@ -60,3 +60,25 @@
     mode: "0644"
   delegate_to: "{{ groups['kube_control_plane'] | first }}"
   run_once: true
+
+- name: Kube-router | Start Resources
+  kube:
+    name: "kube-router"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/kube-router.yml"
+    resource: "ds"
+    namespace: "kube-system"
+    state: "latest"
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
+  run_once: true
+
+- name: Kube-router | Wait for kube-router pods to be ready
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"  # noqa ignore-errors
+  register: pods_not_ready
+  until: pods_not_ready.stdout.find("kube-router")==-1
+  retries: 30
+  delay: 10
+  ignore_errors: true
+  delegate_to: "{{ groups['kube_control_plane'] | first }}"
+  run_once: true
+  changed_when: false
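
The readiness task polls kubectl for the names of kube-router pods whose first container is not ready, and the until loop retries (30 × 10 s) until no kube-router pod appears in that output; ignore_errors and changed_when: false keep the poll from failing the play or reporting a change. For illustration only (not part of this commit), a roughly equivalent check could lean on kubectl wait instead of a jsonpath poll:

# Not part of this commit: an alternative readiness check sketched with kubectl wait
- name: Kube-router | Wait for kube-router pods to be ready (alternative sketch)
  command: >-
    {{ bin_dir }}/kubectl -n kube-system wait pods -l k8s-app=kube-router
    --for=condition=Ready --timeout=300s
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  run_once: true
  changed_when: false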

roles/network_plugin/multus/tasks/main.yml (18 additions)

@@ -34,3 +34,21 @@
     - item.engine in container_manager_types
     - hostvars[inventory_hostname].container_manager == item.engine
     - inventory_hostname == vars_from_node
+
+- name: Multus | Start resources
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
+    state: "latest"
+  delegate_to: "{{ groups['kube_control_plane'][0] }}"
+  run_once: true
+  with_items: "{{ (multus_manifest_1.results | default([])) + (multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results')) }}"
+  loop_control:
+    label: "{{ item.item.name if item != None else 'skipped' }}"
+  vars:
+    multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
+  when:
+    - not item is skipped
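
The with_items expression gathers every Multus manifest that may have been templated during this play: multus_manifest_1 registered on the control plane, plus multus_manifest_2 registered on each node in multus_nodes_list (the whole k8s_cluster group when the current batch spans every host of the play, otherwise just the current batch), with map('default', []) covering hosts where the variable is undefined; with_items then iterates the combined results, and the loop_control label guards against None entries. Broken into intermediate variables purely for readability (a hypothetical refactor, variable names invented):

# Hypothetical, readability-only breakdown of the with_items expression above
vars:
  multus_nodes_list: "{{ groups['k8s_cluster'] if ansible_play_batch | length == ansible_play_hosts_all | length else ansible_play_batch }}"
  per_node_manifests: "{{ multus_nodes_list | map('extract', hostvars, 'multus_manifest_2') | map('default', []) | list | json_query('[].results') }}"
  all_multus_manifests: "{{ (multus_manifest_1.results | default([])) + per_node_manifests }}"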