Browse Source

CI: use kubevirt.core dynamic inventory

This allows a single source of truth for the virtual machines in a
kubevirt CI run.

`etcd_member_name` should be handled correctly in kubespray-defaults so
that the recovery test cases still work.
pull/11530/head
Max Gautier 2 months ago
parent
commit
329ffd45f0
Failed to extract signature
6 changed files with 56 additions and 132 deletions
  1. 2
      pipeline.Dockerfile
  2. 31
      tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
  3. 16
      tests/cloud_playbooks/roles/packet-ci/templates/inv.kubevirt.yml.j2
  4. 98
      tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
  5. 3
      tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2
  6. 38
      tests/cloud_playbooks/roles/packet-ci/vars/main.yml

2
pipeline.Dockerfile

@ -60,4 +60,4 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
&& vagrant plugin install vagrant-libvirt \
# Install Kubernetes collections
&& pip install --no-compile --no-cache-dir kubernetes \
&& ansible-galaxy collection install kubernetes.core
&& ansible-galaxy collection install kubernetes.core kubevirt.core

31
tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml

@ -1,24 +1,11 @@
---
- name: "Create temp dir /tmp/{{ test_name }} for CI files"
file:
path: "/tmp/{{ test_name }}"
state: directory
mode: "0755"
- name: Template vm files for CI job
set_fact:
vms_files: "{{ vms_files + [lookup('ansible.builtin.template', 'vm.yml.j2') | from_yaml] }}"
vars:
vms_files: []
loop: "{{ range(1, vm_count | int + 1, 1) | list }}"
loop_control:
index_var: vm_id
- name: Start vms for CI job
vars:
tvars:
kubespray_groups: "{{ item }}"
kubernetes.core.k8s:
definition: "{{ item }}"
changed_when: false
loop: "{{ vms_files }}"
definition: "{{ lookup('template', 'vm.yml.j2', template_vars=tvars) }}"
loop: "{{ scenarios[mode | d('default')] }}"
- name: Wait for vms to have IP addresses
kubernetes.core.k8s_info:
@ -34,10 +21,8 @@
retries: 30
delay: 10
- name: "Create inventory for CI test in file /tmp/{{ test_name }}/inventory"
- name: "Create inventory for CI tests"
template:
src: "inventory.j2"
dest: "{{ inventory_path }}"
src: "inv.kubevirt.yml.j2"
dest: "{{ inventory_path }}/inv.kubevirt.yml"
mode: "0644"
vars:
vms: "{{ vm_ips }}"

16
tests/cloud_playbooks/roles/packet-ci/templates/inv.kubevirt.yml.j2

@ -0,0 +1,16 @@
# Dynamic inventory for CI jobs: discovers the KubeVirt VMs belonging to
# this CI run and rebuilds the kubespray Ansible groups from the VM labels,
# giving a single source of truth for the cluster layout.
plugin: kubevirt.core.kubevirt
namespaces:
  - {{ pod_namespace }}
# Only pick up the VMs created for this particular CI job.
label_selector: ci_job_id={{ ci_job_id }}
create_groups: true
compose:
  # Map the auto-generated label groups back to kubespray group names:
  # a VM label 'kubespray.io/<group>: "true"' shows up as the group
  # 'label_kubespray_io_<group>_true'; extract '<group>' from it.
  ci_groups: |
    group_names |
    select('ansible.builtin.match', 'label_kubespray_io*') |
    map('regex_replace', 'label_kubespray_io_(.*)_true', '\1')
use_service: false
# Use the bare VM name as the inventory hostname (e.g. 'instance-1').
host_format: "{name}"
keyed_groups:
  # Turn each entry of ci_groups into a plain group name, with no
  # prefix/separator decoration (so 'etcd' stays 'etcd').
  - key: ci_groups
    prefix: ""
    separator: ""

98
tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2

@ -1,98 +0,0 @@
# Static Ansible inventory for a CI run, rendered by the packet-ci role.
# One 'instance-N' host per created VM; the group layout below depends on
# the scenario selected via 'mode'.
[all]
{% for instance in vms.results %}
instance-{{ loop.index }} ansible_host={{instance.stdout}}
{% endfor %}
{% if mode == "separate" %}
# Each role on its own dedicated node.
[kube_control_plane]
instance-1
[kube_node]
instance-2
[etcd]
instance-3
{% elif mode == "ha" %}
# Two control-plane nodes, three-member etcd spread over all nodes.
[kube_control_plane]
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-1
instance-2
instance-3
{% elif mode == "default" %}
# Minimal two-node cluster: combined control-plane/etcd plus one worker.
[kube_control_plane]
instance-1
[kube_node]
instance-2
[etcd]
instance-1
{% elif mode == "all-in-one" %}
# Single node carrying every role.
[kube_control_plane]
instance-1
[kube_node]
instance-1
[etcd]
instance-1
{% elif mode == "ha-recover" %}
# HA layout where instance-2 is marked broken, for the recovery playbooks.
# NOTE(review): etcd_member_name values appear tied to the [etcd] ordering
# above — confirm against the recover-control-plane playbook.
[kube_control_plane]
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube_control_plane]
instance-2
[broken_etcd]
instance-2 etcd_member_name=etcd3
{% elif mode == "ha-recover-noquorum" %}
# HA layout with a majority (2 of 3) of etcd members broken, i.e. the
# cluster has lost quorum before recovery starts.
[kube_control_plane]
instance-3
instance-1
instance-2
[kube_node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube_control_plane]
instance-1
instance-2
[broken_etcd]
instance-1 etcd_member_name=etcd2
instance-2 etcd_member_name=etcd3
{% elif mode == "node-etcd-client" %}
# Every node is also a kube_node, so workers act as etcd clients;
# instance-4 is a pure worker outside the etcd cluster.
[kube_control_plane]
instance-1
[etcd]
instance-1
instance-2
instance-3
[kube_node]
instance-1
instance-2
instance-3
instance-4
{% endif %}

3
tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2

@ -12,6 +12,9 @@ metadata:
kubevirt.io/domain: "{{ test_name }}"
ci_job_id: "{{ ci_job_id }}"
ci_job_name: "{{ ci_job_name }}"
{% for group in kubespray_groups -%}
kubespray.io/{{ group }}: "true"
{% endfor -%}
# leverage the Kubernetes GC for resources cleanup
ownerReferences:
- apiVersion: v1

38
tests/cloud_playbooks/roles/packet-ci/vars/main.yml

@ -1,14 +1,32 @@
---
_vm_count_dict:
separate: 3
ha: 3
ha-recover: 3
ha-recover-noquorum: 3
all-in-one: 1
node-etcd-client: 4
default: 2
vm_count: "{{ _vm_count_dict[mode | d('default')] }}"
# Cluster layout for each CI scenario: one inner list per VM, naming the
# Ansible groups that VM belongs to. The number of VMs of a scenario is
# the length of its list (replaces the old _vm_count_dict).
scenarios:
  separate:
    - ['kube_control_plane']
    - ['kube_node']
    - ['etcd']
  ha:
    - ['kube_control_plane', 'etcd']
    - ['kube_control_plane', 'etcd']
    - ['kube_node', 'etcd']
  default:
    - ['kube_control_plane', 'etcd']
    - ['kube_node']
  all-in-one:
    - ['kube_control_plane', 'etcd', 'kube_node']
  # Recovery scenarios: 'broken_*' groups mark the members the recovery
  # playbooks must replace.
  ha-recover:
    - ['kube_control_plane', 'etcd']
    - ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
    - ['kube_node', 'etcd']
  # Same as ha-recover but with 2 of 3 etcd members broken (quorum lost).
  ha-recover-noquorum:
    - ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
    - ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
    - ['kube_node', 'etcd']
  node-etcd-client:
    - ['kube_node', 'kube_control_plane', 'etcd']
    - ['kube_node', 'etcd']
    - ['kube_node', 'etcd']
    - ['kube_node']
# Get pod metadata / CI vars from environment

Loading…
Cancel
Save