
Merge pull request #12295 from VannTen/ci/collection

CI: Simplify running playbooks as collection + various CI Fixes
Kubernetes Prow Robot authored 3 months ago, committed by GitHub · commit d4cb5da017
8 changed files with 392 additions and 407 deletions
1. tests/scripts/testcases_run.sh (76 changes)
2. tests/testcases/010_check-apiserver.yml (33 changes)
3. tests/testcases/015_check-nodes-ready.yml (34 changes)
4. tests/testcases/020_check-pods-running.yml (48 changes)
5. tests/testcases/030_check-network.yml (221 changes)
6. tests/testcases/040_check-network-adv.yml (283 changes)
7. tests/testcases/100_check-k8s-conformance.yml (50 changes)
8. tests/testcases/tests.yml (54 changes)

tests/scripts/testcases_run.sh (76 changes)

@@ -30,38 +30,16 @@ export ANSIBLE_BECOME_USER=root
if [[ "${TESTCASE}" =~ "collection" ]]; then
# Build and install collection
ansible-galaxy collection build
ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
# Simply remove all of our files and directories except for our tests directory
# to be absolutely certain that none of our playbooks or roles
# are interfering with our collection
find -mindepth 1 -maxdepth 1 ! -regex './\(tests\|inventory\)' -exec rm -rfv {} +
cat > cluster.yml <<EOF
- name: Install Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF
cat > upgrade-cluster.yml <<EOF
- name: Install Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.upgrade-cluster
EOF
cat > reset.yml <<EOF
- name: Remove Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF
cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
EOF
ansible-galaxy collection install kubernetes_sigs-kubespray-*.tar.gz
fi
run_playbook () {
playbook=$1
if [[ "${TESTCASE}" =~ "collection" ]]; then
playbook=kubernetes_sigs.kubespray.$1
else
playbook=$1.yml
fi
shift
ansible-playbook \
-e @tests/common_vars.yml \
-e @tests/${TESTCASE_FILE} \
@@ -71,11 +49,10 @@ ansible-playbook \
}
## START KUBESPRAY
# Create cluster
run_playbook cluster.yml
run_playbook cluster
# Repeat deployment if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
@@ -85,10 +62,10 @@ if [ "${UPGRADE_TEST}" != "false" ]; then
case "${UPGRADE_TEST}" in
"basic")
run_playbook cluster.yml
run_playbook cluster
;;
"graceful")
run_playbook upgrade-cluster.yml
run_playbook upgrade-cluster
;;
*)
;;
@@ -97,36 +74,23 @@ fi
# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}" -e reset_confirmation=yes
run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane"
fi
# Tests Cases
## Test Control Plane API
run_playbook tests/testcases/010_check-apiserver.yml
run_playbook tests/testcases/015_check-nodes-ready.yml
## Test that all nodes are Ready
if [[ ! ( "$TESTCASE" =~ "macvlan" ) ]]; then
run_playbook tests/testcases/020_check-pods-running.yml
run_playbook tests/testcases/030_check-network.yml
if [[ ! ( "$TESTCASE" =~ "hardening" ) ]]; then
# TODO: We need to remove this condition by finding alternative container
# image instead of netchecker which doesn't work at hardening environments.
run_playbook tests/testcases/040_check-network-adv.yml
fi
run_playbook reset --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}" -e reset_confirmation=yes
run_playbook recover-control-plane -e etcd_retries=10 --limit "etcd:kube_control_plane"
fi
## Kubernetes conformance tests
run_playbook tests/testcases/100_check-k8s-conformance.yml
# Run tests
ansible-playbook \
-e @tests/common_vars.yml \
-e @tests/${TESTCASE_FILE} \
-e local_release_dir=${PWD}/downloads \
tests/testcases/tests.yml
# Test node removal procedure
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
run_playbook remove-node.yml -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
run_playbook remove-node -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
fi
# Clean up at the end, this is to allow stage1 tests to include cleanup test
if [ "${RESET_CHECK}" = "true" ]; then
run_playbook reset.yml -e reset_confirmation=yes
run_playbook reset -e reset_confirmation=yes
fi
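
With this change the callers pass bare playbook names (no .yml suffix); the run_playbook wrapper decides whether to resolve the name to the installed collection's fully qualified playbook or to a local playbook file. A minimal sketch of that resolution logic with a hypothetical invocation (argument order and the surrounding -e options are simplified here):

# Sketch of run_playbook name resolution after this PR (simplified; the real
# wrapper also passes -e @tests/common_vars.yml and the testcase vars file).
run_playbook () {
    if [[ "${TESTCASE}" =~ "collection" ]]; then
        playbook=kubernetes_sigs.kubespray.$1    # collection testcases: fully qualified name
    else
        playbook=$1.yml                          # regular testcases: playbook file in the checkout
    fi
    shift
    ansible-playbook "$@" "${playbook}"
}

TESTCASE=collection run_playbook cluster -e foo=bar
# -> ansible-playbook -e foo=bar kubernetes_sigs.kubespray.cluster
TESTCASE=default run_playbook cluster -e foo=bar
# -> ansible-playbook -e foo=bar cluster.yml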

tests/testcases/010_check-apiserver.yml (33 changes)

@@ -1,21 +1,16 @@
---
- name: Testcases for apiserver
hosts: kube_control_plane
- name: Check the API servers are responding
uri:
url: "https://{{ (access_ip if (ipv4_stack | default(true)) else access_ip6) | default(ansible_default_ipv4.address if (ipv4_stack | default(true)) else ansible_default_ipv6.address) | ansible.utils.ipwrap }}:{{ kube_apiserver_port | default(6443) }}/version"
validate_certs: false
status_code: 200
register: apiserver_response
retries: 12
delay: 5
until: apiserver_response is success
tasks:
- name: Check the API servers are responding
uri:
url: "https://{{ (access_ip if (ipv4_stack | default(true)) else access_ip6) | default(ansible_default_ipv4.address if (ipv4_stack | default(true)) else ansible_default_ipv6.address) | ansible.utils.ipwrap }}:{{ kube_apiserver_port | default(6443) }}/version"
validate_certs: false
status_code: 200
register: apiserver_response
retries: 12
delay: 5
until: apiserver_response is success
- name: Check API servers version
assert:
that:
- apiserver_response.json.gitVersion == kube_version
fail_msg: "apiserver version different than expected {{ kube_version }}"
when: kube_version is defined
- name: Check API servers version
assert:
that:
- apiserver_response.json.gitVersion == ('v' + kube_version)
fail_msg: "apiserver is {{ apiserver_response.json.gitVersion }}, expected {{ kube_version }}"

tests/testcases/015_check-nodes-ready.yml (34 changes)

@@ -1,22 +1,18 @@
---
- name: Testcases checking nodes
hosts: kube_control_plane[0]
tasks:
- import_role: # noqa name[missing]
name: cluster-dump
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get nodes"
changed_when: false
register: get_nodes
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get nodes"
changed_when: false
register: get_nodes
- name: Check that all nodes are running and ready
command: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
changed_when: false
register: get_nodes_yaml
until:
# Check that all nodes are Status=Ready
- '(get_nodes_yaml.stdout | from_yaml)["items"] | map(attribute = "status.conditions") | map("items2dict", key_name="type", value_name="status") | map(attribute="Ready") | list | min'
retries: 30
delay: 10
- name: Check that all nodes are running and ready
command: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
changed_when: false
register: get_nodes_yaml
until:
# Check that all nodes are Status=Ready
- '(get_nodes_yaml.stdout | from_yaml)["items"] | map(attribute = "status.conditions") | map("items2dict", key_name="type", value_name="status") | map(attribute="Ready") | list | min'
retries: 30
delay: 10
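
The readiness check here is pure Jinja: each node's status.conditions list is converted to a type-to-status dict with items2dict, the Ready value is extracted, and min over the resulting "True"/"False" strings is only truthy once every node reports Ready. A self-contained sketch of the same filter chain against canned data (hypothetical playbook, not part of the PR):

---
# Sketch: the items2dict / min chain from 015_check-nodes-ready.yml, run
# against hardcoded node data so it can be tried without a cluster.
- name: Demonstrate the node readiness filter chain
  hosts: localhost
  gather_facts: false
  vars:
    fake_nodes:
      items:
        - status:
            conditions:
              - {type: MemoryPressure, status: "False"}
              - {type: Ready, status: "True"}
        - status:
            conditions:
              - {type: MemoryPressure, status: "False"}
              - {type: Ready, status: "False"}   # second node not Ready yet
  tasks:
    # Prints "False" with the data above; flips to "True" once every node
    # has a Ready condition whose status is "True".
    - name: Are all nodes Ready?
      debug:
        msg: >-
          {{ fake_nodes['items']
             | map(attribute='status.conditions')
             | map('items2dict', key_name='type', value_name='status')
             | map(attribute='Ready')
             | list | min }}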

tests/testcases/020_check-pods-running.yml (48 changes)

@@ -1,27 +1,39 @@
---
- name: Testcases checking pods
hosts: kube_control_plane[0]
tasks:
- import_role: # noqa name[missing]
name: cluster-dump
- import_role: # noqa name[missing]
name: cluster-dump
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
- name: Check that all pods are running and ready
command: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
changed_when: false
- name: Check pods
vars:
query_pods_not_running: "items[?status.phase != 'Running']"
query_pods_not_ready: "items[?(status.conditions[?type == 'Ready'])[0].status != 'True']"
pods_not_running: "{{ run_pods_log.stdout | from_json | json_query(query_pods_not_running + '.metadata') }}"
pods_not_ready: "{{ run_pods_log.stdout | from_json | json_query(query_pods_not_ready + '.metadata') }}"
block:
- name: Check that all pods are running
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -o json"
register: run_pods_log
changed_when: false
until:
# Check that all pods are running
- '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.phase") | unique | list == ["Running"]'
- run_pods_log.stdout | from_json | json_query(query_pods_not_running) == []
# Check that all pods are ready
- '(run_pods_log.stdout | from_yaml)["items"] | map(attribute = "status.containerStatuses") | map("map", attribute = "ready") | map("min") | min'
- run_pods_log.stdout | from_json | json_query(query_pods_not_ready) == []
retries: 30
delay: 10
- name: Check kubectl output
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
rescue:
- name: Describe broken pods
command: "{{ bin_dir }}/kubectl describe pod -n {{ item.namespace }} {{ item.name }}"
loop: "{{ pods_not_running + pods_not_ready }}"
loop_control:
label: "{{ item.namespace }}/{{ item.name }}"
- name: Get logs from broken pods
command: "{{ bin_dir }}/kubectl logs -n {{ item.namespace }} {{ item.pod }}"
loop: "{{ pods_not_running + pods_not_ready }}"
loop_control:
label: "{{ item.namespace }}/{{ item.name }}"
- name: Fail CI
fail: {}
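
Rather than collapsing every pod status into one boolean, the rewritten check keeps two JMESPath queries, one for pods that are not Running and one for pods whose Ready condition is not True, retries until both come back empty, and the rescue block then describes and fetches logs for exactly those pods. A sketch of the same queries against canned pod data (hypothetical playbook; json_query comes from the community.general collection):

---
# Sketch: the JMESPath filters from 020_check-pods-running.yml applied to
# static data instead of live kubectl output.
- name: Demonstrate the not-running / not-ready pod queries
  hosts: localhost
  gather_facts: false
  vars:
    query_pods_not_running: "items[?status.phase != 'Running']"
    query_pods_not_ready: "items[?(status.conditions[?type == 'Ready'])[0].status != 'True']"
    fake_pods:
      items:
        - metadata: {name: healthy-pod, namespace: default}
          status:
            phase: Running
            conditions: [{type: Ready, status: "True"}]
        - metadata: {name: stuck-pod, namespace: kube-system}
          status:
            phase: Pending
            conditions: [{type: Ready, status: "False"}]
  tasks:
    # Appending '.metadata' projects only the metadata of matching pods, which
    # is what the rescue tasks loop over (namespace/name).
    - name: Show which pods would trip the retry loop
      debug:
        msg:
          pods_not_running: "{{ fake_pods | json_query(query_pods_not_running + '.metadata') }}"
          pods_not_ready: "{{ fake_pods | json_query(query_pods_not_ready + '.metadata') }}"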

tests/testcases/030_check-network.yml (221 changes)

@@ -1,128 +1,119 @@
---
- name: Testcases for network
hosts: kube_control_plane[0]
- name: Check kubelet serving certificates approved with kubelet_csr_approver
when:
- kubelet_rotate_server_certificates | default(false)
- kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
vars:
test_image_repo: registry.k8s.io/e2e-test-images/agnhost
test_image_tag: "2.40"
# TODO: source those from kubespray_defaults instead.
# Needs kubespray_defaults to be decoupled from no-proxy stuff
kube_pods_subnet: "{{ 'fd85:ee78:d8a6:8607::1:0000/112' if not (ipv4_stack | default(true)) else '10.233.64.0/18' }}"
csrs: "{{ csr_json.stdout | from_json }}"
block:
tasks:
- name: Check kubelet serving certificates approved with kubelet_csr_approver
when:
- kubelet_rotate_server_certificates | default(false)
- kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false))
vars:
csrs: "{{ csr_json.stdout | from_json }}"
block:
- name: Get certificate signing requests
command: "{{ bin_dir }}/kubectl get csr -o jsonpath-as-json={.items[*]}"
register: csr_json
changed_when: false
- name: Check there are csrs
assert:
that: csrs | length > 0
fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
- name: Check there are Denied/Pending csrs
assert:
that:
- csrs | rejectattr('status') | length == 0 # Pending == no status
- csrs | map(attribute='status.conditions') | flatten | selectattr('type', 'equalto', 'Denied') | length == 0 # Denied
fail_msg: kubelet_csr_approver is enabled but CSRs are not approved
- name: Get certificate signing requests
command: "{{ bin_dir }}/kubectl get csr -o jsonpath-as-json={.items[*]}"
register: csr_json
changed_when: false
- name: Approve kubelet serving certificates
when:
- kubelet_rotate_server_certificates | default(false)
- not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
block:
- name: Check there are csrs
assert:
that: csrs | length > 0
fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
- name: Get certificate signing requests
command: "{{ bin_dir }}/kubectl get csr -o name"
register: get_csr
changed_when: false
- name: Check there are Denied/Pending csrs
assert:
that:
- csrs | rejectattr('status') | length == 0 # Pending == no status
- csrs | map(attribute='status.conditions') | flatten | selectattr('type', 'equalto', 'Denied') | length == 0 # Denied
- name: Check there are csrs
assert:
that: get_csr.stdout_lines | length > 0
fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
fail_msg: kubelet_csr_approver is enabled but CSRs are not approved
- name: Approve certificates
command: "{{ bin_dir }}/kubectl certificate approve {{ get_csr.stdout_lines | join(' ') }}"
register: certificate_approve
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout
- name: Approve kubelet serving certificates
when:
- kubelet_rotate_server_certificates | default(false)
- not (kubelet_csr_approver_enabled | default(kubelet_rotate_server_certificates | default(false)))
block:
- name: Create test namespace
command: "{{ bin_dir }}/kubectl create namespace test"
- name: Get certificate signing requests
command: "{{ bin_dir }}/kubectl get csr -o name"
register: get_csr
changed_when: false
- name: Run 2 agnhost pods in test ns
command:
cmd: "{{ bin_dir }}/kubectl apply --namespace test -f -"
stdin: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: agnhost
spec:
replicas: 2
selector:
matchLabels:
- name: Check there are csrs
assert:
that: get_csr.stdout_lines | length > 0
fail_msg: kubelet_rotate_server_certificates is {{ kubelet_rotate_server_certificates }} but no csr's found
- name: Approve certificates
command: "{{ bin_dir }}/kubectl certificate approve {{ get_csr.stdout_lines | join(' ') }}"
register: certificate_approve
when: get_csr.stdout_lines | length > 0
changed_when: certificate_approve.stdout
- name: Create test namespace
command: "{{ bin_dir }}/kubectl create namespace test"
changed_when: false
- name: Run 2 agnhost pods in test ns
command:
cmd: "{{ bin_dir }}/kubectl apply --namespace test -f -"
stdin: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: agnhost
spec:
replicas: 2
selector:
matchLabels:
app: agnhost
template:
metadata:
labels:
app: agnhost
template:
metadata:
labels:
app: agnhost
spec:
containers:
- name: agnhost
image: {{ test_image_repo }}:{{ test_image_tag }}
command: ['/agnhost', 'netexec', '--http-port=8080']
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
spec:
containers:
- name: agnhost
image: {{ test_image_repo }}:{{ test_image_tag }}
command: ['/agnhost', 'netexec', '--http-port=8080']
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ['ALL']
runAsUser: 1000
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
changed_when: false
- name: Check that all pods are running and ready
vars:
pods: "{{ (pods_json.stdout | from_json)['items'] }}"
block:
- name: Check Deployment is ready
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180s"
changed_when: false
- name: Get pod names
command: "{{ bin_dir }}/kubectl get pods -n test -o json"
changed_when: false
register: pods_json
- name: Check pods IP are in correct network
assert:
that: pods
| selectattr('status.phase', '==', 'Running')
| selectattr('status.podIP', 'ansible.utils.in_network', kube_pods_subnet)
| length == 2
- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP | ansible.utils.ipwrap}}:8080"
with_nested:
- "{{ pods }}"
- "{{ pods }}"
loop_control:
label: "{{ item[0].metadata.name + ' --> ' + item[1].metadata.name }}"
rescue:
- name: List pods cluster-wide
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
- name: Check that all pods are running and ready
vars:
pods: "{{ (pods_json.stdout | from_json)['items'] }}"
block:
- name: Check Deployment is ready
command: "{{ bin_dir }}/kubectl rollout status deploy --namespace test agnhost --timeout=180s"
changed_when: false
- name: Get pod names
command: "{{ bin_dir }}/kubectl get pods -n test -o json"
changed_when: false
register: pods_json
- name: Check pods IP are in correct network
assert:
that: pods
| selectattr('status.phase', '==', 'Running')
| selectattr('status.podIP', 'ansible.utils.in_network', kube_pods_subnet)
| length == 2
- name: Curl between pods is working
command: "{{ bin_dir }}/kubectl -n test exec {{ item[0].metadata.name }} -- curl {{ item[1].status.podIP | ansible.utils.ipwrap}}:8080"
with_nested:
- "{{ pods }}"
- "{{ pods }}"
rescue:
- name: List pods cluster-wide
command: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
changed_when: false
- import_role: # noqa name[missing]
name: cluster-dump
- fail: # noqa name[missing]
- import_role: # noqa name[missing]
name: cluster-dump
- fail: # noqa name[missing]
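
The pod IP assertion leans on the ansible.utils.in_network test: only Running pods whose status.podIP falls inside the expected pod CIDR are counted, and both agnhost replicas must match. A minimal sketch of that membership check on hardcoded values (hypothetical playbook; needs the ansible.utils collection):

---
# Sketch: the pod subnet check from 030_check-network.yml on static data.
- name: Demonstrate the pod subnet assertion
  hosts: localhost
  gather_facts: false
  vars:
    kube_pods_subnet: 10.233.64.0/18
    fake_pods:
      - {metadata: {name: agnhost-1}, status: {phase: Running, podIP: "10.233.75.3"}}
      - {metadata: {name: agnhost-2}, status: {phase: Running, podIP: "10.233.102.14"}}
  tasks:
    - name: Both replicas must have an IP inside the pod network
      assert:
        that: >-
          fake_pods
          | selectattr('status.phase', '==', 'Running')
          | selectattr('status.podIP', 'ansible.utils.in_network', kube_pods_subnet)
          | list | length == 2
        fail_msg: "at least one pod got an IP outside {{ kube_pods_subnet }}"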

tests/testcases/040_check-network-adv.yml (283 changes)

@@ -1,160 +1,149 @@
---
- name: Testcases for calico
hosts: kube_node
tasks:
- name: Test tunl0 routes
command: "/sbin/ip route"
register: routes
failed_when: routes.stdout_lines
| select('contains', '/' ~ calico_pool_blocksize|d(26))
| select('contains', 'tunl0') | length == 0
when:
- (calico_ipip_mode is defined and calico_ipip_mode != 'Never')
- kube_network_plugin | default('calico') == 'calico'
- name: Test tunl0 routes
command: "/sbin/ip route"
register: routes
failed_when: routes.stdout_lines
| select('contains', '/' ~ calico_pool_blocksize|d(26))
| select('contains', 'tunl0') | length == 0
when:
- ('kube_node' in group_names)
- (calico_ipip_mode is defined and calico_ipip_mode != 'Never')
- kube_network_plugin | default('calico') == 'calico'
- import_role: # noqa name[missing]
name: cluster-dump
- name: Advanced testcases for network
hosts: k8s_cluster
vars:
agent_report_interval: 10
netcheck_namespace: default
netchecker_port: 31081
- name: Wait for netchecker server
command: "{{ bin_dir }}/kubectl get pods --field-selector=status.phase==Running -o jsonpath-as-json={.items[*].metadata.name} --namespace {{ netcheck_namespace }}"
register: pods_json
until:
- pods_json.stdout | from_json | select('match', 'netchecker-server.*') | length == 1
- (pods_json.stdout | from_json | select('match', 'netchecker-agent.*') | length)
>= (groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2)
retries: 3
delay: 10
when: inventory_hostname == groups['kube_control_plane'][0]
tasks:
- import_role: # noqa name[missing]
name: cluster-dump
- name: Get netchecker pods
command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
with_items:
- netchecker-agent
- netchecker-agent-hostnet
when: not pods_json is success
- name: Wait for netchecker server
command: "{{ bin_dir }}/kubectl get pods --field-selector=status.phase==Running -o jsonpath-as-json={.items[*].metadata.name} --namespace {{ netcheck_namespace }}"
register: pods_json
- name: Perform netchecker tests
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
block:
- name: Get netchecker agents
uri:
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/agents/"
return_content: true
headers:
Accept: application/json
register: agents
retries: 18
delay: "{{ agent_report_interval }}"
until:
- pods_json.stdout | from_json | select('match', 'netchecker-server.*') | length == 1
- (pods_json.stdout | from_json | select('match', 'netchecker-agent.*') | length)
>= (groups['k8s_cluster'] | intersect(ansible_play_hosts) | length * 2)
retries: 3
delay: 10
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Get netchecker pods
command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
with_items:
- netchecker-agent
- netchecker-agent-hostnet
when: not pods_json is success
- name: Perform netchecker tests
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
block:
- name: Get netchecker agents
uri:
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/agents/"
return_content: true
headers:
Accept: application/json
register: agents
retries: 18
delay: "{{ agent_report_interval }}"
until:
- agents is success
- (agents.content | from_json | length) == (groups['k8s_cluster'] | length * 2)
- agents is success
- (agents.content | from_json | length) == (groups['k8s_cluster'] | length * 2)
- name: Check netchecker status
uri:
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/connectivity_check"
return_content: true
headers:
Accept: application/json
register: connectivity_check
retries: 3
delay: "{{ agent_report_interval }}"
until:
- connectivity_check is success
- connectivity_check.content | from_json
- name: Check netchecker status
uri:
url: "http://{{ (ansible_default_ipv6.address if not (ipv4_stack | default(true)) else ansible_default_ipv4.address) | ansible.utils.ipwrap }}:{{ netchecker_port }}/api/v1/connectivity_check"
return_content: true
headers:
Accept: application/json
register: connectivity_check
retries: 3
delay: "{{ agent_report_interval }}"
until:
- connectivity_check is success
- connectivity_check.content | from_json
rescue:
- name: Get kube-proxy logs
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
rescue:
- name: Get kube-proxy logs
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
- name: Get logs from other apps
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
with_items:
- kube-router
- flannel
- canal-node
- calico-node
- cilium
- name: Get logs from other apps
command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
with_items:
- kube-router
- flannel
- canal-node
- calico-node
- cilium
- name: Netchecker tests failed
fail:
msg: "netchecker tests failed"
- name: Netchecker tests failed
fail:
msg: "netchecker tests failed"
- name: Check connectivity with all netchecker agents
vars:
connectivity_check_result: "{{ connectivity_check.content | from_json }}"
agents_check_result: "{{ agents.content | from_json }}"
assert:
that:
- agents_check_result is defined
- connectivity_check_result is defined
- agents_check_result.keys() | length > 0
- not connectivity_check_result.Absent
- not connectivity_check_result.Outdated
msg: "Connectivity check to netchecker agents failed"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
- name: Check connectivity with all netchecker agents
vars:
connectivity_check_result: "{{ connectivity_check.content | from_json }}"
agents_check_result: "{{ agents.content | from_json }}"
assert:
that:
- agents_check_result is defined
- connectivity_check_result is defined
- agents_check_result.keys() | length > 0
- not connectivity_check_result.Absent
- not connectivity_check_result.Outdated
msg: "Connectivity check to netchecker agents failed"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
- name: Create macvlan network conf
command:
cmd: "{{ bin_dir }}/kubectl create -f -"
stdin: |
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.4.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "host-local",
"subnet": "192.168.1.0/24",
"rangeStart": "192.168.1.200",
"rangeEnd": "192.168.1.216",
"routes": [
{ "dst": "0.0.0.0/0" }
],
"gateway": "192.168.1.1"
}
}'
---
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "sleep 2000000000000"]
image: dougbtv/centos-network
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- kube_network_plugin_multus | default(false) | bool
- name: Create macvlan network conf
command:
cmd: "{{ bin_dir }}/kubectl create -f -"
stdin: |
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.4.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "host-local",
"subnet": "192.168.1.0/24",
"rangeStart": "192.168.1.200",
"rangeEnd": "192.168.1.216",
"routes": [
{ "dst": "0.0.0.0/0" }
],
"gateway": "192.168.1.1"
}
}'
---
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "sleep 2000000000000"]
image: dougbtv/centos-network
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- kube_network_plugin_multus | default(false) | bool
- name: Check secondary macvlan interface
command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
register: output
until: output.rc == 0
retries: 90
changed_when: false
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- kube_network_plugin_multus | default(false) | bool
- name: Check secondary macvlan interface
command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
register: output
until: output.rc == 0
retries: 90
changed_when: false
delegate_to: groups['kube_control_plane'][0]
run_once: true
when:
- kube_network_plugin_multus | default(false) | bool
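
The netchecker block ends by decoding the two API responses and asserting that at least one agent has reported and that the connectivity report lists no Absent or Outdated agents. A sketch of that closing assertion fed with canned responses (the values below are illustrative; in the testcase they come from the /api/v1/agents/ and /api/v1/connectivity_check endpoints):

---
# Sketch: the final netchecker assertion from 040_check-network-adv.yml,
# using canned data in place of the live netchecker server responses.
- name: Demonstrate the netchecker connectivity assertion
  hosts: localhost
  gather_facts: false
  vars:
    agents_check_result:
      netchecker-agent-aaaaa: {last_updated: "2024-01-01T00:00:00Z"}
      netchecker-agent-hostnet-bbbbb: {last_updated: "2024-01-01T00:00:00Z"}
    connectivity_check_result:
      Absent: null      # agents that never reported back
      Outdated: null    # agents whose last report is too old
  tasks:
    - name: No agent may be absent or outdated
      assert:
        that:
          - agents_check_result.keys() | length > 0
          - not connectivity_check_result.Absent
          - not connectivity_check_result.Outdated
        msg: "Connectivity check to netchecker agents failed"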

tests/testcases/100_check-k8s-conformance.yml (50 changes)

@@ -1,38 +1,22 @@
---
- name: Testcases for kubernetes conformance
hosts: kube_control_plane[0]
vars:
sonobuoy_version: 0.56.11
sonobuoy_arch: amd64
sonobuoy_parallel: 30
sonobuoy_path: /usr/local/bin/sonobuoy
sonobuoy_mode: Quick
- name: Download sonobuoy
get_url:
url: "https://github.com/vmware-tanzu/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_{{ sonobuoy_arch }}.tar.gz"
dest: /tmp/sonobuoy.tar.gz
mode: "0644"
tasks:
- name: Run sonobuoy
when:
- sonobuoy_enabled is defined
- sonobuoy_enabled
block:
- name: Download sonobuoy
get_url:
url: "https://github.com/heptio/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_{{ sonobuoy_arch }}.tar.gz"
dest: /tmp/sonobuoy.tar.gz
mode: "0644"
- name: Extract sonobuoy
unarchive:
src: /tmp/sonobuoy.tar.gz
dest: /usr/local/bin/
copy: false
- name: Extract sonobuoy
unarchive:
src: /tmp/sonobuoy.tar.gz
dest: /usr/local/bin/
copy: false
- name: Run sonobuoy
command: "{{ sonobuoy_path }} run --mode {{ sonobuoy_mode }} --e2e-parallel {{ sonobuoy_parallel }} --wait"
- name: Run sonobuoy
command: "{{ sonobuoy_path }} run --mode {{ sonobuoy_mode }} --e2e-parallel {{ sonobuoy_parallel }} --wait"
when: sonobuoy_enabled | default(false)
- name: Run sonobuoy retrieve
command: "{{ sonobuoy_path }} retrieve"
register: sonobuoy_retrieve
- name: Run sonobuoy retrieve
command: "{{ sonobuoy_path }} retrieve"
register: sonobuoy_retrieve
- name: Run inspect results
command: "{{ sonobuoy_path }} results {{ sonobuoy_retrieve.stdout }} --plugin e2e --mode report"
- name: Run inspect results
command: "{{ sonobuoy_path }} results {{ sonobuoy_retrieve.stdout }} --plugin e2e --mode report"

tests/testcases/tests.yml (54 changes)

@@ -0,0 +1,54 @@
---
- name: Define dynamic groups
import_playbook: ../../playbooks/boilerplate.yml
- name: Kubespray CI tests
hosts: k8s_cluster
gather_facts: false
vars:
testcase: "{{ lookup('env', 'TESTCASE') }}"
tasks:
- name: Import Kubespray variables
import_role:
name: ../../roles/kubespray_defaults
- name: Testcases for apiserver
import_tasks: 010_check-apiserver.yml
when:
- ('kube_control_plane') in group_names
- name: Test using API
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
block:
- name: Testcases checking nodes
import_tasks: 015_check-nodes-ready.yml
- name: Testcases checking pods
import_tasks: 020_check-pods-running.yml
when: ('macvlan' not in testcase)
- name: Testcases for network
import_tasks: 030_check-network.yml
when: ('macvlan' not in testcase)
vars:
test_image_repo: registry.k8s.io/e2e-test-images/agnhost
test_image_tag: "2.40"
- name: Testcases for calico / advanced network
import_tasks: 040_check-network-adv.yml
when:
- ('macvlan' not in testcase)
- ('hardening' not in testcase)
vars:
agent_report_interval: 10
netcheck_namespace: default
netchecker_port: 31081
- name: Testcases for kubernetes conformance
import_tasks: 100_check-k8s-conformance.yml
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: true
when:
- sonobuoy_enabled is defined
- sonobuoy_enabled
vars:
sonobuoy_version: 0.56.11
sonobuoy_arch: amd64
sonobuoy_parallel: 30
sonobuoy_path: /usr/local/bin/sonobuoy
sonobuoy_mode: Quick
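
All of the per-testcase run_playbook calls in testcases_run.sh are replaced by the single "Run tests" invocation shown above; tests.yml imports boilerplate.yml for the dynamic groups, pulls in kubespray_defaults, and gates each imported testcase on group membership and the TESTCASE environment variable. A sketch of how CI drives it (mirroring the invocation in testcases_run.sh; TESTCASE_FILE and the exact TESTCASE value depend on the CI job):

# Sketch: driving the consolidated test playbook from CI after this PR.
export TESTCASE=default                    # read inside tests.yml via lookup('env', 'TESTCASE')
ansible-playbook \
    -e @tests/common_vars.yml \
    -e @tests/${TESTCASE_FILE} \
    -e local_release_dir=${PWD}/downloads \
    tests/testcases/tests.yml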