@@ -47,21 +47,6 @@ if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then
 CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3"
 fi
 
-ENABLE_020_TEST="true"
-ENABLE_030_TEST="true"
-ENABLE_040_TEST="true"
-if [[ "$CI_JOB_NAME" =~ "macvlan" ]]; then
-ENABLE_020_TEST="false"
-ENABLE_030_TEST="false"
-ENABLE_040_TEST="false"
-fi
-
-if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
-# TODO: We need to remove this condition by finding alternative container
-# image instead of netchecker which doesn't work at hardening environments.
-ENABLE_040_TEST="false"
-fi
-
 # Check out latest tag if testing upgrade
 test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
 # Checkout the CI vars file so it is available
@@ -69,21 +54,41 @@ test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" tests/files
test " ${ UPGRADE_TEST } " != "false" && git checkout " ${ CI_COMMIT_SHA } " ${ CI_TEST_REGISTRY_MIRROR }
test " ${ UPGRADE_TEST } " != "false" && git checkout " ${ CI_COMMIT_SHA } " ${ CI_TEST_SETTING }
run_playbook ( ) {
playbook = $1
shift
# We can set --limit here and still pass it as supplemental args because `--limit` is a 'last one wins' option
ansible-playbook --limit "all:!fake_hosts" \
$ANSIBLE_LOG_LEVEL \
-e @${ CI_TEST_SETTING } \
-e @${ CI_TEST_REGISTRY_MIRROR } \
-e @${ CI_TEST_VARS } ${ CI_TEST_ADDITIONAL_VARS } \
-e local_release_dir = ${ PWD } /downloads \
" $@ " \
${ playbook }
}
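+# Example: supplemental args appended by callers override the defaults above,
+# e.g. the control-plane recovery test below narrows the limit and adds vars:
+#   run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes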
 
 # Create cluster
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
+run_playbook cluster.yml
 
 # Repeat deployment if testing upgrade
-if [ "${UPGRADE_TEST}" != "false" ]; then
-test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
-test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
 git checkout "${CI_COMMIT_SHA}"
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
-fi
case " ${ UPGRADE_TEST } " in
"basic" )
run_playbook cluster.yml
; ;
"graceful" )
run_playbook upgrade-cluster.yml
; ;
*)
; ;
esac
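+# (any other UPGRADE_TEST value, e.g. "false", performs no second deployment)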
 
 # Test control plane recovery
 if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts" recover-control-plane.yml
+run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes
+run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane:!fake_hosts"
 fi
 # Test collection build and install by installing our collection, emptying our repository, adding
@@ -114,61 +119,59 @@ EOF
 # Write remove-node.yml
 cat > remove-node.yml <<EOF
 - name: Remove node from Kubernetes
-  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remote-node
+  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
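+  # NB: playbooks in collections are addressed with underscores (remove_node);
+  # the dashed form is not a valid collection playbook name.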
 EOF
 fi
 
 # Tests Cases
 ## Test Master API
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
+run_playbook tests/testcases/010_check-apiserver.yml
+run_playbook tests/testcases/015_check-nodes-ready.yml
 ## Test that all nodes are Ready
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/015_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
 ## Test that all pods are Running
-if [ "${ENABLE_020_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/020_check-pods-running.yml $ANSIBLE_LOG_LEVEL
-fi
 ## Test pod creation and ping between them
-if [ "${ENABLE_030_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
-fi
 ## Advanced DNS checks
-if [ "${ENABLE_040_TEST}" = "true" ]; then
-ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
+if [[ ! ("$CI_JOB_NAME" =~ "macvlan") ]]; then
+run_playbook tests/testcases/020_check-pods-running.yml
+run_playbook tests/testcases/030_check-network.yml
+if [[ ! ("$CI_JOB_NAME" =~ "hardening") ]]; then
+# TODO: We need to remove this condition by finding alternative container
+# image instead of netchecker which doesn't work at hardening environments.
+run_playbook tests/testcases/040_check-network-adv.yml
+fi
 fi
 
 ## Kubernetes conformance tests
-ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL
+run_playbook tests/testcases/100_check-k8s-conformance.yml
if [ " ${ IDEMPOT_CHECK } " = "true" ] ; then
## Idempotency checks 1/5 (repeat deployment)
ansible-playbook ${ ANSIBLE_LOG_LEVEL } -e @${ CI_TEST_SETTING } -e @${ CI_TEST_REGISTRY_MIRROR } ${ CI_TEST_ADDITIONAL_VARS } -e @${ CI_TEST_VARS } -e local_release_dir = ${ PWD } /downloads --limit "all:!fake_hosts" cluster.yml
run_playbook cluster.yml
## Idempotency checks 2/5 (Advanced DNS checks)
ansible-playbook ${ ANSIBLE_LOG_LEVEL } -e @${ CI_TEST_VARS } ${ CI_TEST_ADDITIONAL_VARS } --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
if [ [ ! ( " $CI_JOB_NAME " = ~ "hardening" ) ] ] ; then
run_playbook tests/testcases/040_check-network-adv.yml
fi
if [ " ${ RESET_CHECK } " = "true" ] ; then
## Idempotency checks 3/5 (reset deployment)
ansible-playbook ${ ANSIBLE_LOG_LEVEL } -e @${ CI_TEST_SETTING } -e @${ CI_TEST_REGISTRY_MIRROR } -e @${ CI_TEST_VARS } ${ CI_TEST_ADDITIONAL_VARS } -e reset_confirmation = yes --limit "all:!fake_hosts" reset.yml
run_playbook reset.yml -e reset_confirmation = yes
## Idempotency checks 4/5 (redeploy after reset)
ansible-playbook ${ ANSIBLE_LOG_LEVEL } -e @${ CI_TEST_SETTING } -e @${ CI_TEST_REGISTRY_MIRROR } -e @${ CI_TEST_VARS } ${ CI_TEST_ADDITIONAL_VARS } -e local_release_dir = ${ PWD } /downloads --limit "all:!fake_hosts" cluster.yml
run_playbook cluster.yml
## Idempotency checks 5/5 (Advanced DNS checks)
ansible-playbook ${ ANSIBLE_LOG_LEVEL } -e @${ CI_TEST_SETTING } -e @${ CI_TEST_REGISTRY_MIRROR } -e @${ CI_TEST_VARS } ${ CI_TEST_ADDITIONAL_VARS } --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
if [ [ ! ( " $CI_JOB_NAME " = ~ "hardening" ) ] ] ; then
run_playbook tests/testcases/040_check-network-adv.yml
fi
fi
fi
 
 # Test node removal procedure
 if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml
+run_playbook remove-node.yml -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
 fi
 
 # Clean up at the end, this is to allow stage1 tests to include cleanup test
 if [ "${RESET_CHECK}" = "true" ]; then
-ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
+run_playbook reset.yml -e reset_confirmation=yes
 fi