#!/bin/bash
set -euxo pipefail

echo "CI_JOB_NAME is $CI_JOB_NAME"
CI_TEST_ADDITIONAL_VARS=""
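# This script is driven by environment variables: CI_JOB_NAME and CI_BUILD_REF come
# from GitLab CI, while UPGRADE_TEST, CI_PLATFORM and the CI_TEST_* paths are
# presumably exported by the job definition. Fail fast if the job name and
# UPGRADE_TEST disagree with each other.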
  5. if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
  6. if [ "${UPGRADE_TEST}" == "false" ]; then
  7. echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
  8. exit 1
  9. fi
  10. else
  11. if [ "${UPGRADE_TEST}" != "false" ]; then
  12. echo "UPGRADE_TEST!='false', but job names does not contain 'upgrade'"
  13. exit 1
  14. fi
  15. fi
# needed for ara not to complain
export TZ=UTC
export ANSIBLE_REMOTE_USER=$SSH_USER
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root
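# Record every playbook run below in ara; `python -m ara.setup.callback_plugins`
# prints the directory of ara's Ansible callback plugin so Ansible can load it.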
export ANSIBLE_CALLBACK_PLUGINS="$(python -m ara.setup.callback_plugins)"
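# Provision the test machines for the current platform, then wait for SSH to come
# up on all of them. CI_PLATFORM presumably selects a create-* target in
# tests/Makefile (e.g. create-vagrant; the exact targets are an assumption here).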
  22. cd tests && make create-${CI_PLATFORM} -s ; cd -
  23. ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml
# Flatcar Container Linux needs auto update disabled
if [[ "$CI_JOB_NAME" =~ "coreos" ]]; then
  ansible all -m raw -a 'systemctl disable locksmithd'
  ansible all -m raw -a 'systemctl stop locksmithd'
  mkdir -p /opt/bin && ln -s /usr/bin/python /opt/bin/python
fi
  30. if [[ "$CI_JOB_NAME" =~ "opensuse" ]]; then
  31. # OpenSUSE needs netconfig update to get correct resolv.conf
  32. # See https://goinggnu.wordpress.com/2013/10/14/how-to-fix-the-dns-in-opensuse-13-1/
  33. ansible all -m raw -a 'netconfig update -f'
  34. # Auto import repo keys
  35. ansible all -m raw -a 'zypper --gpg-auto-import-keys refresh'
  36. fi
  37. if [[ "$CI_JOB_NAME" =~ "ubuntu" ]]; then
  38. # We need to tell ansible that ubuntu hosts are python3 only
  39. CI_TEST_ADDITIONAL_VARS="-e ansible_python_interpreter=/usr/bin/python3"
  40. fi
ENABLE_040_TEST="true"
if [[ "$CI_JOB_NAME" =~ "hardening" ]]; then
  # TODO: remove this condition once we find an alternative to the netchecker
  # container image, which does not work in hardened environments.
  ENABLE_040_TEST="false"
fi
# Check out the latest tag if testing an upgrade
test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
# Check out the CI vars file so it is available
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR}
test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_SETTING}
# Create cluster
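# (`-e @file` loads extra vars from a YAML file; CI_TEST_SETTING, CI_TEST_REGISTRY_MIRROR
# and CI_TEST_VARS are presumably paths to such files, exported by the CI job.)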
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
# Repeat deployment if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
  test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
  test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
  git checkout "${CI_BUILD_REF}"
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
fi
# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}:!fake_hosts" -e reset_confirmation=yes reset.yml
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
fi
# Test collection build and install by installing our collection, emptying our repository,
# adding cluster.yml, reset.yml, and remove-node.yml files that simply point to our
# collection's playbooks, and then running the same tests as before
  70. if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
  71. # Build and install collection
  72. ansible-galaxy collection build
  73. ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
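  # The tarball name comes from galaxy.yml: with, say, "version: 2.22.0" (an
  # illustrative value), the build step produces kubernetes_sigs-kubespray-2.22.0.tar.gz.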
  # Simply remove all of our files and directories except for our tests directory
  # to be absolutely certain that none of our playbooks or roles
  # are interfering with our collection
  find -maxdepth 1 ! -name tests -exec rm -rfv {} \;

  # Write cluster.yml
  cat > cluster.yml <<EOF
- name: Install Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF

  # Write reset.yml
  cat > reset.yml <<EOF
- name: Remove Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF

  # Write remove-node.yml
  cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove-node
EOF
fi
# Test cases
## Test Master API
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
## Test that all nodes are Ready
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/015_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
## Test that all pods are Running
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/020_check-pods-running.yml $ANSIBLE_LOG_LEVEL
## Test pod creation and ping between them
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
## Advanced DNS checks
if [ "${ENABLE_040_TEST}" = "true" ]; then
  ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
fi
## Kubernetes conformance tests
ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL
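# Idempotency: re-running the deployment against an already-converged cluster must
# succeed and leave networking intact; with RESET_CHECK we also verify a full
# reset-and-redeploy cycle.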
  109. if [ "${IDEMPOT_CHECK}" = "true" ]; then
  110. ## Idempotency checks 1/5 (repeat deployment)
  111. ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} ${CI_TEST_ADDITIONAL_VARS} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
  112. ## Idempotency checks 2/5 (Advanced DNS checks)
  113. ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
  114. if [ "${RESET_CHECK}" = "true" ]; then
  115. ## Idempotency checks 3/5 (reset deployment)
  116. ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
  117. ## Idempotency checks 4/5 (redeploy after reset)
  118. ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml
  119. ## Idempotency checks 5/5 (Advanced DNS checks)
  120. ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml
  121. fi
  122. fi
# Test node removal procedure
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml
fi
# Clean up at the end; this allows stage1 tests to include the cleanup test
if [ "${RESET_CHECK}" = "true" ]; then
  ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml
fi