#!/bin/bash
set -euxo pipefail
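# -e: abort on any command failure; -u: treat unset variables as errors;
# -x: trace every command in the CI log; -o pipefail: a pipeline fails if any stage fails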

echo "CI_JOB_NAME is $CI_JOB_NAME"

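# Sanity check: the job name and UPGRADE_TEST must agree, so an upgrade job
# cannot silently run as a plain deployment (or vice versa)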
if [[ "$CI_JOB_NAME" =~ "upgrade" ]]; then
  if [ "${UPGRADE_TEST}" == "false" ]; then
    echo "Job name contains 'upgrade', but UPGRADE_TEST='false'"
    exit 1
  fi
else
  if [ "${UPGRADE_TEST}" != "false" ]; then
    echo "UPGRADE_TEST!='false', but job name does not contain 'upgrade'"
    exit 1
  fi
fi

# Check out the latest tag if testing an upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
  git fetch --all && git checkout "$KUBESPRAY_VERSION"
  # Check out the current tests/ directory; even when testing an old version,
  # we want the up-to-date test setup/provisioning
  git checkout "${CI_COMMIT_SHA}" -- tests/
fi
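
# Configure Ansible through environment variables: connect as the CI SSH user,
# escalate to root with become, and use the sample inventory shipped in the repo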
export ANSIBLE_REMOTE_USER=$SSH_USER
export ANSIBLE_BECOME=true
export ANSIBLE_BECOME_USER=root
export ANSIBLE_INVENTORY=${CI_PROJECT_DIR}/inventory/sample/
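
# Provision the test machines for the target platform (the create-* targets
# live in tests/Makefile), then wait until they are reachable over SSH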
make -C tests INVENTORY_DIR=${ANSIBLE_INVENTORY} create-${CI_PLATFORM} -s
ansible-playbook tests/cloud_playbooks/wait-for-ssh.yml
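
# Helper to run a playbook with the common test variables; extra arguments are
# passed to ansible-playbook ahead of the playbook name, e.g.
#   run_playbook reset.yml -e reset_confirmation=yes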
run_playbook () {
  playbook=$1
  shift
  # We can set --limit here and still pass it as supplemental args
  # because `--limit` is a 'last one wins' option
  ansible-playbook \
    -e @tests/common_vars.yml \
    -e @tests/files/${CI_JOB_NAME}.yml \
    -e local_release_dir=${PWD}/downloads \
    "$@" \
    ${playbook}
}

# Create cluster
run_playbook cluster.yml

# Repeat deployment if testing upgrade
if [ "${UPGRADE_TEST}" != "false" ]; then
  git checkout "${CI_COMMIT_SHA}"
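  # UPGRADE_TEST selects the upgrade path: "basic" re-runs cluster.yml on top of
  # the old release, while "graceful" uses the dedicated upgrade playbook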
  case "${UPGRADE_TEST}" in
    "basic")
      run_playbook cluster.yml
      ;;
    "graceful")
      run_playbook upgrade-cluster.yml
      ;;
    *)
      ;;
  esac
fi

# Test control plane recovery
if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
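  # Wipe the host groups listed in RECOVER_CONTROL_PLANE_TEST_GROUPS, then
  # rebuild the control plane from the remaining etcd/kube_control_plane hosts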
  run_playbook reset.yml --limit "${RECOVER_CONTROL_PLANE_TEST_GROUPS}" -e reset_confirmation=yes
  run_playbook recover-control-plane.yml -e etcd_retries=10 --limit "etcd:kube_control_plane"
fi

# Test collection build and install by installing our collection, emptying our repository, adding
# cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
# running the same tests as before
if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
  # Build and install collection
  ansible-galaxy collection build
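  # `ansible-galaxy collection build` writes <namespace>-<name>-<version>.tar.gz
  # to the current directory; the version is extracted from galaxy.yml to
  # reconstruct that file name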
  ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz

  # Simply remove all of our files and directories except for our tests directory
  # to be absolutely certain that none of our playbooks or roles
  # are interfering with our collection
  find -maxdepth 1 ! -name tests -exec rm -rfv {} \;

  # Write cluster.yml
  cat > cluster.yml <<EOF
- name: Install Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF

  # Write reset.yml
  cat > reset.yml <<EOF
- name: Remove Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF

  # Write remove-node.yml
  cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove_node
EOF
fi

# Test cases
## Test Control Plane API
run_playbook tests/testcases/010_check-apiserver.yml

## Test that all nodes are Ready
run_playbook tests/testcases/015_check-nodes-ready.yml
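
## Test pod scheduling and network connectivity (skipped for macvlan jobs)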
if [[ ! ( "$CI_JOB_NAME" =~ "macvlan" ) ]]; then
  run_playbook tests/testcases/020_check-pods-running.yml
  run_playbook tests/testcases/030_check-network.yml
  if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
    # TODO: Remove this condition by finding an alternative container image
    # to netchecker, which doesn't work in hardening environments
    run_playbook tests/testcases/040_check-network-adv.yml
  fi
fi

## Kubernetes conformance tests
run_playbook tests/testcases/100_check-k8s-conformance.yml
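
# Idempotency: re-applying the playbooks to a working cluster must succeed and
# leave it functional; with RESET_CHECK, also verify that a full reset followed
# by a redeploy converges to a working cluster again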
if [ "${IDEMPOT_CHECK}" = "true" ]; then
  ## Idempotency checks 1/5 (repeat deployment)
  run_playbook cluster.yml

  ## Idempotency checks 2/5 (Advanced DNS checks)
  if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
    run_playbook tests/testcases/040_check-network-adv.yml
  fi

  if [ "${RESET_CHECK}" = "true" ]; then
    ## Idempotency checks 3/5 (reset deployment)
    run_playbook reset.yml -e reset_confirmation=yes

    ## Idempotency checks 4/5 (redeploy after reset)
    run_playbook cluster.yml

    ## Idempotency checks 5/5 (Advanced DNS checks)
    if [[ ! ( "$CI_JOB_NAME" =~ "hardening" ) ]]; then
      run_playbook tests/testcases/040_check-network-adv.yml
    fi
  fi
fi

# Test node removal procedure
if [ "${REMOVE_NODE_CHECK}" = "true" ]; then
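  # REMOVE_NODE_NAME selects the node to remove; skip_confirmation bypasses
  # the interactive prompt in remove-node.yml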
  run_playbook remove-node.yml -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME}
fi

# Clean up at the end; this allows stage1 tests to include a cleanup test
if [ "${RESET_CHECK}" = "true" ]; then
  run_playbook reset.yml -e reset_confirmation=yes
fi