
Adds support for Ansible collections (#9582)

pull/9932/head
Luke Simmons, 1 year ago, committed by GitHub
commit acbf44a1b4
28 changed files with 1104 additions and 899 deletions
  1. .gitignore (4)
  2. .gitlab-ci/lint.yml (24)
  3. .gitlab-ci/vagrant.yml (5)
  4. README.md (4)
  5. cluster.yml (132)
  6. docs/ansible_collection.md (38)
  7. galaxy.yml (47)
  8. library/kube.py (357)
  9. library/kube.py (1)
  10. playbooks/ansible_version.yml (0)
  11. playbooks/cluster.yml (131)
  12. playbooks/facts.yml (0)
  13. playbooks/legacy_groups.yml (0)
  14. playbooks/recover-control-plane.yml (34)
  15. playbooks/remove-node.yml (50)
  16. playbooks/reset.yml (39)
  17. playbooks/scale.yml (124)
  18. playbooks/upgrade-cluster.yml (170)
  19. plugins/modules/kube.py (357)
  20. recover-control-plane.yml (35)
  21. remove-node.yml (51)
  22. reset.yml (40)
  23. scale.yml (125)
  24. tests/ansible.cfg (1)
  25. tests/files/vagrant_ubuntu20-flannel-collection.rb (9)
  26. tests/files/vagrant_ubuntu20-flannel-collection.yml (3)
  27. tests/scripts/check_galaxy_version.sh (18)
  28. tests/scripts/testcases_run.sh (33)
  29. upgrade-cluster.yml (171)

4
.gitignore

@@ -114,3 +114,7 @@ roles/**/molecule/**/__pycache__/
# Temp location used by our scripts
scripts/tmp/
tmp.md
# Ansible collection files
kubernetes_sigs-kubespray*tar.gz
ansible_collections

24
.gitlab-ci/lint.yml

@@ -39,11 +39,28 @@ syntax-check:
ANSIBLE_VERBOSITY: "3"
script:
- ansible-playbook --syntax-check cluster.yml
- ansible-playbook --syntax-check playbooks/cluster.yml
- ansible-playbook --syntax-check upgrade-cluster.yml
- ansible-playbook --syntax-check playbooks/upgrade-cluster.yml
- ansible-playbook --syntax-check reset.yml
- ansible-playbook --syntax-check playbooks/reset.yml
- ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
except: ['triggers', 'master']
collection-build-install-sanity-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
script:
- ansible-galaxy collection build
- ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
- ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
except: ['triggers', 'master']
tox-inventory-builder:
stage: unit-tests
tags: [light]
@@ -75,6 +92,13 @@ check-readme-versions:
script:
- tests/scripts/check_readme_versions.sh
check-galaxy-version:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh
check-typo:
stage: unit-tests
tags: [light]
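For reference, the `egrep | awk | tr | sed` pipeline in the `collection-build-install-sanity-check` job above assembles the fully qualified collection name from galaxy.yml. Run by hand against the galaxy.yml added in this commit, it behaves roughly like this (a sketch, assuming GNU grep):

```ShellSession
$ egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}'
kubernetes_sigs
kubespray
$ egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g'
kubernetes_sigs.kubespray
```

The `tr` joins the two fields with a dot and the `sed` strips the trailing dot, yielding the `kubernetes_sigs.kubespray` name that the `ansible-galaxy collection list` output is then grepped for.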

5
.gitlab-ci/vagrant.yml

@@ -45,6 +45,11 @@ vagrant_ubuntu20-flannel:
when: on_success
allow_failure: false
vagrant_ubuntu20-flannel-collection:
stage: deploy-part2
extends: .vagrant
when: on_success
vagrant_ubuntu16-kube-router-sep:
stage: deploy-part2
extends: .vagrant

4
README.md

@@ -77,6 +77,10 @@ docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inve
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
#### Collection
See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection.
### Vagrant
For Vagrant we need to install Python dependencies for provisioning tasks.

132
cluster.yml

@@ -1,131 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- hosts: k8s_cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/control-plane, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
- name: Install Kubernetes
ansible.builtin.import_playbook: playbooks/cluster.yml

38
docs/ansible_collection.md

@@ -0,0 +1,38 @@
# Ansible collection
Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html).
## Requirements
- An inventory file with the appropriate host groups (a minimal sketch follows this list). See the [README](../README.md#usage).
- A `group_vars` directory. These group variables **need** to match the appropriate variable names under `inventory/local/group_vars`. See the [README](../README.md#usage).
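As a minimal sketch of such an inventory (hostnames and addresses are placeholders, not part of this commit):

```ini
[kube_control_plane]
node1 ansible_host=192.0.2.10

[etcd]
node1 ansible_host=192.0.2.10

[kube_node]
node2 ansible_host=192.0.2.11

[k8s_cluster:children]
kube_control_plane
kube_node
```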
## Usage
1. Add Kubespray to your requirements.yml file
```yaml
collections:
- name: https://github.com/kubernetes-sigs/kubespray
type: git
version: v2.21.0
```
2. Install your collection
```ShellSession
ansible-galaxy install -r requirements.yml
```
3. Create a playbook to install your Kubernetes cluster
```yaml
- name: Install Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
```
4. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray
```ShellSession
ansible-playbook -i INVENTORY --become --become-user=root PLAYBOOK
```
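As an aside (not part of this commit), `ansible-galaxy` can also install straight from a git URL, which is equivalent to the requirements.yml entry in step 1:

```ShellSession
ansible-galaxy collection install git+https://github.com/kubernetes-sigs/kubespray.git,v2.21.0
```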

47
galaxy.yml

@@ -0,0 +1,47 @@
---
namespace: kubernetes_sigs
description: Deploy a production ready Kubernetes cluster
name: kubespray
version: 2.21.0
readme: README.md
authors:
- luksi1
tags:
- kubernetes
- kubespray
repository: https://github.com/kubernetes-sigs/kubespray
build_ignore:
- .github
- '*.tar.gz'
- extra_playbooks
- inventory
- scripts
- test-infra
- .ansible-lint
- .editorconfig
- .gitignore
- .gitlab-ci
- .gitlab-ci.yml
- .gitmodules
- .markdownlint.yaml
- .nojekyll
- .pre-commit-config.yaml
- .yamllint
- Dockerfile
- FILES.json
- MANIFEST.json
- Makefile
- Vagrantfile
- _config.yml
- ansible.cfg
- requirements*txt
- setup.cfg
- setup.py
- index.html
- reset.yml
- cluster.yml
- scale.yml
- recover-control-plane.yml
- remove-node.yml
- upgrade-cluster.yml
- library
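One way to confirm that `build_ignore` took effect is to list the built artifact; a sketch, assuming the tarball name follows the `namespace-name-version` convention used elsewhere in this commit:

```ShellSession
ansible-galaxy collection build
# grep returns 1 when neither the top-level cluster.yml shim nor library/ is packaged
tar -tzf kubernetes_sigs-kubespray-2.21.0.tar.gz | egrep '^(cluster\.yml|library/)' \
  || echo "top-level playbook shims and library/ are excluded"
```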

357
library/kube.py

@@ -1,357 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file(s).
- To operate on several files this can accept a comma separated list of files or a list of files.
aliases: [ 'files', 'file', 'filenames' ]
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
wait:
required: false
default: false
description:
- A flag to indicate to wait for resources to be created before continuing to the next step
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
recursive:
required: false
default: false
description:
- Process the directory used in -f, --filename recursively.
Useful when you want to manage related manifests organized
within the same directory.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
- name: test nginx and postgresql are present
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
- name: test nginx and postgresql are present
kube:
files:
- /tmp/nginx.yml
- /tmp/postgresql.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.wait = module.params.get('wait')
self.name = module.params.get('name')
self.filename = [f.strip() for f in module.params.get('filename') or []]
self.resource = module.params.get('resource')
self.label = module.params.get('label')
self.recursive = module.params.get('recursive')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True, force=True):
if check and self.exists():
return []
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def replace(self, force=True):
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
return self._execute(cmd)
def exists(self):
cmd = ['get']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
cmd.append('--no-headers')
result = self._execute_nofail(cmd)
if not result:
return False
return True
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
wait=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']),
recursive=dict(default=False, type='bool'),
),
mutually_exclusive=[['filename', 'list']]
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
result = manager.replace()
elif state == 'exists':
result = manager.exists()
module.exit_json(changed=changed,
msg='%s' % result)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

1
library/kube.py

@@ -0,0 +1 @@
../plugins/modules/kube.py

ansible_version.yml → playbooks/ansible_version.yml

131
playbooks/cluster.yml

@@ -0,0 +1,131 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- hosts: k8s_cluster:etcd
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/control-plane, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm}
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

facts.yml → playbooks/facts.yml

legacy_groups.yml → playbooks/legacy_groups.yml

34
playbooks/recover-control-plane.yml

@@ -0,0 +1,34 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: etcd[0]
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- role: recover_control_plane/etcd
when: etcd_deployment_type != "kubeadm"
- hosts: kube_control_plane[0]
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/control-plane }
- import_playbook: cluster.yml
- hosts: kube_control_plane
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/post-recover }

50
playbooks/remove-node.yml

@@ -0,0 +1,50 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
gather_facts: no
tasks:
- name: Confirm Execution
pause:
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
register: pause_result
run_once: True
when:
- not (skip_confirmation | default(false) | bool)
- name: Fail if user does not confirm deletion
fail:
msg: "Delete nodes confirmation failed"
when: pause_result.user_input | default('yes') != 'yes'
- name: Gather facts
import_playbook: facts.yml
when: reset_nodes|default(True)|bool
- hosts: "{{ node | default('kube_node') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: remove-node/pre-remove, tags: pre-remove }
- { role: remove-node/remove-etcd-node }
- { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: remove-node/post-remove, tags: post-remove }

39
playbooks/reset.yml

@@ -0,0 +1,39 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- name: Gather facts
import_playbook: facts.yml
- hosts: etcd:k8s_cluster:calico_rr
gather_facts: False
vars_prompt:
name: "reset_confirmation"
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
default: "no"
private: no
pre_tasks:
- name: check confirmation
fail:
msg: "Reset confirmation failed"
when: reset_confirmation != "yes"
- name: Gather information about installed services
service_facts:
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
- { role: reset, tags: reset }

124
playbooks/scale.yml

@@ -0,0 +1,124 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- name: Bootstrap any new workers
hosts: kube_node
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os }
- name: Gather facts
tags: always
import_playbook: facts.yml
- name: Generate the etcd certificates beforehand
hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
- name: Target only workers to get kubelet installed and checking in on any new nodes (engine)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Target only workers to get kubelet installed and checking in on any new nodes (node)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- name: Upload control plane certs and retrieve encryption key
hosts: kube_control_plane | first
environment: "{{ proxy_disable_env }}"
gather_facts: False
tags: kubeadm
roles:
- { role: kubespray-defaults }
tasks:
- name: Upload control plane certificates
command: >-
{{ bin_dir }}/kubeadm init phase
--config {{ kube_config_dir }}/kubeadm-config.yaml
upload-certs
--upload-certs
environment: "{{ proxy_disable_env }}"
register: kubeadm_upload_cert
changed_when: false
- name: set fact 'kubeadm_certificate_key' for later use
set_fact:
kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
when: kubeadm_certificate_key is not defined
- name: Target only workers to get kubelet installed and checking in on any new nodes (network)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

170
playbooks/upgrade-cluster.yml

@@ -0,0 +1,170 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd:calico_rr
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
vars:
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
- { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
- name: Prepare nodes for upgrade
hosts: k8s_cluster:etcd:calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: download, tags: download, when: "not skip_downloads" }
- name: Upgrade container engine on non-cluster nodes
hosts: etcd:calico_rr:!k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
roles:
- { role: kubespray-defaults }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Handle upgrades to master components first to maintain backwards compat.
gather_facts: False
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: 1
roles:
- { role: kubespray-defaults }
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
- { role: kubernetes/client, tags: client }
- { role: kubernetes/node-label, tags: node-label }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- { role: kubernetes-apps, tags: csi-driver }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
hosts: kube_control_plane:calico_rr:kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
- { role: network_plugin, tags: network }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- name: Finally handle worker upgrades, based on given batch size
hosts: kube_node:calico_rr:!kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
roles:
- { role: kubespray-defaults }
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: kubernetes/node-label, tags: node-label }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: true
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: network }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

357
plugins/modules/kube.py

@@ -0,0 +1,357 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file(s).
- To operate on several files this can accept a comma separated list of files or a list of files.
aliases: [ 'files', 'file', 'filenames' ]
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
wait:
required: false
default: false
description:
- A flag to indicate to wait for resources to be created before continuing to the next step
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
recursive:
required: false
default: false
description:
- Process the directory used in -f, --filename recursively.
Useful when you want to manage related manifests organized
within the same directory.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
- name: test nginx and postgresql are present
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
- name: test nginx and postgresql are present
kube:
files:
- /tmp/nginx.yml
- /tmp/postgresql.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.wait = module.params.get('wait')
self.name = module.params.get('name')
self.filename = [f.strip() for f in module.params.get('filename') or []]
self.resource = module.params.get('resource')
self.label = module.params.get('label')
self.recursive = module.params.get('recursive')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True, force=True):
if check and self.exists():
return []
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def replace(self, force=True):
cmd = ['apply']
if force:
cmd.append('--force')
if self.wait:
cmd.append('--wait')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
return self._execute(cmd)
def exists(self):
cmd = ['get']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
cmd.append('--no-headers')
result = self._execute_nofail(cmd)
if not result:
return False
return True
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
if self.recursive:
cmd.append('--recursive={}'.format(self.recursive))
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
wait=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']),
recursive=dict(default=False, type='bool'),
),
mutually_exclusive=[['filename', 'list']]
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
result = manager.replace()
elif state == 'exists':
result = manager.exists()
module.exit_json(changed=changed,
msg='%s' % result)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()

35
recover-control-plane.yml

@@ -1,34 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: etcd[0]
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- role: recover_control_plane/etcd
when: etcd_deployment_type != "kubeadm"
- hosts: kube_control_plane[0]
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/control-plane }
- import_playbook: cluster.yml
- hosts: kube_control_plane
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: recover_control_plane/post-recover }
- name: Recover control plane
ansible.builtin.import_playbook: playbooks/recover-control-plane.yml

51
remove-node.yml

@@ -1,50 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
gather_facts: no
tasks:
- name: Confirm Execution
pause:
prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
register: pause_result
run_once: True
when:
- not (skip_confirmation | default(false) | bool)
- name: Fail if user does not confirm deletion
fail:
msg: "Delete nodes confirmation failed"
when: pause_result.user_input | default('yes') != 'yes'
- name: Gather facts
import_playbook: facts.yml
when: reset_nodes|default(True)|bool
- hosts: "{{ node | default('kube_node') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: remove-node/pre-remove, tags: pre-remove }
- { role: remove-node/remove-etcd-node }
- { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
- { role: remove-node/post-remove, tags: post-remove }
- name: Remove node
ansible.builtin.import_playbook: playbooks/remove-node.yml

40
reset.yml

@@ -1,39 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- name: Gather facts
import_playbook: facts.yml
- hosts: etcd:k8s_cluster:calico_rr
gather_facts: False
vars_prompt:
name: "reset_confirmation"
prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
default: "no"
private: no
pre_tasks:
- name: check confirmation
fail:
msg: "Reset confirmation failed"
when: reset_confirmation != "yes"
- name: Gather information about installed services
service_facts:
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
- { role: reset, tags: reset }
- name: Reset the cluster
ansible.builtin.import_playbook: playbooks/reset.yml

125
scale.yml

@@ -1,124 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- name: Bootstrap any new workers
hosts: kube_node
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os }
- name: Gather facts
tags: always
import_playbook: facts.yml
- name: Generate the etcd certificates beforehand
hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
- name: Target only workers to get kubelet installed and checking in on any new nodes (engine)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: download, tags: download, when: "not skip_downloads" }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Target only workers to get kubelet installed and checking in on any new nodes (node)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- name: Upload control plane certs and retrieve encryption key
hosts: kube_control_plane | first
environment: "{{ proxy_disable_env }}"
gather_facts: False
tags: kubeadm
roles:
- { role: kubespray-defaults }
tasks:
- name: Upload control plane certificates
command: >-
{{ bin_dir }}/kubeadm init phase
--config {{ kube_config_dir }}/kubeadm-config.yaml
upload-certs
--upload-certs
environment: "{{ proxy_disable_env }}"
register: kubeadm_upload_cert
changed_when: false
- name: set fact 'kubeadm_certificate_key' for later use
set_fact:
kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
when: kubeadm_certificate_key is not defined
- name: Target only workers to get kubelet installed and checking in on any new nodes (network)
hosts: kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: kubernetes/node-label, tags: node-label }
- { role: network_plugin, tags: network }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
- name: Scale the cluster
ansible.builtin.import_playbook: playbooks/scale.yml

1
tests/ansible.cfg

@@ -12,3 +12,4 @@ stdout_callback = skippy
library = ./library:../library
callbacks_enabled = profile_tasks
jinja2_extensions = jinja2.ext.do
roles_path = ../roles

9
tests/files/vagrant_ubuntu20-flannel-collection.rb

@@ -0,0 +1,9 @@
$os = "ubuntu2004"
# For CI we are not worried about data persistence across reboots
$libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2

3
tests/files/vagrant_ubuntu20-flannel-collection.yml

@@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel

18
tests/scripts/check_galaxy_version.sh

@@ -0,0 +1,18 @@
#!/bin/bash
set -e
version_from_galaxy=$(grep "^version:" galaxy.yml | awk '{print $2}')
version_from_docs=$(grep -P "^\s+version:\sv\d+\.\d+\.\d+" docs/ansible_collection.md | awk '{print $2}')
if [[ $KUBESPRAY_VERSION != "v${version_from_galaxy}" ]]
then
echo "Please update galaxy.yml version to match the KUBESPRAY_VERSION. Be sure to remove the \"v\" to adhere"
echo "to semenatic versioning"
exit 1
fi
if [[ $KUBESPRAY_VERSION != "${version_from_docs}" ]]
then
echo "Please update the documentation for Ansible collections under docs/ansible_collection.md to reflect the KUBESPRAY_VERSION"
exit 1
fi
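The script reads `KUBESPRAY_VERSION` from the environment (the CI exports it). Run locally, a successful check would look roughly like:

```ShellSession
$ KUBESPRAY_VERSION=v2.21.0 tests/scripts/check_galaxy_version.sh && echo "versions match"
versions match
```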

33
tests/scripts/testcases_run.sh

@@ -78,6 +78,39 @@ if [ "${RECOVER_CONTROL_PLANE_TEST}" != "false" ]; then
ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads -e etcd_retries=10 --limit etcd,kube_control_plane:!fake_hosts recover-control-plane.yml
fi
# Test collection build and install by installing our collection, emptying our repository, adding
# cluster.yml, reset.yml, and remove-node.yml files that simply point to our collection's playbooks, and then
# running the same tests as before
if [[ "${CI_JOB_NAME}" =~ "collection" ]]; then
# Build and install collection
ansible-galaxy collection build
ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
# Simply remove all of our files and directories except for our tests directory
# to be absolutely certain that none of our playbooks or roles
# are interfering with our collection
find -maxdepth 1 ! -name tests -exec rm -rfv {} \;
# Write cluster.yml
cat > cluster.yml <<EOF
- name: Install Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
EOF
# Write reset.yml
cat > reset.yml <<EOF
- name: Remove Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.reset
EOF
# Write remove-node.yml
cat > remove-node.yml <<EOF
- name: Remove node from Kubernetes
ansible.builtin.import_playbook: kubernetes_sigs.kubespray.remove-node
EOF
fi
# Tests Cases
## Test Master API
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL

171
upgrade-cluster.yml

@@ -1,170 +1,3 @@
---
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Ensure compatibility with old groups
import_playbook: legacy_groups.yml
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
- hosts: k8s_cluster:etcd:calico_rr
strategy: linear
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
environment: "{{ proxy_disable_env }}"
vars:
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false
roles:
- { role: kubespray-defaults }
- { role: bootstrap-os, tags: bootstrap-os}
- name: Gather facts
tags: always
import_playbook: facts.yml
- name: Download images to ansible host cache via first kube_control_plane node
hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
- { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
- { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
- name: Prepare nodes for upgrade
hosts: k8s_cluster:etcd:calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: download, tags: download, when: "not skip_downloads" }
- name: Upgrade container engine on non-cluster nodes
hosts: etcd:calico_rr:!k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
roles:
- { role: kubespray-defaults }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- hosts: etcd:kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: true
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
when: etcd_deployment_type != "kubeadm"
- hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- role: etcd
tags: etcd
vars:
etcd_cluster_setup: false
etcd_events_cluster_setup: false
when:
- etcd_deployment_type != "kubeadm"
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- name: Handle upgrades to master components first to maintain backwards compat.
gather_facts: False
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: 1
roles:
- { role: kubespray-defaults }
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
- { role: kubernetes/client, tags: client }
- { role: kubernetes/node-label, tags: node-label }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- { role: kubernetes-apps, tags: csi-driver }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
hosts: kube_control_plane:calico_rr:kube_node
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
- { role: network_plugin, tags: network }
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
- name: Finally handle worker upgrades, based on given batch size
hosts: kube_node:calico_rr:!kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
serial: "{{ serial | default('20%') }}"
roles:
- { role: kubespray-defaults }
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: container-engine, tags: "container-engine", when: deploy_container_engine }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: kubernetes/node-label, tags: node-label }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: true
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: calico_rr
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: network }
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- { role: kubernetes-apps, tags: apps }
- name: Apply resolv.conf changes now that cluster DNS is up
hosts: k8s_cluster
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
- name: Upgrade cluster
ansible.builtin.import_playbook: playbooks/upgrade-cluster.yml