28 changed files with 1104 additions and 899 deletions
Changed files (lines changed):

- .gitignore (4)
- .gitlab-ci/lint.yml (24)
- .gitlab-ci/vagrant.yml (5)
- README.md (4)
- cluster.yml (132)
- docs/ansible_collection.md (38)
- galaxy.yml (47)
- library/kube.py (357)
- library/kube.py (1)
- playbooks/ansible_version.yml (0)
- playbooks/cluster.yml (131)
- playbooks/facts.yml (0)
- playbooks/legacy_groups.yml (0)
- playbooks/recover-control-plane.yml (34)
- playbooks/remove-node.yml (50)
- playbooks/reset.yml (39)
- playbooks/scale.yml (124)
- playbooks/upgrade-cluster.yml (170)
- plugins/modules/kube.py (357)
- recover-control-plane.yml (35)
- remove-node.yml (51)
- reset.yml (40)
- scale.yml (125)
- tests/ansible.cfg (1)
- tests/files/vagrant_ubuntu20-flannel-collection.rb (9)
- tests/files/vagrant_ubuntu20-flannel-collection.yml (3)
- tests/scripts/check_galaxy_version.sh (18)
- tests/scripts/testcases_run.sh (33)
cluster.yml: @@ -1,131 +1,3 @@

Removed (the full install playbook, moved to playbooks/cluster.yml):

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

Added (cluster.yml is now a thin wrapper around the moved playbook):

---
- name: Install Kubernetes
  ansible.builtin.import_playbook: playbooks/cluster.yml
docs/ansible_collection.md (new file): @@ -0,0 +1,38 @@

# Ansible collection

Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html).

## Requirements

- An inventory file with the appropriate host groups. See the [README](../README.md#usage).
- A `group_vars` directory. These group variables **need** to match the appropriate variable names under `inventory/local/group_vars`. See the [README](../README.md#usage).

## Usage

1. Add Kubespray to your requirements.yml file

   ```yaml
   collections:
     - name: https://github.com/kubernetes-sigs/kubespray
       type: git
       version: v2.21.0
   ```

2. Install your collection

   ```ShellSession
   ansible-galaxy install -r requirements.yml
   ```

3. Create a playbook to install your Kubernetes cluster

   ```yaml
   - name: Install Kubernetes
     ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
   ```

4. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray

   ```ShellSession
   ansible-playbook -i INVENTORY --become --become-user=root PLAYBOOK
   ```
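
For collection users, the playbook from step 3 can be extended with local plays. The sketch below is illustrative only: the post-install task and the `site.yml` name are assumptions, not part of this change; it relies on the `kubernetes_sigs.kubespray.cluster` entry point and the `kube_control_plane` group documented above.

```yaml
# site.yml — a minimal downstream wrapper, assuming the collection from requirements.yml is installed
- name: Install Kubernetes
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster

# Illustrative follow-up play; the kubectl path via bin_dir is a Kubespray default, the check itself is an example
- name: Post-install smoke check
  hosts: kube_control_plane[0]
  gather_facts: false
  become: true
  tasks:
    - name: List cluster nodes
      ansible.builtin.command: "{{ bin_dir | default('/usr/local/bin') }}/kubectl get nodes -o wide"
      changed_when: false
```

Such a wrapper would then be run exactly as in step 4, with PLAYBOOK pointing at site.yml.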
galaxy.yml (new file): @@ -0,0 +1,47 @@

---
namespace: kubernetes_sigs
description: Deploy a production ready Kubernetes cluster
name: kubespray
version: 2.21.0
readme: README.md
authors:
  - luksi1
tags:
  - kubernetes
  - kubespray
repository: https://github.com/kubernetes-sigs/kubespray
build_ignore:
  - .github
  - '*.tar.gz'
  - extra_playbooks
  - inventory
  - scripts
  - test-infra
  - .ansible-lint
  - .editorconfig
  - .gitignore
  - .gitlab-ci
  - .gitlab-ci.yml
  - .gitmodules
  - .markdownlint.yaml
  - .nojekyll
  - .pre-commit-config.yaml
  - .yamllint
  - Dockerfile
  - FILES.json
  - MANIFEST.json
  - Makefile
  - Vagrantfile
  - _config.yml
  - ansible.cfg
  - requirements*txt
  - setup.cfg
  - setup.py
  - index.html
  - reset.yml
  - cluster.yml
  - scale.yml
  - recover-control-plane.yml
  - remove-node.yml
  - upgrade-cluster.yml
  - library
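
galaxy.yml is what `ansible-galaxy collection build` reads, and the `build_ignore` list keeps CI, test, and the legacy top-level wrapper playbooks out of the published artifact. As a rough sketch (the checkout path is an assumption, and the tarball name simply follows the usual namespace-name-version convention), a local build and install could look like:

```yaml
# build_collection.yml — illustrative local build/install of the collection described by galaxy.yml
- hosts: localhost
  gather_facts: false
  vars:
    kubespray_checkout: "{{ lookup('env', 'HOME') }}/kubespray"   # assumed checkout location
  tasks:
    - name: Build the collection tarball from galaxy.yml
      ansible.builtin.command: ansible-galaxy collection build --force
      args:
        chdir: "{{ kubespray_checkout }}"

    - name: Install the built artifact
      ansible.builtin.command: >-
        ansible-galaxy collection install --force
        {{ kubespray_checkout }}/kubernetes_sigs-kubespray-2.21.0.tar.gz
```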
library/kube.py (deleted; the module moves to plugins/modules/kube.py): @@ -1,357 +0,0 @@
|||||
#!/usr/bin/python |
|
||||
# -*- coding: utf-8 -*- |
|
||||
|
|
||||
DOCUMENTATION = """ |
|
||||
--- |
|
||||
module: kube |
|
||||
short_description: Manage Kubernetes Cluster |
|
||||
description: |
|
||||
- Create, replace, remove, and stop resources within a Kubernetes Cluster |
|
||||
version_added: "2.0" |
|
||||
options: |
|
||||
name: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The name associated with resource |
|
||||
filename: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The path and filename of the resource(s) definition file(s). |
|
||||
- To operate on several files this can accept a comma separated list of files or a list of files. |
|
||||
aliases: [ 'files', 'file', 'filenames' ] |
|
||||
kubectl: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The path to the kubectl bin |
|
||||
namespace: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The namespace associated with the resource(s) |
|
||||
resource: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc) |
|
||||
label: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The labels used to filter specific resources. |
|
||||
server: |
|
||||
required: false |
|
||||
default: null |
|
||||
description: |
|
||||
- The url for the API server that commands are executed against. |
|
||||
force: |
|
||||
required: false |
|
||||
default: false |
|
||||
description: |
|
||||
- A flag to indicate to force delete, replace, or stop. |
|
||||
wait: |
|
||||
required: false |
|
||||
default: false |
|
||||
description: |
|
||||
- A flag to indicate to wait for resources to be created before continuing to the next step |
|
||||
all: |
|
||||
required: false |
|
||||
default: false |
|
||||
description: |
|
||||
- A flag to indicate delete all, stop all, or all namespaces when checking exists. |
|
||||
log_level: |
|
||||
required: false |
|
||||
default: 0 |
|
||||
description: |
|
||||
- Indicates the level of verbosity of logging by kubectl. |
|
||||
state: |
|
||||
required: false |
|
||||
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped'] |
|
||||
default: present |
|
||||
description: |
|
||||
- present handles checking existence or creating if definition file provided, |
|
||||
absent handles deleting resource(s) based on other options, |
|
||||
latest handles creating or updating based on existence, |
|
||||
reloaded handles updating resource(s) definition using definition file, |
|
||||
stopped handles stopping resource(s) based on other options. |
|
||||
recursive: |
|
||||
required: false |
|
||||
default: false |
|
||||
description: |
|
||||
- Process the directory used in -f, --filename recursively. |
|
||||
Useful when you want to manage related manifests organized |
|
||||
within the same directory. |
|
||||
requirements: |
|
||||
- kubectl |
|
||||
author: "Kenny Jones (@kenjones-cisco)" |
|
||||
""" |
|
||||
|
|
||||
EXAMPLES = """ |
|
||||
- name: test nginx is present |
|
||||
kube: name=nginx resource=rc state=present |
|
||||
|
|
||||
- name: test nginx is stopped |
|
||||
kube: name=nginx resource=rc state=stopped |
|
||||
|
|
||||
- name: test nginx is absent |
|
||||
kube: name=nginx resource=rc state=absent |
|
||||
|
|
||||
- name: test nginx is present |
|
||||
kube: filename=/tmp/nginx.yml |
|
||||
|
|
||||
- name: test nginx and postgresql are present |
|
||||
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml |
|
||||
|
|
||||
- name: test nginx and postgresql are present |
|
||||
kube: |
|
||||
files: |
|
||||
- /tmp/nginx.yml |
|
||||
- /tmp/postgresql.yml |
|
||||
""" |
|
||||
|
|
||||
|
|
||||
class KubeManager(object): |
|
||||
|
|
||||
def __init__(self, module): |
|
||||
|
|
||||
self.module = module |
|
||||
|
|
||||
self.kubectl = module.params.get('kubectl') |
|
||||
if self.kubectl is None: |
|
||||
self.kubectl = module.get_bin_path('kubectl', True) |
|
||||
self.base_cmd = [self.kubectl] |
|
||||
|
|
||||
if module.params.get('server'): |
|
||||
self.base_cmd.append('--server=' + module.params.get('server')) |
|
||||
|
|
||||
if module.params.get('log_level'): |
|
||||
self.base_cmd.append('--v=' + str(module.params.get('log_level'))) |
|
||||
|
|
||||
if module.params.get('namespace'): |
|
||||
self.base_cmd.append('--namespace=' + module.params.get('namespace')) |
|
||||
|
|
||||
|
|
||||
self.all = module.params.get('all') |
|
||||
self.force = module.params.get('force') |
|
||||
self.wait = module.params.get('wait') |
|
||||
self.name = module.params.get('name') |
|
||||
self.filename = [f.strip() for f in module.params.get('filename') or []] |
|
||||
self.resource = module.params.get('resource') |
|
||||
self.label = module.params.get('label') |
|
||||
self.recursive = module.params.get('recursive') |
|
||||
|
|
||||
def _execute(self, cmd): |
|
||||
args = self.base_cmd + cmd |
|
||||
try: |
|
||||
rc, out, err = self.module.run_command(args) |
|
||||
if rc != 0: |
|
||||
self.module.fail_json( |
|
||||
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err)) |
|
||||
except Exception as exc: |
|
||||
self.module.fail_json( |
|
||||
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc))) |
|
||||
return out.splitlines() |
|
||||
|
|
||||
def _execute_nofail(self, cmd): |
|
||||
args = self.base_cmd + cmd |
|
||||
rc, out, err = self.module.run_command(args) |
|
||||
if rc != 0: |
|
||||
return None |
|
||||
return out.splitlines() |
|
||||
|
|
||||
def create(self, check=True, force=True): |
|
||||
if check and self.exists(): |
|
||||
return [] |
|
||||
|
|
||||
cmd = ['apply'] |
|
||||
|
|
||||
if force: |
|
||||
cmd.append('--force') |
|
||||
|
|
||||
if self.wait: |
|
||||
cmd.append('--wait') |
|
||||
|
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
|
|
||||
if not self.filename: |
|
||||
self.module.fail_json(msg='filename required to create') |
|
||||
|
|
||||
cmd.append('--filename=' + ','.join(self.filename)) |
|
||||
|
|
||||
return self._execute(cmd) |
|
||||
|
|
||||
def replace(self, force=True): |
|
||||
|
|
||||
cmd = ['apply'] |
|
||||
|
|
||||
if force: |
|
||||
cmd.append('--force') |
|
||||
|
|
||||
if self.wait: |
|
||||
cmd.append('--wait') |
|
||||
|
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
|
|
||||
if not self.filename: |
|
||||
self.module.fail_json(msg='filename required to reload') |
|
||||
|
|
||||
cmd.append('--filename=' + ','.join(self.filename)) |
|
||||
|
|
||||
return self._execute(cmd) |
|
||||
|
|
||||
def delete(self): |
|
||||
|
|
||||
if not self.force and not self.exists(): |
|
||||
return [] |
|
||||
|
|
||||
cmd = ['delete'] |
|
||||
|
|
||||
if self.filename: |
|
||||
cmd.append('--filename=' + ','.join(self.filename)) |
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
else: |
|
||||
if not self.resource: |
|
||||
self.module.fail_json(msg='resource required to delete without filename') |
|
||||
|
|
||||
cmd.append(self.resource) |
|
||||
|
|
||||
if self.name: |
|
||||
cmd.append(self.name) |
|
||||
|
|
||||
if self.label: |
|
||||
cmd.append('--selector=' + self.label) |
|
||||
|
|
||||
if self.all: |
|
||||
cmd.append('--all') |
|
||||
|
|
||||
if self.force: |
|
||||
cmd.append('--ignore-not-found') |
|
||||
|
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
|
|
||||
return self._execute(cmd) |
|
||||
|
|
||||
def exists(self): |
|
||||
cmd = ['get'] |
|
||||
|
|
||||
if self.filename: |
|
||||
cmd.append('--filename=' + ','.join(self.filename)) |
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
else: |
|
||||
if not self.resource: |
|
||||
self.module.fail_json(msg='resource required without filename') |
|
||||
|
|
||||
cmd.append(self.resource) |
|
||||
|
|
||||
if self.name: |
|
||||
cmd.append(self.name) |
|
||||
|
|
||||
if self.label: |
|
||||
cmd.append('--selector=' + self.label) |
|
||||
|
|
||||
if self.all: |
|
||||
cmd.append('--all-namespaces') |
|
||||
|
|
||||
cmd.append('--no-headers') |
|
||||
|
|
||||
result = self._execute_nofail(cmd) |
|
||||
if not result: |
|
||||
return False |
|
||||
return True |
|
||||
|
|
||||
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param? |
|
||||
def stop(self): |
|
||||
|
|
||||
if not self.force and not self.exists(): |
|
||||
return [] |
|
||||
|
|
||||
cmd = ['stop'] |
|
||||
|
|
||||
if self.filename: |
|
||||
cmd.append('--filename=' + ','.join(self.filename)) |
|
||||
if self.recursive: |
|
||||
cmd.append('--recursive={}'.format(self.recursive)) |
|
||||
else: |
|
||||
if not self.resource: |
|
||||
self.module.fail_json(msg='resource required to stop without filename') |
|
||||
|
|
||||
cmd.append(self.resource) |
|
||||
|
|
||||
if self.name: |
|
||||
cmd.append(self.name) |
|
||||
|
|
||||
if self.label: |
|
||||
cmd.append('--selector=' + self.label) |
|
||||
|
|
||||
if self.all: |
|
||||
cmd.append('--all') |
|
||||
|
|
||||
if self.force: |
|
||||
cmd.append('--ignore-not-found') |
|
||||
|
|
||||
return self._execute(cmd) |
|
||||
|
|
||||
|
|
||||
def main(): |
|
||||
|
|
||||
module = AnsibleModule( |
|
||||
argument_spec=dict( |
|
||||
name=dict(), |
|
||||
filename=dict(type='list', aliases=['files', 'file', 'filenames']), |
|
||||
namespace=dict(), |
|
||||
resource=dict(), |
|
||||
label=dict(), |
|
||||
server=dict(), |
|
||||
kubectl=dict(), |
|
||||
force=dict(default=False, type='bool'), |
|
||||
wait=dict(default=False, type='bool'), |
|
||||
all=dict(default=False, type='bool'), |
|
||||
log_level=dict(default=0, type='int'), |
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']), |
|
||||
recursive=dict(default=False, type='bool'), |
|
||||
), |
|
||||
mutually_exclusive=[['filename', 'list']] |
|
||||
) |
|
||||
|
|
||||
changed = False |
|
||||
|
|
||||
manager = KubeManager(module) |
|
||||
state = module.params.get('state') |
|
||||
if state == 'present': |
|
||||
result = manager.create(check=False) |
|
||||
|
|
||||
elif state == 'absent': |
|
||||
result = manager.delete() |
|
||||
|
|
||||
elif state == 'reloaded': |
|
||||
result = manager.replace() |
|
||||
|
|
||||
elif state == 'stopped': |
|
||||
result = manager.stop() |
|
||||
|
|
||||
elif state == 'latest': |
|
||||
result = manager.replace() |
|
||||
|
|
||||
elif state == 'exists': |
|
||||
result = manager.exists() |
|
||||
module.exit_json(changed=changed, |
|
||||
msg='%s' % result) |
|
||||
|
|
||||
else: |
|
||||
module.fail_json(msg='Unrecognized state %s.' % state) |
|
||||
|
|
||||
module.exit_json(changed=changed, |
|
||||
msg='success: %s' % (' '.join(result)) |
|
||||
) |
|
||||
|
|
||||
|
|
||||
from ansible.module_utils.basic import * # noqa |
|
||||
if __name__ == '__main__': |
|
||||
main() |
|
library/kube.py (new symlink): @@ -0,0 +1 @@

../plugins/modules/kube.py
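
The module itself now lives under `plugins/modules/` so that it ships with the collection, while the `library/kube.py` symlink keeps the short module name working for the playbooks in this repository. A hedged usage sketch, based only on the options in the module's DOCUMENTATION (the manifest path is a made-up example):

```yaml
# In-repo playbooks can keep calling the module by its short name (resolved via library/kube.py)
- name: Ensure an addon manifest is applied
  kube:
    filename: /etc/kubernetes/addons/example-addon.yml   # hypothetical path
    state: latest

# Once Kubespray is installed as a collection, the same module is addressable by its fully qualified name
- name: Ensure the nginx replication controller exists
  kubernetes_sigs.kubespray.kube:
    name: nginx
    resource: rc
    state: present
```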
playbooks/cluster.yml (new file): @@ -0,0 +1,131 @@
(The new file carries the same plays that were removed from the top-level cluster.yml above, unchanged.)
playbooks/recover-control-plane.yml (new file): @@ -0,0 +1,34 @@

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: etcd[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - role: recover_control_plane/etcd
      when: etcd_deployment_type != "kubeadm"

- hosts: kube_control_plane[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: recover_control_plane/control-plane }

- import_playbook: cluster.yml

- hosts: kube_control_plane
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: recover_control_plane/post-recover }
playbooks/remove-node.yml (new file): @@ -0,0 +1,50 @@

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
  gather_facts: no
  tasks:
    - name: Confirm Execution
      pause:
        prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
      register: pause_result
      run_once: True
      when:
        - not (skip_confirmation | default(false) | bool)

    - name: Fail if user does not confirm deletion
      fail:
        msg: "Delete nodes confirmation failed"
      when: pause_result.user_input | default('yes') != 'yes'

- name: Gather facts
  import_playbook: facts.yml
  when: reset_nodes|default(True)|bool

- hosts: "{{ node | default('kube_node') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/pre-remove, tags: pre-remove }
    - { role: remove-node/remove-etcd-node }
    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }

# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/post-remove, tags: post-remove }
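
The plays above key off a handful of variables: `node` selects what to remove, `skip_confirmation` suppresses the interactive pause, and `reset_nodes` controls whether the reset role runs. A hypothetical extra-vars file (the node name is an example) might look like:

```yaml
# remove-node-vars.yml — illustrative extra-vars for an unattended removal
node: worker-03            # host or group to remove; defaults to kube_node when unset
skip_confirmation: true    # skip the "Confirm Execution" pause
reset_nodes: true          # also wipe the node with the reset role (the default)
```

It would be passed to the playbook with `-e @remove-node-vars.yml`.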
playbooks/reset.yml (new file): @@ -0,0 +1,39 @@

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- name: Gather facts
  import_playbook: facts.yml

- hosts: etcd:k8s_cluster:calico_rr
  gather_facts: False
  vars_prompt:
    name: "reset_confirmation"
    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
    default: "no"
    private: no

  pre_tasks:
    - name: check confirmation
      fail:
        msg: "Reset confirmation failed"
      when: reset_confirmation != "yes"

    - name: Gather information about installed services
      service_facts:

  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
    - { role: reset, tags: reset }
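
Because `reset_confirmation` is a `vars_prompt` variable, supplying it up front (for example via extra vars) skips the prompt, which is how an unattended reset would typically be driven; the snippet below is only a sketch of that idea.

```yaml
# reset-vars.yml — illustrative extra-vars enabling a non-interactive reset
reset_confirmation: "yes"
```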
playbooks/scale.yml (new file): @@ -0,0 +1,124 @@

---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- name: Bootstrap any new workers
  hosts: kube_node
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- name: Generate the etcd certificates beforehand
  hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Download images to ansible host cache via first kube_control_plane node
  hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }

- name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Target only workers to get kubelet installed and checking in on any new nodes(node)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- name: Upload control plane certs and retrieve encryption key
  hosts: kube_control_plane | first
  environment: "{{ proxy_disable_env }}"
  gather_facts: False
  tags: kubeadm
  roles:
    - { role: kubespray-defaults }
  tasks:
    - name: Upload control plane certificates
      command: >-
        {{ bin_dir }}/kubeadm init phase
        --config {{ kube_config_dir }}/kubeadm-config.yaml
        upload-certs
        --upload-certs
      environment: "{{ proxy_disable_env }}"
      register: kubeadm_upload_cert
      changed_when: false
    - name: set fact 'kubeadm_certificate_key' for later use
      set_fact:
        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
      when: kubeadm_certificate_key is not defined

- name: Target only workers to get kubelet installed and checking in on any new nodes(network)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
playbooks/upgrade-cluster.yml (new file): @@ -0,0 +1,170 @@
|||||
|
--- |
||||
|
- name: Check ansible version |
||||
|
import_playbook: ansible_version.yml |
||||
|
|
||||
|
- name: Ensure compatibility with old groups |
||||
|
import_playbook: legacy_groups.yml |
||||
|
|
||||
|
- hosts: bastion[0] |
||||
|
gather_facts: False |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] } |
||||
|
|
||||
|
- hosts: k8s_cluster:etcd:calico_rr |
||||
|
strategy: linear |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
gather_facts: false |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
vars: |
||||
|
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining |
||||
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. |
||||
|
ansible_ssh_pipelining: false |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: bootstrap-os, tags: bootstrap-os} |
||||
|
|
||||
|
- name: Gather facts |
||||
|
tags: always |
||||
|
import_playbook: facts.yml |
||||
|
|
||||
|
- name: Download images to ansible host cache via first kube_control_plane node |
||||
|
hosts: kube_control_plane[0] |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"} |
||||
|
- { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" } |
||||
|
- { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" } |
||||
|
|
||||
|
- name: Prepare nodes for upgrade |
||||
|
hosts: k8s_cluster:etcd:calico_rr |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: kubernetes/preinstall, tags: preinstall } |
||||
|
- { role: download, tags: download, when: "not skip_downloads" } |
||||
|
|
||||
|
- name: Upgrade container engine on non-cluster nodes |
||||
|
hosts: etcd:calico_rr:!k8s_cluster |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
serial: "{{ serial | default('20%') }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: container-engine, tags: "container-engine", when: deploy_container_engine } |
||||
|
|
||||
|
- hosts: etcd:kube_control_plane |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- role: etcd |
||||
|
tags: etcd |
||||
|
vars: |
||||
|
etcd_cluster_setup: true |
||||
|
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" |
||||
|
when: etcd_deployment_type != "kubeadm" |
||||
|
|
||||
|
- hosts: k8s_cluster |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- role: etcd |
||||
|
tags: etcd |
||||
|
vars: |
||||
|
etcd_cluster_setup: false |
||||
|
etcd_events_cluster_setup: false |
||||
|
when: |
||||
|
- etcd_deployment_type != "kubeadm" |
||||
|
- kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool |
||||
|
- kube_network_plugin != "calico" or calico_datastore == "etcd" |
||||
|
|
||||
|
- name: Handle upgrades to master components first to maintain backwards compat. |
||||
|
gather_facts: False |
||||
|
hosts: kube_control_plane |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
serial: 1 |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: upgrade/pre-upgrade, tags: pre-upgrade } |
||||
|
- { role: container-engine, tags: "container-engine", when: deploy_container_engine } |
||||
|
- { role: kubernetes/node, tags: node } |
||||
|
- { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true } |
||||
|
- { role: kubernetes/client, tags: client } |
||||
|
- { role: kubernetes/node-label, tags: node-label } |
||||
|
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles } |
||||
|
- { role: kubernetes-apps, tags: csi-driver } |
||||
|
- { role: upgrade/post-upgrade, tags: post-upgrade } |
||||
|
|
||||
|
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes |
||||
|
hosts: kube_control_plane:calico_rr:kube_node |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
serial: "{{ serial | default('20%') }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller } |
||||
|
- { role: network_plugin, tags: network } |
||||
|
- { role: kubernetes-apps/network_plugin, tags: network } |
||||
|
- { role: kubernetes-apps/policy_controller, tags: policy-controller } |
||||
|
|
||||
|
- name: Finally handle worker upgrades, based on given batch size |
||||
|
hosts: kube_node:calico_rr:!kube_control_plane |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
serial: "{{ serial | default('20%') }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: upgrade/pre-upgrade, tags: pre-upgrade } |
||||
|
- { role: container-engine, tags: "container-engine", when: deploy_container_engine } |
||||
|
- { role: kubernetes/node, tags: node } |
||||
|
- { role: kubernetes/kubeadm, tags: kubeadm } |
||||
|
- { role: kubernetes/node-label, tags: node-label } |
||||
|
- { role: upgrade/post-upgrade, tags: post-upgrade } |
||||
|
|
||||
|
- hosts: kube_control_plane[0] |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: true |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] } |
||||
|
|
||||
|
- hosts: calico_rr |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: network_plugin/calico/rr, tags: network } |
||||
|
|
||||
|
- hosts: kube_control_plane |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller } |
||||
|
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner } |
||||
|
- { role: kubernetes-apps, tags: apps } |
||||
|
|
||||
|
- name: Apply resolv.conf changes now that cluster DNS is up |
||||
|
hosts: k8s_cluster |
||||
|
gather_facts: False |
||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" |
||||
|
environment: "{{ proxy_disable_env }}" |
||||
|
roles: |
||||
|
- { role: kubespray-defaults } |
||||
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true } |
plugins/modules/kube.py (new file, moved from library/kube.py): @@ -0,0 +1,357 @@
|||||
|
#!/usr/bin/python |
||||
|
# -*- coding: utf-8 -*- |
||||
|
|
||||
|
DOCUMENTATION = """ |
||||
|
--- |
||||
|
module: kube |
||||
|
short_description: Manage Kubernetes Cluster |
||||
|
description: |
||||
|
- Create, replace, remove, and stop resources within a Kubernetes Cluster |
||||
|
version_added: "2.0" |
||||
|
options: |
||||
|
name: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The name associated with resource |
||||
|
filename: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The path and filename of the resource(s) definition file(s). |
||||
|
- To operate on several files this can accept a comma separated list of files or a list of files. |
||||
|
aliases: [ 'files', 'file', 'filenames' ] |
||||
|
kubectl: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The path to the kubectl bin |
||||
|
namespace: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The namespace associated with the resource(s) |
||||
|
resource: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc) |
||||
|
label: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The labels used to filter specific resources. |
||||
|
server: |
||||
|
required: false |
||||
|
default: null |
||||
|
description: |
||||
|
- The url for the API server that commands are executed against. |
||||
|
force: |
||||
|
required: false |
||||
|
default: false |
||||
|
description: |
||||
|
- A flag to indicate to force delete, replace, or stop. |
||||
|
wait: |
||||
|
required: false |
||||
|
default: false |
||||
|
description: |
||||
|
- A flag to indicate to wait for resources to be created before continuing to the next step |
||||
|
all: |
||||
|
required: false |
||||
|
default: false |
||||
|
description: |
||||
|
- A flag to indicate delete all, stop all, or all namespaces when checking exists. |
||||
|
log_level: |
||||
|
required: false |
||||
|
default: 0 |
||||
|
description: |
||||
|
- Indicates the level of verbosity of logging by kubectl. |
||||
|
state: |
||||
|
required: false |
||||
|
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped'] |
||||
|
default: present |
||||
|
description: |
||||
|
- present handles checking existence or creating if definition file provided, |
||||
|
absent handles deleting resource(s) based on other options, |
||||
|
latest handles creating or updating based on existence, |
||||
|
reloaded handles updating resource(s) definition using definition file, |
||||
|
stopped handles stopping resource(s) based on other options. |
||||
|
recursive: |
||||
|
required: false |
||||
|
default: false |
||||
|
description: |
||||
|
- Process the directory used in -f, --filename recursively. |
||||
|
Useful when you want to manage related manifests organized |
||||
|
within the same directory. |
||||
|
requirements: |
||||
|
- kubectl |
||||
|
author: "Kenny Jones (@kenjones-cisco)" |
||||
|
""" |
||||
|
|
||||
|
EXAMPLES = """ |
||||
|
- name: test nginx is present |
||||
|
kube: name=nginx resource=rc state=present |
||||
|
|
||||
|
- name: test nginx is stopped |
||||
|
kube: name=nginx resource=rc state=stopped |
||||
|
|
||||
|
- name: test nginx is absent |
||||
|
kube: name=nginx resource=rc state=absent |
||||
|
|
||||
|
- name: test nginx is present |
||||
|
kube: filename=/tmp/nginx.yml |
||||
|
|
||||
|
- name: test nginx and postgresql are present |
||||
|
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml |
||||
|
|
||||
|
- name: test nginx and postgresql are present |
||||
|
kube: |
||||
|
files: |
||||
|
- /tmp/nginx.yml |
||||
|
- /tmp/postgresql.yml |
||||
|
""" |
||||
|
|
||||
|
|
||||
|
class KubeManager(object): |
||||
|
|
||||
|
def __init__(self, module): |
||||
|
|
||||
|
self.module = module |
||||
|
|
||||
|
self.kubectl = module.params.get('kubectl') |
||||
|
if self.kubectl is None: |
||||
|
self.kubectl = module.get_bin_path('kubectl', True) |
||||
|
self.base_cmd = [self.kubectl] |
||||
|
|
||||
|
if module.params.get('server'): |
||||
|
self.base_cmd.append('--server=' + module.params.get('server')) |
||||
|
|
||||
|
if module.params.get('log_level'): |
||||
|
self.base_cmd.append('--v=' + str(module.params.get('log_level'))) |
||||
|
|
||||
|
if module.params.get('namespace'): |
||||
|
self.base_cmd.append('--namespace=' + module.params.get('namespace')) |
||||
|
|
||||
|
|
||||
|
self.all = module.params.get('all') |
||||
|
self.force = module.params.get('force') |
||||
|
self.wait = module.params.get('wait') |
||||
|
self.name = module.params.get('name') |
||||
|
self.filename = [f.strip() for f in module.params.get('filename') or []] |
||||
|
self.resource = module.params.get('resource') |
||||
|
self.label = module.params.get('label') |
||||
|
self.recursive = module.params.get('recursive') |
||||
|
|
||||
|
def _execute(self, cmd): |
||||
|
args = self.base_cmd + cmd |
||||
|
try: |
||||
|
rc, out, err = self.module.run_command(args) |
||||
|
if rc != 0: |
||||
|
self.module.fail_json( |
||||
|
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err)) |
||||
|
except Exception as exc: |
||||
|
self.module.fail_json( |
||||
|
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc))) |
||||
|
return out.splitlines() |
||||
|
|
||||
|
def _execute_nofail(self, cmd): |
||||
|
args = self.base_cmd + cmd |
||||
|
rc, out, err = self.module.run_command(args) |
||||
|
if rc != 0: |
||||
|
return None |
||||
|
return out.splitlines() |
||||
|
|
||||
|
def create(self, check=True, force=True): |
||||
|
if check and self.exists(): |
||||
|
return [] |
||||
|
|
||||
|
cmd = ['apply'] |
||||
|
|
||||
|
if force: |
||||
|
cmd.append('--force') |
||||
|
|
||||
|
if self.wait: |
||||
|
cmd.append('--wait') |
||||
|
|
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
|
||||
|
if not self.filename: |
||||
|
self.module.fail_json(msg='filename required to create') |
||||
|
|
||||
|
cmd.append('--filename=' + ','.join(self.filename)) |
||||
|
|
||||
|
return self._execute(cmd) |
||||
|
|
||||
|
def replace(self, force=True): |
||||
|
|
||||
|
cmd = ['apply'] |
||||
|
|
||||
|
if force: |
||||
|
cmd.append('--force') |
||||
|
|
||||
|
if self.wait: |
||||
|
cmd.append('--wait') |
||||
|
|
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
|
||||
|
if not self.filename: |
||||
|
self.module.fail_json(msg='filename required to reload') |
||||
|
|
||||
|
cmd.append('--filename=' + ','.join(self.filename)) |
||||
|
|
||||
|
return self._execute(cmd) |
||||
|
|
||||
|
def delete(self): |
||||
|
|
||||
|
if not self.force and not self.exists(): |
||||
|
return [] |
||||
|
|
||||
|
cmd = ['delete'] |
||||
|
|
||||
|
if self.filename: |
||||
|
cmd.append('--filename=' + ','.join(self.filename)) |
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
else: |
||||
|
if not self.resource: |
||||
|
self.module.fail_json(msg='resource required to delete without filename') |
||||
|
|
||||
|
cmd.append(self.resource) |
||||
|
|
||||
|
if self.name: |
||||
|
cmd.append(self.name) |
||||
|
|
||||
|
if self.label: |
||||
|
cmd.append('--selector=' + self.label) |
||||
|
|
||||
|
if self.all: |
||||
|
cmd.append('--all') |
||||
|
|
||||
|
if self.force: |
||||
|
cmd.append('--ignore-not-found') |
||||
|
|
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
|
||||
|
return self._execute(cmd) |
||||
|
|
||||
|
def exists(self): |
||||
|
cmd = ['get'] |
||||
|
|
||||
|
if self.filename: |
||||
|
cmd.append('--filename=' + ','.join(self.filename)) |
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
else: |
||||
|
if not self.resource: |
||||
|
self.module.fail_json(msg='resource required without filename') |
||||
|
|
||||
|
cmd.append(self.resource) |
||||
|
|
||||
|
if self.name: |
||||
|
cmd.append(self.name) |
||||
|
|
||||
|
if self.label: |
||||
|
cmd.append('--selector=' + self.label) |
||||
|
|
||||
|
if self.all: |
||||
|
cmd.append('--all-namespaces') |
||||
|
|
||||
|
cmd.append('--no-headers') |
||||
|
|
||||
|
result = self._execute_nofail(cmd) |
||||
|
if not result: |
||||
|
return False |
||||
|
return True |
||||
|
|
||||
|
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param? |
||||
|
def stop(self): |
||||
|
|
||||
|
if not self.force and not self.exists(): |
||||
|
return [] |
||||
|
|
||||
|
cmd = ['stop'] |
||||
|
|
||||
|
if self.filename: |
||||
|
cmd.append('--filename=' + ','.join(self.filename)) |
||||
|
if self.recursive: |
||||
|
cmd.append('--recursive={}'.format(self.recursive)) |
||||
|
else: |
||||
|
if not self.resource: |
||||
|
self.module.fail_json(msg='resource required to stop without filename') |
||||
|
|
||||
|
cmd.append(self.resource) |
||||
|
|
||||
|
if self.name: |
||||
|
cmd.append(self.name) |
||||
|
|
||||
|
if self.label: |
||||
|
cmd.append('--selector=' + self.label) |
||||
|
|
||||
|
if self.all: |
||||
|
cmd.append('--all') |
||||
|
|
||||
|
if self.force: |
||||
|
cmd.append('--ignore-not-found') |
||||
|
|
||||
|
return self._execute(cmd) |
||||
|
|
||||
|
|
||||
|
def main(): |
||||
|
|
||||
|
module = AnsibleModule( |
||||
|
argument_spec=dict( |
||||
|
name=dict(), |
||||
|
filename=dict(type='list', aliases=['files', 'file', 'filenames']), |
||||
|
namespace=dict(), |
||||
|
resource=dict(), |
||||
|
label=dict(), |
||||
|
server=dict(), |
||||
|
kubectl=dict(), |
||||
|
force=dict(default=False, type='bool'), |
||||
|
wait=dict(default=False, type='bool'), |
||||
|
all=dict(default=False, type='bool'), |
||||
|
log_level=dict(default=0, type='int'), |
||||
|
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']), |
||||
|
recursive=dict(default=False, type='bool'), |
||||
|
), |
||||
|
mutually_exclusive=[['filename', 'list']] |
||||
|
) |
||||
|
|
||||
|
changed = False |
||||
|
|
||||
|
manager = KubeManager(module) |
||||
|
state = module.params.get('state') |
||||
|
if state == 'present': |
||||
|
result = manager.create(check=False) |
||||
|
|
||||
|
elif state == 'absent': |
||||
|
result = manager.delete() |
||||
|
|
||||
|
elif state == 'reloaded': |
||||
|
result = manager.replace() |
||||
|
|
||||
|
elif state == 'stopped': |
||||
|
result = manager.stop() |
||||
|
|
||||
|
elif state == 'latest': |
||||
|
result = manager.replace() |
||||
|
|
||||
|
elif state == 'exists': |
||||
|
result = manager.exists() |
||||
|
module.exit_json(changed=changed, |
||||
|
msg='%s' % result) |
||||
|
|
||||
|
else: |
||||
|
module.fail_json(msg='Unrecognized state %s.' % state) |
||||
|
|
||||
|
module.exit_json(changed=changed, |
||||
|
msg='success: %s' % (' '.join(result)) |
||||
|
) |
||||
|
|
||||
|
|
||||
|
from ansible.module_utils.basic import * # noqa |
||||
|
if __name__ == '__main__': |
||||
|
main() |
recover-control-plane.yml: @@ -1,34 +1,3 @@

Removed: the previous recovery plays (identical to the playbooks/recover-control-plane.yml content added above).

Added (a thin wrapper around the moved playbook):

---
- name: Recover control plane
  ansible.builtin.import_playbook: playbooks/recover-control-plane.yml
@ -1,50 +1,3 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
  gather_facts: no
  tasks:
    - name: Confirm Execution
      pause:
        prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
      register: pause_result
      run_once: True
      when:
        - not (skip_confirmation | default(false) | bool)

    - name: Fail if user does not confirm deletion
      fail:
        msg: "Delete nodes confirmation failed"
      when: pause_result.user_input | default('yes') != 'yes'

- name: Gather facts
  import_playbook: facts.yml
  when: reset_nodes|default(True)|bool

- hosts: "{{ node | default('kube_node') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/pre-remove, tags: pre-remove }
    - { role: remove-node/remove-etcd-node }
    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }

# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/post-remove, tags: post-remove }

- name: Remove node
  ansible.builtin.import_playbook: playbooks/remove-node.yml
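As the removed plays above show, the target host comes from the `node` extra var and the confirmation prompt can be bypassed with `skip_confirmation`. A hedged invocation sketch (inventory path and node name are illustrative assumptions):

    ansible-playbook -i inventory/mycluster/hosts.yaml -b remove-node.yml -e node=node5 -e skip_confirmation=true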
@ -1,39 +1,3 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- name: Gather facts
  import_playbook: facts.yml

- hosts: etcd:k8s_cluster:calico_rr
  gather_facts: False
  vars_prompt:
    name: "reset_confirmation"
    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
    default: "no"
    private: no

  pre_tasks:
    - name: check confirmation
      fail:
        msg: "Reset confirmation failed"
      when: reset_confirmation != "yes"

    - name: Gather information about installed services
      service_facts:

  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
    - { role: reset, tags: reset }

- name: Reset the cluster
  ansible.builtin.import_playbook: playbooks/reset.yml
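The removed play prompts for `reset_confirmation`; for unattended runs it can be supplied as an extra var. A hedged invocation sketch (the inventory path is an illustrative assumption):

    ansible-playbook -i inventory/mycluster/hosts.yaml -b reset.yml -e reset_confirmation=yes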
@ -1,124 +1,3 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- name: Bootstrap any new workers
  hosts: kube_node
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- name: Generate the etcd certificates beforehand
  hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Download images to ansible host cache via first kube_control_plane node
  hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }

- name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Target only workers to get kubelet installed and checking in on any new nodes(node)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- name: Upload control plane certs and retrieve encryption key
  hosts: kube_control_plane | first
  environment: "{{ proxy_disable_env }}"
  gather_facts: False
  tags: kubeadm
  roles:
    - { role: kubespray-defaults }
  tasks:
    - name: Upload control plane certificates
      command: >-
        {{ bin_dir }}/kubeadm init phase
        --config {{ kube_config_dir }}/kubeadm-config.yaml
        upload-certs
        --upload-certs
      environment: "{{ proxy_disable_env }}"
      register: kubeadm_upload_cert
      changed_when: false
    - name: set fact 'kubeadm_certificate_key' for later use
      set_fact:
        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
      when: kubeadm_certificate_key is not defined

- name: Target only workers to get kubelet installed and checking in on any new nodes(network)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

- name: Scale the cluster
  ansible.builtin.import_playbook: playbooks/scale.yml
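A hedged invocation sketch for scaling via the relocated playbook (inventory path and node name are illustrative assumptions; `--limit` restricts the run to the node being added):

    ansible-playbook -i inventory/mycluster/hosts.yaml -b scale.yml --limit=node6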
@ -0,0 +1,9 @@
$os = "ubuntu2004"

# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"

# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
$vm_cpus = 2
@ -0,0 +1,3 @@
---
# Kubespray settings
kube_network_plugin: flannel
@ -0,0 +1,18 @@
#!/bin/bash
set -e

version_from_galaxy=$(grep "^version:" galaxy.yml | awk '{print $2}')
version_from_docs=$(grep -P "^\s+version:\sv\d+\.\d+\.\d+" docs/ansible_collection.md | awk '{print $2}')

if [[ $KUBESPRAY_VERSION != "v${version_from_galaxy}" ]]
then
    echo "Please update galaxy.yml version to match the KUBESPRAY_VERSION. Be sure to remove the \"v\" to adhere"
    echo "to semantic versioning"
    exit 1
fi

if [[ $KUBESPRAY_VERSION != "${version_from_docs}" ]]
then
    echo "Please update the documentation for Ansible collections under docs/ansible_collection.md to reflect the KUBESPRAY_VERSION"
    exit 1
fi
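The script reads the expected release tag from the KUBESPRAY_VERSION environment variable and compares it against galaxy.yml and docs/ansible_collection.md. A hedged local invocation sketch (the version value is an illustrative assumption):

    KUBESPRAY_VERSION=v2.18.0 tests/scripts/check_galaxy_version.sh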
@ -1,170 +1,3 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd:calico_rr
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- name: Download images to ansible host cache via first kube_control_plane node
  hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }

- name: Prepare nodes for upgrade
  hosts: k8s_cluster:etcd:calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: download, tags: download, when: "not skip_downloads" }

- name: Upgrade container engine on non-cluster nodes
  hosts: etcd:calico_rr:!k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }

- hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Handle upgrades to master components first to maintain backwards compat.
  gather_facts: False
  hosts: kube_control_plane
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: 1
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
    - { role: kubernetes-apps, tags: csi-driver }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
  hosts: kube_control_plane:calico_rr:kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: network_plugin, tags: network }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }

- name: Finally handle worker upgrades, based on given batch size
  hosts: kube_node:calico_rr:!kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: true
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: network }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

- name: Upgrade cluster
  ansible.builtin.import_playbook: playbooks/upgrade-cluster.yml
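The removed plays above batch worker upgrades with `serial` (defaulting to 20%). A hedged invocation sketch of the relocated playbook (inventory path and batch size are illustrative assumptions):

    ansible-playbook -i inventory/mycluster/hosts.yaml -b upgrade-cluster.yml -e serial=2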