From f70381456141921e244f79c52c6fc508e8da7d6d Mon Sep 17 00:00:00 2001
From: Sascha Marcel Schmidt
Date: Thu, 5 Jul 2018 02:15:05 +0200
Subject: [PATCH] add tear down playbook

---
 contrib/network-storage/heketi/README.md     |  5 ++
 .../heketi/heketi-tear-down.yml              |  4 ++
 .../heketi/roles/tear-down/tasks/disks.yml   | 51 ++++++++++++++++++
 .../heketi/roles/tear-down/tasks/main.yml    | 53 +++++++++++++++++++
 4 files changed, 113 insertions(+)
 create mode 100644 contrib/network-storage/heketi/heketi-tear-down.yml
 create mode 100644 contrib/network-storage/heketi/roles/tear-down/tasks/disks.yml
 create mode 100644 contrib/network-storage/heketi/roles/tear-down/tasks/main.yml

diff --git a/contrib/network-storage/heketi/README.md b/contrib/network-storage/heketi/README.md
index 4ea825b7b..aa1b656e5 100644
--- a/contrib/network-storage/heketi/README.md
+++ b/contrib/network-storage/heketi/README.md
@@ -9,3 +9,8 @@ Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml
 ```
 ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
 ```
+
+## Tear down
+```
+ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
+```
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
new file mode 100644
index 000000000..da2c05eb7
--- /dev/null
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -0,0 +1,4 @@
+---
+- hosts: localhost
+  roles:
+    - { role: tear-down }
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/disks.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/disks.yml
new file mode 100644
index 000000000..dace142ed
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/disks.yml
@@ -0,0 +1,51 @@
+---
+- name: "Install lvm utils (RedHat)"
+  delegate_to: "{{ node }}"
+  become: true
+  yum:
+    name: "lvm2"
+    state: "present"
+  when: "ansible_os_family == 'RedHat'"
+
+- name: "Install lvm utils (Debian)"
+  delegate_to: "{{ node }}"
+  become: true
+  apt:
+    name: "lvm2"
+    state: "present"
+  when: "ansible_os_family == 'Debian'"
+- name: "Get volume group information."
+  delegate_to: "{{ node }}"
+  become: true
+  shell: "pvs {{ disk }} --option vg_name | tail -n+2"
+  vars: { disk: "{{ hostvars[node]['disk_volume_device_1'] }}" }
+  register: "volume_groups"
+  ignore_errors: true
+  changed_when: false
+- name: "Remove volume groups."
+  delegate_to: "{{ node }}"
+  become: true
+  command: "vgremove {{ volume_group }} --yes"
+  with_items: "{{ volume_groups.stdout_lines }}"
+  loop_control: { loop_var: "volume_group" }
+- name: "Remove physical volume from cluster disks."
+  delegate_to: "{{ node }}"
+  become: true
+  command: "pvremove {{ disk }} --yes"
+  vars: { disk: "{{ hostvars[node]['disk_volume_device_1'] }}" }
+  ignore_errors: true
+- name: "Remove lvm utils (RedHat)"
+  delegate_to: "{{ node }}"
+  become: true
+  yum:
+    name: "lvm2"
+    state: "absent"
+  when: "ansible_os_family == 'RedHat'"
+
+- name: "Remove lvm utils (Debian)"
+  delegate_to: "{{ node }}"
+  become: true
+  apt:
+    name: "lvm2"
+    state: "absent"
+  when: "ansible_os_family == 'Debian'"
diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
new file mode 100644
index 000000000..a62d59d97
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: "Tear down heketi."
+  command: "kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
+  ignore_errors: true
+- name: "Tear down heketi."
+  command: "kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
+  ignore_errors: true
+- name: "Tear down bootstrap."
+  include_tasks: "../provision/tasks/setup/tear-down-bootstrap.yml"
+- name: "Ensure there is nothing left over."
+  command: "kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
+  register: "heketi_result"
+  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  retries: 60
+  delay: 5
+- name: "Ensure there is nothing left over."
+  command: "kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
+  register: "heketi_result"
+  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  retries: 60
+  delay: 5
+- name: "Tear down glusterfs."
+  command: "kubectl delete daemonset.extensions/glusterfs"
+  ignore_errors: true
+- name: "Remove heketi storage service."
+  command: "kubectl delete service heketi-storage-endpoints"
+  ignore_errors: true
+- name: "Remove heketi gluster role binding"
+  command: "kubectl delete clusterrolebinding heketi-gluster-admin"
+  ignore_errors: true
+- name: "Remove heketi config secret"
+  command: "kubectl delete secret heketi-config-secret"
+  ignore_errors: true
+- name: "Remove heketi db backup"
+  command: "kubectl delete secret heketi-db-backup"
+  ignore_errors: true
+- name: "Remove heketi service account"
+  command: "kubectl delete serviceaccount heketi-service-account"
+  ignore_errors: true
+- name: "Get secrets"
+  command: "kubectl get secrets --output=\"json\""
+  register: "secrets"
+  changed_when: false
+- name: "Remove heketi storage secret"
+  vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
+  command: "kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
+  when: "storage_query is defined"
+  ignore_errors: true
+- name: "Prepare cluster disks."
+  include_tasks: "disks.yml"
+  with_items: "{{ groups['heketi-node'] }}"
+  loop_control:
+    loop_var: "node"
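
For context, the tear-down role relies on the inventory passed via `-i`: `main.yml` loops over `groups['heketi-node']` and `disks.yml` reads `hostvars[node]['disk_volume_device_1']` on each of those nodes. Below is a minimal sketch of an inventory that satisfies those expectations; it is illustrative only and not part of the patch, and the host name, address, and device path are placeholders rather than values from the repository's `inventory.yml.sample`.

```yaml
# Hypothetical inventory sketch for illustration only.
# The group name "heketi-node" and the per-host variable "disk_volume_device_1"
# are what the tear-down role expects; the host name, IP, and device path are made up.
all:
  hosts:
    node1:
      ansible_host: 192.0.2.11
      disk_volume_device_1: /dev/vdb
  children:
    heketi-node:
      hosts:
        node1:
```

With an inventory of this shape, the command added to the README first deletes the Heketi and GlusterFS objects from the cluster and then removes the LVM volume groups and physical volume from each listed node's `disk_volume_device_1` device.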