diff --git a/contrib/network-storage/heketi/README.md b/contrib/network-storage/heketi/README.md
new file mode 100644
index 000000000..aa1b656e5
--- /dev/null
+++ b/contrib/network-storage/heketi/README.md
@@ -0,0 +1,16 @@
+# Deploy Heketi/Glusterfs into Kubespray/Kubernetes
+This playbook automates [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys Heketi and GlusterFS into Kubernetes and sets up a StorageClass.
+
+## Client Setup
+Heketi provides a CLI for administering the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
+
+## Install
+Copy `inventory.yml.sample` to `inventory/sample/k8s_heketi_inventory.yml` and adjust it to match your setup.
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
+```
+
+## Tear down
+```
+ansible-playbook --ask-become-pass -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
+```
diff --git a/contrib/network-storage/heketi/heketi-tear-down.yml b/contrib/network-storage/heketi/heketi-tear-down.yml
new file mode 100644
index 000000000..92b9f92d6
--- /dev/null
+++ b/contrib/network-storage/heketi/heketi-tear-down.yml
@@ -0,0 +1,9 @@
+---
+- hosts: kube-master[0]
+  roles:
+    - { role: tear-down }
+
+- hosts: heketi-node
+  become: yes
+  roles:
+    - { role: tear-down-disks }
diff --git a/contrib/network-storage/heketi/heketi.yml b/contrib/network-storage/heketi/heketi.yml
new file mode 100644
index 000000000..3ec719e95
--- /dev/null
+++ b/contrib/network-storage/heketi/heketi.yml
@@ -0,0 +1,10 @@
+---
+- hosts: heketi-node
+  roles:
+    - { role: prepare }
+
+- hosts: kube-master[0]
+  tags:
+    - "provision"
+  roles:
+    - { role: provision }
diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample
new file mode 100644
index 000000000..7d488d1ba
--- /dev/null
+++ b/contrib/network-storage/heketi/inventory.yml.sample
@@ -0,0 +1,26 @@
+all:
+  vars:
+    heketi_admin_key: "11elfeinhundertundelf"
+    heketi_user_key: "!!einseinseins"
+  children:
+    k8s-cluster:
+      vars:
+        kubelet_fail_swap_on: false
+      children:
+        kube-master:
+          hosts:
+            node1:
+        etcd:
+          hosts:
+            node2:
+        kube-node:
+          hosts: &kube_nodes
+            node1:
+            node2:
+            node3:
+            node4:
+        heketi-node:
+          vars:
+            disk_volume_device_1: "/dev/vdb"
+          hosts:
+            <<: *kube_nodes
diff --git a/contrib/network-storage/heketi/requirements.txt b/contrib/network-storage/heketi/requirements.txt
new file mode 100644
index 000000000..45c1e038e
--- /dev/null
+++ b/contrib/network-storage/heketi/requirements.txt
@@ -0,0 +1 @@
+jmespath
diff --git a/contrib/network-storage/heketi/roles/prepare/tasks/main.yml b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
new file mode 100644
index 000000000..e4db23365
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/prepare/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: "Load lvm kernel modules"
+  become: true
+  with_items:
+    - "dm_snapshot"
+    - "dm_mirror"
+    - "dm_thin_pool"
+  modprobe:
+    name: "{{ item }}"
+    state: "present"
+
+- name: "Install glusterfs mount utils (RedHat)"
+  become: true
+  yum:
+    name: "glusterfs-fuse"
+    state: "present"
+  when: "ansible_os_family == 'RedHat'"
+
+- name: "Install glusterfs mount utils (Debian)"
+  become: true
+  apt:
+    name: "glusterfs-client"
+    state: "present"
+  when: "ansible_os_family == 'Debian'"
diff --git a/contrib/network-storage/heketi/roles/provision/defaults/main.yml b/contrib/network-storage/heketi/roles/provision/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/contrib/network-storage/heketi/roles/provision/handlers/main.yml b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
new file mode 100644
index 000000000..9e876de17
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: "stop port forwarding"
+  command: "killall "
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
new file mode 100644
index 000000000..572913a63
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap.yml
@@ -0,0 +1,56 @@
+# Bootstrap heketi
+- name: "Get state of heketi service, deployment and pods."
+  register: "initial_heketi_state"
+  changed_when: false
+  command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
+- name: "Bootstrap heketi."
+  when:
+    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
+    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
+    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
+  include_tasks: "bootstrap/deploy.yml"
+
+# Prepare heketi topology
+- name: "Get heketi initial pod state."
+  register: "initial_heketi_pod"
+  command: "{{ bin_dir }}/kubectl get pods --selector=deploy-heketi=pod,glusterfs=heketi-pod,name=deploy-heketi --output=json"
+  changed_when: false
+- name: "Ensure heketi bootstrap pod is up."
+  assert:
+    that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"
+- set_fact:
+    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"
+- name: "Test heketi topology."
+  changed_when: false
+  register: "heketi_topology"
+  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"
+- name: "Load heketi topology."
+  when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
+  include_tasks: "bootstrap/topology.yml"
+
+# Provision heketi database volume
+- name: "Prepare heketi volumes."
+  include_tasks: "bootstrap/volumes.yml"
+
+# Remove bootstrap heketi
+- name: "Tear down bootstrap."
+  include_tasks: "bootstrap/tear-down.yml"
+
+# Prepare heketi storage
+- name: "Test heketi storage."
+  command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json"
+  changed_when: false
+  register: "heketi_storage_state"
+# ensure endpoints actually exist before trying to move database data to it
+- name: "Create heketi storage."
+ include_tasks: "bootstrap/storage.yml" + vars: + secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']" + endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']" + service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" + when: + - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml new file mode 100644 index 000000000..3037d8b77 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml @@ -0,0 +1,24 @@ +--- +- name: "Kubernetes Apps | Lay Down Heketi Bootstrap" + become: true + template: { src: "heketi-bootstrap.json.j2", dest: "{{ kube_config_dir }}/heketi-bootstrap.json" } + register: "rendering" +- name: "Kubernetes Apps | Install and configure Heketi Bootstrap" + kube: + name: "GlusterFS" + kubectl: "{{bin_dir}}/kubectl" + filename: "{{ kube_config_dir }}/heketi-bootstrap.json" + state: "{{ rendering.changed | ternary('latest', 'present') }}" +- name: "Wait for heketi bootstrap to complete." + changed_when: false + register: "initial_heketi_state" + vars: + initial_heketi_state: { stdout: "{}" } + pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]" + deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]" + command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json" + until: + - "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'" + - "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'" + retries: 60 + delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml new file mode 100644 index 000000000..be3c42caf --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml @@ -0,0 +1,33 @@ +--- +- name: "Test heketi storage." + command: "{{ bin_dir }}/kubectl get secrets,endpoints,services,jobs --output=json" + changed_when: false + register: "heketi_storage_state" +- name: "Create heketi storage." 
+ kube: + name: "GlusterFS" + kubectl: "{{bin_dir}}/kubectl" + filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json" + state: "present" + vars: + secret_query: "items[?metadata.name=='heketi-storage-secret' && kind=='Secret']" + endpoints_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Endpoints']" + service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']" + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']" + when: + - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0" + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0" + register: "heketi_storage_result" +- name: "Get state of heketi database copy job." + command: "{{ bin_dir }}/kubectl get jobs --output=json" + changed_when: false + register: "heketi_storage_state" + vars: + heketi_storage_state: { stdout: "{}" } + job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job' && status.succeeded==1]" + until: + - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 1" + retries: 60 + delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml new file mode 100644 index 000000000..0ffd6f469 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/tear-down.yml @@ -0,0 +1,14 @@ +--- +- name: "Get existing Heketi deploy resources." + command: "{{ bin_dir }}/kubectl get all --selector=\"deploy-heketi\" -o=json" + register: "heketi_resources" + changed_when: false +- name: "Delete bootstrap Heketi." + command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\"" + when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0" +- name: "Ensure there is nothing left over." + command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json" + register: "heketi_result" + until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0" + retries: 60 + delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml new file mode 100644 index 000000000..7d2c5981e --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/topology.yml @@ -0,0 +1,26 @@ +--- +- name: "Get heketi topology." + changed_when: false + register: "heketi_topology" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" +- name: "Render heketi topology template." + become: true + vars: { nodes: "{{ groups['heketi-node'] }}" } + register: "render" + template: + src: "topology.json.j2" + dest: "{{ kube_config_dir }}/topology.json" +- name: "Copy topology configuration into container." + changed_when: false + command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" +- name: "Load heketi topology." 
+ when: "render.changed" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" + register: "load_heketi" +- name: "Get heketi topology." + changed_when: false + register: "heketi_topology" + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" + until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + retries: 60 + delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml new file mode 100644 index 000000000..d5da1a125 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/volumes.yml @@ -0,0 +1,41 @@ +--- +- name: "Get heketi volume ids." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json" + changed_when: false + register: "heketi_volumes" +- name: "Get heketi volumes." + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" + with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + loop_control: { loop_var: "volume_id" } + register: "volumes_information" +- name: "Test heketi database volume." + set_fact: { heketi_database_volume_exists: true } + with_items: "{{ volumes_information.results }}" + loop_control: { loop_var: "volume_information" } + vars: { volume: "{{ volume_information.stdout|from_json }}" } + when: "volume.name == 'heketidbstorage'" +- name: "Provision database volume." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage" + when: "heketi_database_volume_exists is undefined" +- name: "Copy configuration from pod." + become: true + command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json" +- name: "Get heketi volume ids." + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume list --json" + changed_when: false + register: "heketi_volumes" +- name: "Get heketi volumes." + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} volume info {{ volume_id }} --json" + with_items: "{{ heketi_volumes.stdout|from_json|json_query(\"volumes[*]\") }}" + loop_control: { loop_var: "volume_id" } + register: "volumes_information" +- name: "Test heketi database volume." + set_fact: { heketi_database_volume_created: true } + with_items: "{{ volumes_information.results }}" + loop_control: { loop_var: "volume_information" } + vars: { volume: "{{ volume_information.stdout|from_json }}" } + when: "volume.name == 'heketidbstorage'" +- name: "Ensure heketi database volume exists." + assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." 
}
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml b/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml
new file mode 100644
index 000000000..238f29bc2
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml
@@ -0,0 +1,4 @@
+---
+- name: "Clean up left over jobs."
+  command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\""
+  changed_when: false
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
new file mode 100644
index 000000000..e46159969
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml
@@ -0,0 +1,38 @@
+---
+- name: "Kubernetes Apps | Lay Down GlusterFS Daemonset"
+  template: { src: "glusterfs-daemonset.json.j2", dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" }
+  become: true
+  register: "rendering"
+- name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
+  kube:
+    name: "GlusterFS"
+    kubectl: "{{bin_dir}}/kubectl"
+    filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
+    state: "{{ rendering.changed | ternary('latest', 'present') }}"
+- name: "Kubernetes Apps | Label GlusterFS nodes"
+  include_tasks: "glusterfs/label.yml"
+  with_items: "{{ groups['heketi-node'] }}"
+  loop_control:
+    loop_var: "node"
+- name: "Kubernetes Apps | Wait for daemonset to become available."
+  register: "daemonset_state"
+  command: "{{ bin_dir }}/kubectl get daemonset glusterfs --output=json --ignore-not-found=true"
+  changed_when: false
+  vars:
+    daemonset_state: { stdout: "{}" }
+    ready: "{{ daemonset_state.stdout|from_json|json_query(\"status.numberReady\") }}"
+    desired: "{{ daemonset_state.stdout|from_json|json_query(\"status.desiredNumberScheduled\") }}"
+  until: "ready|int >= 3"
+  retries: 60
+  delay: 5
+
+- name: "Kubernetes Apps | Lay Down Heketi Service Account"
+  template: { src: "heketi-service-account.json.j2", dest: "{{ kube_config_dir }}/heketi-service-account.json" }
+  become: true
+  register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi Service Account"
+  kube:
+    name: "GlusterFS"
+    kubectl: "{{bin_dir}}/kubectl"
+    filename: "{{ kube_config_dir }}/heketi-service-account.json"
+    state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
new file mode 100644
index 000000000..61729a5e2
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs/label.yml
@@ -0,0 +1,11 @@
+---
+- register: "label_present"
+  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
+  changed_when: false
+- name: "Assign storage label"
+  when: "label_present.stdout_lines|length == 0"
+  command: "{{ bin_dir }}/kubectl label node {{ node }} storagenode=glusterfs"
+- register: "label_present"
+  command: "{{ bin_dir }}/kubectl get node --selector=storagenode=glusterfs,kubernetes.io/hostname={{ node }} --ignore-not-found=true"
+  changed_when: false
+- assert: { that: "label_present.stdout_lines|length > 0", msg: "Node {{ node }} has not been assigned the label storagenode=glusterfs." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
new file mode 100644
index 000000000..029baef94
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml
@@ -0,0 +1,26 @@
+---
+- name: "Kubernetes Apps | Lay Down Heketi"
+  become: true
+  template: { src: "heketi-deployment.json.j2", dest: "{{ kube_config_dir }}/heketi-deployment.json" }
+  register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi"
+  kube:
+    name: "GlusterFS"
+    kubectl: "{{bin_dir}}/kubectl"
+    filename: "{{ kube_config_dir }}/heketi-deployment.json"
+    state: "{{ rendering.changed | ternary('latest', 'present') }}"
+- name: "Ensure heketi is up and running."
+  changed_when: false
+  register: "heketi_state"
+  vars:
+    heketi_state: { stdout: "{}" }
+    pods_query: "items[?kind=='Pod'].status.conditions|[0][?type=='Ready'].status|[0]"
+    deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
+  command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
+  until:
+    - "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
+    - "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
+  retries: 60
+  delay: 5
+- set_fact:
+    heketi_pod_name: "{{ heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod'].metadata.name|[0]\") }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
new file mode 100644
index 000000000..23a2b4f9c
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: "Kubernetes Apps | GlusterFS"
+  include_tasks: "glusterfs.yml"
+
+- name: "Kubernetes Apps | Heketi Secrets"
+  include_tasks: "secret.yml"
+
+- name: "Kubernetes Apps | Test Heketi"
+  register: "heketi_service_state"
+  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  changed_when: false
+
+- name: "Kubernetes Apps | Bootstrap Heketi"
+  when: "heketi_service_state.stdout == \"\""
+  include_tasks: "bootstrap.yml"
+
+- name: "Kubernetes Apps | Heketi"
+  include_tasks: "heketi.yml"
+
+- name: "Kubernetes Apps | Heketi Topology"
+  include_tasks: "topology.yml"
+
+- name: "Kubernetes Apps | Heketi Storage"
+  include_tasks: "storage.yml"
+
+- name: "Kubernetes Apps | Storage Class"
+  include_tasks: "storageclass.yml"
+
+- name: "Clean up"
+  include_tasks: "cleanup.yml"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
new file mode 100644
index 000000000..8ca21bcb6
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml
@@ -0,0 +1,27 @@
+---
+- register: "clusterrolebinding_state"
+  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  changed_when: false
+- name: "Kubernetes Apps | Deploy cluster role binding."
+  when: "clusterrolebinding_state.stdout == \"\""
+  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+- register: "clusterrolebinding_state"
+  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  changed_when: false
+- assert: { that: "clusterrolebinding_state.stdout != \"\"", msg: "Cluster role binding is not present." }
+
+- register: "secret_state"
+  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  changed_when: false
+- name: "Render Heketi secret configuration."
+  become: true
+  template:
+    src: "heketi.json.j2"
+    dest: "{{ kube_config_dir }}/heketi.json"
+- name: "Deploy Heketi config secret"
+  when: "secret_state.stdout == \"\""
+  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+- register: "secret_state"
+  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  changed_when: false
+- assert: { that: "secret_state.stdout != \"\"", msg: "Heketi config secret is not present." }
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
new file mode 100644
index 000000000..881084bbe
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml
@@ -0,0 +1,11 @@
+---
+- name: "Kubernetes Apps | Lay Down Heketi Storage"
+  become: true
+  vars: { nodes: "{{ groups['heketi-node'] }}" }
+  template: { src: "heketi-storage.json.j2", dest: "{{ kube_config_dir }}/heketi-storage.json" }
+  register: "rendering"
+- name: "Kubernetes Apps | Install and configure Heketi Storage"
+  kube:
+    name: "GlusterFS"
+    filename: "{{ kube_config_dir }}/heketi-storage.json"
+    state: "{{ rendering.changed | ternary('latest', 'present') }}"
diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
new file mode 100644
index 000000000..afd818eb3
--- /dev/null
+++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml
@@ -0,0 +1,25 @@
+---
+- name: "Test storage class."
+  command: "{{ bin_dir }}/kubectl get storageclass gluster --ignore-not-found=true --output=json"
+  register: "storageclass"
+  changed_when: false
+- name: "Test heketi service."
+  command: "{{ bin_dir }}/kubectl get service heketi --ignore-not-found=true --output=json"
+  register: "heketi_service"
+  changed_when: false
+- name: "Ensure heketi service is available."
+  assert: { that: "heketi_service.stdout != \"\"" }
+- name: "Render storage class configuration."
+ become: true + vars: + endpoint_address: "{{ (heketi_service.stdout|from_json).spec.clusterIP }}" + template: + src: "storageclass.yml.j2" + dest: "{{ kube_config_dir }}/storageclass.yml" + register: "rendering" +- name: "Kubernetes Apps | Install and configure Storace Class" + kube: + name: "GlusterFS" + kubectl: "{{bin_dir}}/kubectl" + filename: "{{ kube_config_dir }}/storageclass.yml" + state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/topology.yml b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml new file mode 100644 index 000000000..dd1e272be --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/tasks/topology.yml @@ -0,0 +1,25 @@ +--- +- name: "Get heketi topology." + register: "heketi_topology" + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" +- name: "Render heketi topology template." + become: true + vars: { nodes: "{{ groups['heketi-node'] }}" } + register: "rendering" + template: + src: "topology.json.j2" + dest: "{{ kube_config_dir }}/topology.json" +- name: "Copy topology configuration into container." + when: "rendering.changed" + command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" +- name: "Load heketi topology." + when: "rendering.changed" + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json" +- name: "Get heketi topology." + register: "heketi_topology" + changed_when: false + command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json" + until: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*].devices[?state=='online'].id\")|flatten|length == groups['heketi-node']|length" + retries: 60 + delay: 5 diff --git a/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 new file mode 100644 index 000000000..eddd57eb8 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/glusterfs-daemonset.json.j2 @@ -0,0 +1,144 @@ +{ + "kind": "DaemonSet", + "apiVersion": "extensions/v1beta1", + "metadata": { + "name": "glusterfs", + "labels": { + "glusterfs": "deployment" + }, + "annotations": { + "description": "GlusterFS Daemon Set", + "tags": "glusterfs" + } + }, + "spec": { + "template": { + "metadata": { + "name": "glusterfs", + "labels": { + "glusterfs-node": "daemonset" + } + }, + "spec": { + "nodeSelector": { + "storagenode" : "glusterfs" + }, + "hostNetwork": true, + "containers": [ + { + "image": "gluster/gluster-centos:gluster4u0_centos7", + "imagePullPolicy": "IfNotPresent", + "name": "glusterfs", + "volumeMounts": [ + { + "name": "glusterfs-heketi", + "mountPath": "/var/lib/heketi" + }, + { + "name": "glusterfs-run", + "mountPath": "/run" + }, + { + "name": "glusterfs-lvm", + "mountPath": "/run/lvm" + }, + { + "name": "glusterfs-etc", + "mountPath": "/etc/glusterfs" + }, + { + "name": "glusterfs-logs", + "mountPath": "/var/log/glusterfs" + }, + { + "name": "glusterfs-config", + "mountPath": "/var/lib/glusterd" + }, + { + "name": "glusterfs-dev", + "mountPath": "/dev" + }, + { + "name": "glusterfs-cgroup", + "mountPath": "/sys/fs/cgroup" + } + ], + 
"securityContext": { + "capabilities": {}, + "privileged": true + }, + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 60, + "exec": { + "command": [ + "/bin/bash", + "-c", + "systemctl status glusterd.service" + ] + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 60, + "exec": { + "command": [ + "/bin/bash", + "-c", + "systemctl status glusterd.service" + ] + } + } + } + ], + "volumes": [ + { + "name": "glusterfs-heketi", + "hostPath": { + "path": "/var/lib/heketi" + } + }, + { + "name": "glusterfs-run" + }, + { + "name": "glusterfs-lvm", + "hostPath": { + "path": "/run/lvm" + } + }, + { + "name": "glusterfs-etc", + "hostPath": { + "path": "/etc/glusterfs" + } + }, + { + "name": "glusterfs-logs", + "hostPath": { + "path": "/var/log/glusterfs" + } + }, + { + "name": "glusterfs-config", + "hostPath": { + "path": "/var/lib/glusterd" + } + }, + { + "name": "glusterfs-dev", + "hostPath": { + "path": "/dev" + } + }, + { + "name": "glusterfs-cgroup", + "hostPath": { + "path": "/sys/fs/cgroup" + } + } + ] + } + } + } +} diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 new file mode 100644 index 000000000..bdcf3e958 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-bootstrap.json.j2 @@ -0,0 +1,133 @@ +{ + "kind": "List", + "apiVersion": "v1", + "items": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "deploy-heketi", + "labels": { + "glusterfs": "heketi-service", + "deploy-heketi": "support" + }, + "annotations": { + "description": "Exposes Heketi Service" + } + }, + "spec": { + "selector": { + "name": "deploy-heketi" + }, + "ports": [ + { + "name": "deploy-heketi", + "port": 8080, + "targetPort": 8080 + } + ] + } + }, + { + "kind": "Deployment", + "apiVersion": "extensions/v1beta1", + "metadata": { + "name": "deploy-heketi", + "labels": { + "glusterfs": "heketi-deployment", + "deploy-heketi": "deployment" + }, + "annotations": { + "description": "Defines how to deploy Heketi" + } + }, + "spec": { + "replicas": 1, + "template": { + "metadata": { + "name": "deploy-heketi", + "labels": { + "name": "deploy-heketi", + "glusterfs": "heketi-pod", + "deploy-heketi": "pod" + } + }, + "spec": { + "serviceAccountName": "heketi-service-account", + "containers": [ + { + "image": "heketi/heketi:7", + "imagePullPolicy": "Always", + "name": "deploy-heketi", + "env": [ + { + "name": "HEKETI_EXECUTOR", + "value": "kubernetes" + }, + { + "name": "HEKETI_DB_PATH", + "value": "/var/lib/heketi/heketi.db" + }, + { + "name": "HEKETI_FSTAB", + "value": "/var/lib/heketi/fstab" + }, + { + "name": "HEKETI_SNAPSHOT_LIMIT", + "value": "14" + }, + { + "name": "HEKETI_KUBE_GLUSTER_DAEMONSET", + "value": "y" + } + ], + "ports": [ + { + "containerPort": 8080 + } + ], + "volumeMounts": [ + { + "name": "db", + "mountPath": "/var/lib/heketi" + }, + { + "name": "config", + "mountPath": "/etc/heketi" + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/hello", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/hello", + "port": 8080 + } + } + } + ], + "volumes": [ + { + "name": "db" + }, + { + "name": "config", + "secret": { + "secretName": "heketi-config-secret" + } + } + ] + } + } + } + } + ] +} diff --git 
a/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 new file mode 100644 index 000000000..5eb71718c --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-deployment.json.j2 @@ -0,0 +1,159 @@ +{ + "kind": "List", + "apiVersion": "v1", + "items": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "heketi-db-backup", + "labels": { + "glusterfs": "heketi-db", + "heketi": "db" + } + }, + "data": { + }, + "type": "Opaque" + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "heketi", + "labels": { + "glusterfs": "heketi-service", + "deploy-heketi": "support" + }, + "annotations": { + "description": "Exposes Heketi Service" + } + }, + "spec": { + "selector": { + "name": "heketi" + }, + "ports": [ + { + "name": "heketi", + "port": 8080, + "targetPort": 8080 + } + ] + } + }, + { + "kind": "Deployment", + "apiVersion": "extensions/v1beta1", + "metadata": { + "name": "heketi", + "labels": { + "glusterfs": "heketi-deployment" + }, + "annotations": { + "description": "Defines how to deploy Heketi" + } + }, + "spec": { + "replicas": 1, + "template": { + "metadata": { + "name": "heketi", + "labels": { + "name": "heketi", + "glusterfs": "heketi-pod" + } + }, + "spec": { + "serviceAccountName": "heketi-service-account", + "containers": [ + { + "image": "heketi/heketi:7", + "imagePullPolicy": "Always", + "name": "heketi", + "env": [ + { + "name": "HEKETI_EXECUTOR", + "value": "kubernetes" + }, + { + "name": "HEKETI_DB_PATH", + "value": "/var/lib/heketi/heketi.db" + }, + { + "name": "HEKETI_FSTAB", + "value": "/var/lib/heketi/fstab" + }, + { + "name": "HEKETI_SNAPSHOT_LIMIT", + "value": "14" + }, + { + "name": "HEKETI_KUBE_GLUSTER_DAEMONSET", + "value": "y" + } + ], + "ports": [ + { + "containerPort": 8080 + } + ], + "volumeMounts": [ + { + "mountPath": "/backupdb", + "name": "heketi-db-secret" + }, + { + "name": "db", + "mountPath": "/var/lib/heketi" + }, + { + "name": "config", + "mountPath": "/etc/heketi" + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/hello", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/hello", + "port": 8080 + } + } + } + ], + "volumes": [ + { + "name": "db", + "glusterfs": { + "endpoints": "heketi-storage-endpoints", + "path": "heketidbstorage" + } + }, + { + "name": "heketi-db-secret", + "secret": { + "secretName": "heketi-db-backup" + } + }, + { + "name": "config", + "secret": { + "secretName": "heketi-config-secret" + } + } + ] + } + } + } + } + ] +} diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 new file mode 100644 index 000000000..1dbcb9e96 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2 @@ -0,0 +1,7 @@ +{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "name": "heketi-service-account" + } +} diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 new file mode 100644 index 000000000..3089256c9 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi-storage.json.j2 @@ -0,0 +1,54 @@ +{ + 
"apiVersion": "v1", + "kind": "List", + "items": [ + { + "kind": "Endpoints", + "apiVersion": "v1", + "metadata": { + "name": "heketi-storage-endpoints", + "creationTimestamp": null + }, + "subsets": [ +{% set nodeblocks = [] %} +{% for node in nodes %} +{% set nodeblock %} + { + "addresses": [ + { + "ip": "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}" + } + ], + "ports": [ + { + "port": 1 + } + ] + } +{% endset %} +{% if nodeblocks.append(nodeblock) %}{% endif %} +{% endfor %} +{{ nodeblocks|join(',') }} + ] + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "heketi-storage-endpoints", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "port": 1, + "targetPort": 0 + } + ] + }, + "status": { + "loadBalancer": {} + } + } + ] +} diff --git a/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 new file mode 100644 index 000000000..5861b684b --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/heketi.json.j2 @@ -0,0 +1,44 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port": "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth": true, + + "_jwt": "Private keys for access", + "jwt": { + "_admin": "Admin has access to all APIs", + "admin": { + "key": "{{ heketi_admin_key }}" + }, + "_user": "User only has access to /volumes endpoint", + "user": { + "key": "{{ heketi_user_key }}" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs": { + "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh", + "executor": "kubernetes", + + "_db_comment": "Database file name", + "db": "/var/lib/heketi/heketi.db", + + "kubeexec": { + "rebalance_on_expansion": true + }, + + "sshexec": { + "rebalance_on_expansion": true, + "keyfile": "/etc/heketi/private_key", + "fstab": "/etc/fstab", + "port": "22", + "user": "root", + "sudo": false + } + }, + + "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. 
Default is off.", + "backup_db_to_kube_secret": false +} diff --git a/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 b/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 new file mode 100644 index 000000000..65104e143 --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gluster + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{{ endpoint_address }}:8080" + restuser: "user" + restuserkey: "{{ heketi_user_key }}" diff --git a/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 b/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 new file mode 100644 index 000000000..b0ac29d7b --- /dev/null +++ b/contrib/network-storage/heketi/roles/provision/templates/topology.json.j2 @@ -0,0 +1,34 @@ +{ + "clusters": [ + { + "nodes": [ +{% set nodeblocks = [] %} +{% for node in nodes %} +{% set nodeblock %} + { + "node": { + "hostnames": { + "manage": [ + "{{ node }}" + ], + "storage": [ + "{{ hostvars[node]['ansible_facts']['default_ipv4']['address'] }}" + ] + }, + "zone": 1 + }, + "devices": [ + { + "name": "{{ hostvars[node]['disk_volume_device_1'] }}", + "destroydata": false + } + ] + } +{% endset %} +{% if nodeblocks.append(nodeblock) %}{% endif %} +{% endfor %} +{{ nodeblocks|join(',') }} + ] + } + ] +} diff --git a/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml new file mode 100644 index 000000000..01e03660c --- /dev/null +++ b/contrib/network-storage/heketi/roles/tear-down-disks/tasks/main.yml @@ -0,0 +1,46 @@ +--- +- name: "Install lvm utils (RedHat)" + become: true + yum: + name: "lvm2" + state: "present" + when: "ansible_os_family == 'RedHat'" + +- name: "Install lvm utils (Debian)" + become: true + apt: + name: "lvm2" + state: "present" + when: "ansible_os_family == 'Debian'" + +- name: "Get volume group information." + become: true + shell: "pvs {{ disk_volume_device_1 }} --option vg_name | tail -n+2" + register: "volume_groups" + ignore_errors: true + changed_when: false + +- name: "Remove volume groups." + become: true + command: "vgremove {{ volume_group }} --yes" + with_items: "{{ volume_groups.stdout_lines }}" + loop_control: { loop_var: "volume_group" } + +- name: "Remove physical volume from cluster disks." + become: true + command: "pvremove {{ disk_volume_device_1 }} --yes" + ignore_errors: true + +- name: "Remove lvm utils (RedHat)" + become: true + yum: + name: "lvm2" + state: "absent" + when: "ansible_os_family == 'RedHat'" + +- name: "Remove lvm utils (Debian)" + become: true + apt: + name: "lvm2" + state: "absent" + when: "ansible_os_family == 'Debian'" diff --git a/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml new file mode 100644 index 000000000..ddc56b256 --- /dev/null +++ b/contrib/network-storage/heketi/roles/tear-down/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- name: "Remove storage class." + command: "{{ bin_dir }}/kubectl delete storageclass gluster" + ignore_errors: true +- name: "Tear down heketi." + command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\"" + ignore_errors: true +- name: "Tear down heketi." 
+  command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
+  ignore_errors: true
+- name: "Tear down bootstrap."
+  include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
+- name: "Ensure there is nothing left over."
+  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
+  register: "heketi_result"
+  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  retries: 60
+  delay: 5
+- name: "Ensure there is nothing left over."
+  command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
+  register: "heketi_result"
+  until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
+  retries: 60
+  delay: 5
+- name: "Tear down glusterfs."
+  command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
+  ignore_errors: true
+- name: "Remove heketi storage service."
+  command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
+  ignore_errors: true
+- name: "Remove heketi gluster role binding"
+  command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
+  ignore_errors: true
+- name: "Remove heketi config secret"
+  command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
+  ignore_errors: true
+- name: "Remove heketi db backup"
+  command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
+  ignore_errors: true
+- name: "Remove heketi service account"
+  command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
+  ignore_errors: true
+- name: "Get secrets"
+  command: "{{ bin_dir }}/kubectl get secrets --output=\"json\""
+  register: "secrets"
+  changed_when: false
+- name: "Remove heketi storage secret"
+  vars: { storage_query: "items[?metadata.annotations.\"kubernetes.io/service-account.name\"=='heketi-service-account'].metadata.name|[0]" }
+  command: "{{ bin_dir }}/kubectl delete secret {{ secrets.stdout|from_json|json_query(storage_query) }}"
+  when: "secrets.stdout|from_json|json_query(storage_query)"
+  ignore_errors: true
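
A quick way to sanity-check the result of `heketi.yml` (not part of the patch itself): the roles above leave behind a `glusterfs` daemonset, a `heketi` deployment and service, and a `gluster` StorageClass marked as default. The sketch below assumes a configured `kubectl` on the first master node; the PVC name is made up for illustration, and `HEKETI_ADMIN_KEY` stands in for whatever `heketi_admin_key` is set to in the inventory.
```
# Check the objects created by the provision role.
kubectl get ds/glusterfs deploy/heketi svc/heketi sc/gluster

# Query Heketi the same way the playbook does, via the heketi pod.
HEKETI_POD=$(kubectl get pods --selector=glusterfs=heketi-pod -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$HEKETI_POD" -- heketi-cli --user admin --secret "$HEKETI_ADMIN_KEY" topology info

# Exercise dynamic provisioning through the new StorageClass.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: gluster
EOF
kubectl get pvc gluster-test-pvc   # should end up Bound
```
Delete the test PVC once it binds; the names and the 1Gi size are only placeholders for a smoke test.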