5 changed files with 220 additions and 0 deletions
- +10 roles/network_plugin/contiv/files/contiv-cleanup.sh
- +66 roles/network_plugin/contiv/tasks/pre-reset.yml
- +57 roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
- +80 roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
- +7 roles/reset/tasks/main.yml
roles/network_plugin/contiv/files/contiv-cleanup.sh
@@ -0,0 +1,10 @@
#!/bin/bash
set -e
echo "Starting cleanup"
ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
for p in $(ifconfig | grep vport | awk '{print $1}');
do
    ip link delete $p type veth
done
touch /tmp/cleanup.done
sleep 60
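The script only signals completion through the /tmp/cleanup.done marker. As a rough sketch (these commands are illustrative checks, not part of the change), the result can be verified by hand on a node after the cleanup DaemonSet has run:

  # any contiv OVS bridges left behind? (should print nothing)
  ovs-vsctl list-br | grep contiv || true
  # any leftover contiv vport veth interfaces?
  ip -o link show | awk -F': ' '/vport/ {print $2}'
  # marker file the script touches once cleanup has finished
  ls -l /tmp/cleanup.done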
roles/network_plugin/contiv/tasks/pre-reset.yml
@@ -0,0 +1,66 @@
---
- name: reset | Check that kubectl is still here
  stat:
    path: "{{ bin_dir }}/kubectl"
  register: contiv_kubectl

- name: reset | Delete contiv netplugin and netmaster daemonsets
  kube:
    name: "{{ item }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: absent
  with_items:
    - contiv-netplugin
    - contiv-netmaster
  register: contiv_cleanup_deletion
  tags:
    - network
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

- name: reset | Copy contiv temporary cleanup script
  copy:
    src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
    dest: /opt/cni/bin/cleanup
    owner: root
    group: root
    mode: 0750
  when:
    - contiv_kubectl.stat.exists

- name: reset | Lay down contiv cleanup template
  template:
    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
  register: contiv_cleanup_manifest
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

- name: reset | Start contiv cleanup resources
  kube:
    name: "contiv-cleanup"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: latest
    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]
  ignore_errors: true

- name: reset | Wait until contiv cleanup is done
  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
  register: cleanup_done_all_nodes
  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
  retries: 5
  delay: 5
  ignore_errors: true
  changed_when: false
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]
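The final task polls numberReady on the cleanup DaemonSet until it matches the size of the k8s-cluster group. Run by hand, the same check is roughly the following (assuming kubectl is on PATH; the playbook uses {{ bin_dir }}/kubectl):

  # ready cleanup pods across the cluster
  kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'
  # expected value: one pod per node in the k8s-cluster group
  kubectl get nodes --no-headers | wc -l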
roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -0,0 +1,57 @@
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: contiv-cleanup
  namespace: kube-system
  labels:
    k8s-app: contiv-cleanup
spec:
  selector:
    matchLabels:
      k8s-app: contiv-cleanup
  template:
    metadata:
      labels:
        k8s-app: contiv-cleanup
    spec:
      hostNetwork: true
      hostPID: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: contiv-netplugin
      containers:
        - name: contiv-ovs-cleanup
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/opt/cni/bin/cleanup"]
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /var/run
              name: var-run
              readOnly: false
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
              readOnly: false
          readinessProbe:
            exec:
              command:
                - cat
                - /tmp/cleanup.done
            initialDelaySeconds: 3
            periodSeconds: 3
            successThreshold: 1
      volumes:
        - name: etc-openvswitch
          hostPath:
            path: /etc/openvswitch
        - name: var-run
          hostPath:
            path: /var/run
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
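The readiness probe simply execs cat on /tmp/cleanup.done, so a pod turns Ready only after the cleanup script above has written the marker file. A hand-run equivalent against a live pod could look like this (the pod lookup is an illustration, not part of the manifest):

  # pick one cleanup pod created by the DaemonSet
  POD=$(kubectl -n kube-system get pods -l k8s-app=contiv-cleanup -o jsonpath='{.items[0].metadata.name}')
  # non-zero exit (pod stays NotReady) until the marker file exists
  kubectl -n kube-system exec "$POD" -- cat /tmp/cleanup.done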
roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -0,0 +1,80 @@
---
apiVersion: apps/v1
# This manifest deploys the contiv-ovs pod.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: contiv-ovs
  namespace: kube-system
  labels:
    k8s-app: contiv-ovs
spec:
  selector:
    matchLabels:
      k8s-app: contiv-ovs
  template:
    metadata:
      labels:
        k8s-app: contiv-ovs
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      hostPID: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
        # Runs ovs containers on each Kubernetes node.
        - name: contiv-ovsdb-server
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/scripts/start-ovsdb-server.sh"]
          securityContext:
            privileged: false
          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
          env:
            - name: OVSDBSERVER_EXTRA_FLAGS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_ovsdb_server_extra_flags
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /var/run
              name: var-run
              readOnly: false
        - name: contiv-ovs-vswitchd
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/scripts/start-ovs-vswitchd.sh"]
          securityContext:
            privileged: true
          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
          env:
            - name: OVSVSWITCHD_EXTRA_FLAGS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_ovs_vswitchd_extra_flags
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run
              name: var-run
              readOnly: false
      volumes:
        # Used by contiv-ovs
        - name: etc-openvswitch
          hostPath:
            path: /etc/openvswitch
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run
          hostPath:
            path: /var/run
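Both containers mount the host's /var/run, which is how ovs-vswitchd reaches the socket created by ovsdb-server in the other container. A rough sanity check on a node once the pod is running (the socket path shown is the stock Open vSwitch default, not something this template sets):

  # both daemons should answer once the pod is up
  ovs-vsctl --timeout=5 show
  # ovsdb control socket under the shared /var/run mount
  ls /var/run/openvswitch/db.sock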