42 changed files with 48 additions and 1304 deletions
- 6    .gitlab-ci/packet.yml
- 4    README.md
- 1    docs/_sidebar.md
- 84   docs/ci.md
- 72   docs/contiv.md
- 2    inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml
- 20   inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml
- 38   roles/download/defaults/main.yml
- 113  roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
- 14   roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
- 5    roles/kubernetes-apps/network_plugin/meta/main.yml
- 2    roles/kubernetes/node/templates/kubelet.env.v1beta1.j2
- 2    roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
- 3    roles/kubernetes/preinstall/tasks/0050-create_directories.yml
- 5    roles/kubespray-defaults/defaults/main.yaml
- 55   roles/network_plugin/contiv/defaults/main.yml
- 10   roles/network_plugin/contiv/files/contiv-cleanup.sh
- 6    roles/network_plugin/contiv/handlers/main.yml
- 3    roles/network_plugin/contiv/meta/main.yml
- 156  roles/network_plugin/contiv/tasks/main.yml
- 66   roles/network_plugin/contiv/tasks/pre-reset.yml
- 9    roles/network_plugin/contiv/tasks/reset.yml
- 62   roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
- 58   roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
- 31   roles/network_plugin/contiv/templates/contiv-config.yml.j2
- 38   roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
- 65   roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
- 27   roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
- 12   roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
- 5    roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
- 71   roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
- 29   roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
- 12   roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
- 5    roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
- 128  roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
- 79   roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
- 23   roles/network_plugin/contiv/templates/generate-certificate.sh.j2
- 5    roles/network_plugin/meta/main.yml
- 12   roles/reset/tasks/main.yml
- 9    tests/files/packet_ubuntu16-contiv-sep.yml
- 2    tests/scripts/md-table/main.py
- 3    tests/testcases/040_check-network-adv.yml
docs/contiv.md
@@ -1,72 +0,0 @@
# Contiv

Here is the [Contiv documentation](https://contiv.github.io/documents/).

## Administrate Contiv

There are two ways to manage Contiv:

* a web UI served by the API proxy service
* a CLI named `netctl`

### Interfaces

#### The Web Interface

This UI is hosted on all Kubernetes master nodes. The service is available at `https://<one of your master nodes>:10000`.

You can configure the API proxy by overriding the following variables:

```yaml
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true
```

The default login credentials are admin/admin.

#### The Command Line Interface

The second way to modify the Contiv configuration is the CLI. To use it, connect to the server and export an environment variable that tells netctl how to reach the cluster:

```bash
export NETMASTER=http://127.0.0.1:9999
```

The port can be changed by overriding the following variable:

```yaml
contiv_netmaster_port: 9999
```

The CLI does not use the authentication process required by the web interface.

### Network configuration

The default configuration uses VXLAN to create an overlay. Two networks are created by default:

* `contivh1`: an infrastructure network. It allows nodes to reach pod IPs and is mandatory in a Kubernetes environment that uses VXLAN.
* `default-net`: the default network that hosts pods.

You can change the default network configuration by overriding the `contiv_networks` variable.

The default forwarding mode is routing and the default network mode is vxlan:

```yaml
contiv_fwd_mode: routing
contiv_net_mode: vxlan
```

The following example uses VLAN instead of VXLAN:

```yaml
contiv_fwd_mode: bridge
contiv_net_mode: vlan
contiv_vlan_interface: eth0
contiv_networks:
  - name: default-net
    subnet: "{{ kube_pods_subnet }}"
    gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
    encap: vlan
    pkt_tag: 10
```
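For reference, the removed CLI in the docs above could be exercised like this once `NETMASTER` is exported (a minimal sketch; the flags mirror the ones the configure tasks further down in this diff actually use, and the subnet/gateway values are illustrative placeholders):

```bash
# List configured networks and inspect global settings
netctl net ls -q
netctl global info --json --all

# Create a VXLAN network (illustrative values)
netctl net create --encap=vxlan --subnet=10.233.64.0/18 --gateway=10.233.64.1 default-net
```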
inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml
@@ -1,20 +0,0 @@
# see roles/network_plugin/contiv/defaults/main.yml

# Forwarding mode: bridge or routing
# contiv_fwd_mode: routing

## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink.
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), and "neighbor"
# contiv_peer_with_uplink_leaf: false
# contiv_global_as: "65002"
# contiv_global_neighbor_as: "500"

# Fabric mode: aci, aci-opflex or default
# contiv_fabric_mode: default

# Default netmode: vxlan or vlan
# contiv_net_mode: vxlan

# Dataplane interface
# contiv_vlan_interface: ""
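For the BGP peering described in the comments above, each node's hostvars need a `contiv` dict; a minimal sketch of a host_vars entry (hypothetical path and addresses):

```yaml
# inventory/mycluster/host_vars/node1.yml (hypothetical)
contiv:
  router_ip: 10.0.0.11   # this node's routed interface IP
  as: "65002"            # optional, defaults to contiv_global_as
  neighbor_as: "500"     # optional, defaults to contiv_global_neighbor_as
  neighbor: 10.0.0.1     # uplink leaf/router to peer with
```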
roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
@@ -1,113 +0,0 @@
---

- name: Contiv | Wait for netmaster
  uri:
    url: "http://127.0.0.1:{{ contiv_netmaster_port }}/info"
  register: result
  until: result.status is defined and result.status == 200
  retries: 10
  delay: 5

- name: Contiv | Get global configuration
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global info --json --all
  register: global_config
  run_once: true
  changed_when: false

- name: Contiv | Set contiv_global_config
  set_fact:
    contiv_global_config: "{{ (global_config.stdout|from_json)[0] }}"

- name: Contiv | Set global forwarding mode
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global set --fwd-mode={{ contiv_fwd_mode }}
  when: "contiv_global_config.get('fwdMode', '') != contiv_fwd_mode"
  run_once: true

- name: Contiv | Set global fabric mode
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      global set --fabric-mode={{ contiv_fabric_mode }}
  when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
  run_once: true

- name: Contiv | Set peer hostname
  set_fact:
    contiv_peer_hostname: >-
      {%- if override_system_hostname|default(true) -%}
      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
      {%- else -%}
      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
      {%- endif -%}
  with_items: "{{ groups['k8s-cluster'] }}"
  run_once: true
  when:
    - contiv_fwd_mode == 'routing'
    - contiv_peer_with_uplink_leaf

- name: Contiv | Get BGP configuration
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      bgp ls --json
  register: bgp_config
  run_once: true
  changed_when: false
  when:
    - contiv_fwd_mode == 'routing'
    - contiv_peer_with_uplink_leaf

- name: Contiv | Configure peering with router(s)
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      bgp create {{ item.value }} \
      --router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
      --as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
      --neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
      --neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
  run_once: true
  with_dict: "{{ contiv_peer_hostname }}"
  when:
    - contiv_fwd_mode == 'routing'
    - contiv_peer_with_uplink_leaf
    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list

- name: Contiv | Get existing networks
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      net ls -q
  register: net_result
  run_once: true
  changed_when: false

- name: Contiv | Create networks
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      net create \
      --encap={{ item.encap|default("vxlan") }} \
      --gateway={{ item.gateway }} \
      --nw-type={{ item.nw_type|default("data") }} \
      --pkt-tag={{ item.pkt_tag|default("0") }} \
      --subnet={{ item.subnet }} \
      --tenant={{ item.tenant|default("default") }} \
      "{{ item.name }}"
  with_items: "{{ contiv_networks }}"
  when: item['name'] not in net_result.stdout_lines
  run_once: true

- name: Contiv | Check if default group exists
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      group ls -q
  register: group_result
  run_once: true
  changed_when: false

- name: Contiv | Create default group
  command: |
    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
      group create default-net default
  when: "'default' not in group_result.stdout_lines"
  run_once: true
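The peering task in the file above only creates entries missing from `bgp ls`; a hand-run equivalent for one host might look like this (hostname and addresses are placeholders, and the grep is only a rough stand-in for the play's JSON check):

```bash
NETMASTER="http://127.0.0.1:9999"
HOST="node1"

# Create the BGP entry only if this host is not already configured
if ! netctl --netmaster "$NETMASTER" bgp ls --json | grep -q "$HOST"; then
  netctl --netmaster "$NETMASTER" bgp create "$HOST" \
    --router-ip="10.0.0.11" --as="65002" --neighbor-as="500" --neighbor="10.0.0.1"
fi
```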
roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -1,14 +0,0 @@
---

- name: Contiv | Create Kubernetes resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ contiv_manifests_results.results }}"
  run_once: true

- import_tasks: configure.yml
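The `state` ternary above re-applies a manifest (`latest`) only when its template task reported a change, and otherwise just ensures it exists (`present`); an equivalent long-hand spelling (hypothetical, for illustration):

```yaml
state: "{{ 'latest' if item.changed else 'present' }}"
```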
roles/network_plugin/contiv/defaults/main.yml
@@ -1,55 +0,0 @@
---

contiv_config_dir: "{{ kube_config_dir }}/contiv"
contiv_etcd_conf_dir: "/etc/contiv/etcd"
contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
contiv_netmaster_port: 9999
contiv_cni_version: 0.3.1

# No need to download it by default, but must be defined
contiv_etcd_image_repo: "{{ etcd_image_repo }}"
contiv_etcd_image_tag: "{{ etcd_image_tag }}"

contiv_etcd_listen_port: 6666
contiv_etcd_peer_port: 6667
contiv_etcd_endpoints: |-
  {% for host in groups['kube-master'] -%}
    contiv_etcd{{ loop.index }}=http://{{ hostvars[host]['ip'] | default(fallback_ips[host]) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
  {%- endfor %}

# Parameters for Contiv api-proxy
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true

# Forwarding mode: bridge or routing
contiv_fwd_mode: routing

# Fabric mode: aci, aci-opflex or default
contiv_fabric_mode: default

# Default netmode: vxlan or vlan
contiv_net_mode: vxlan

# Dataplane interface
contiv_vlan_interface: ""

# Default loglevels are INFO
contiv_netmaster_loglevel: "WARN"
contiv_netplugin_loglevel: "WARN"
contiv_ovsdb_server_loglevel: "warn"
contiv_ovs_vswitchd_loglevel: "warn"

# VxLAN port
contiv_vxlan_port: 4789

# Default network configuration
contiv_networks:
  - name: contivh1
    subnet: "10.233.128.0/18"
    gateway: "10.233.128.1"
    nw_type: infra
  - name: default-net
    subnet: "{{ kube_pods_subnet }}"
    gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
    pkt_tag: 10
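As a worked example of the `contiv_etcd_endpoints` template above: with two masters whose `ip` hostvars are 10.0.0.11 and 10.0.0.12 (hypothetical addresses), it renders to:

```text
contiv_etcd1=http://10.0.0.11:6667,contiv_etcd2=http://10.0.0.12:6667
```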
roles/network_plugin/contiv/files/contiv-cleanup.sh
@@ -1,10 +0,0 @@
#!/bin/bash
set -e
echo "Starting cleanup"
ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
for p in $(ifconfig | grep vport | awk '{print $1}');
do
  ip link delete "$p" type veth
done
touch /tmp/cleanup.done
sleep 60
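The script above relies on `ifconfig`, which is absent on many minimal hosts; a sketch of the same vport sweep using only iproute2 (assumed equivalent):

```bash
# Delete every contiv vport veth, using iproute2 instead of ifconfig
for p in $(ip -o link show | awk -F': ' '{print $2}' | cut -d@ -f1 | grep vport); do
  ip link delete "$p" type veth
done
```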
roles/network_plugin/contiv/handlers/main.yml
@@ -1,6 +0,0 @@
---
- name: Contiv | Reload kernel modules
  service:
    name: systemd-modules-load
    state: restarted
    enabled: yes
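This handler makes systemd re-read `/etc/modules-load.d/` after the role drops its `openvswitch.conf` there (see the tasks file below). A quick manual check that the module actually loaded (standard tooling, not part of the role):

```bash
systemctl restart systemd-modules-load
lsmod | grep -w openvswitch   # should list the module
```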
roles/network_plugin/contiv/meta/main.yml
@@ -1,3 +0,0 @@
---
dependencies:
  - role: network_plugin/cni
roles/network_plugin/contiv/tasks/main.yml
@@ -1,156 +0,0 @@
---
- name: Contiv | Load openvswitch kernel module
  copy:
    dest: /etc/modules-load.d/openvswitch.conf
    content: "openvswitch"
  notify:
    - Contiv | Reload kernel modules

- name: Contiv | Create contiv etcd directories
  file:
    dest: "{{ item }}"
    state: directory
    mode: 0750
    owner: root
    group: root
  with_items:
    - "{{ contiv_etcd_conf_dir }}"
    - "{{ contiv_etcd_data_dir }}"
  when: inventory_hostname in groups['kube-master']

- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
  set_fact:
    kube_apiserver_endpoint_for_contiv: |-
      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
      https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
      {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
      https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
      {%- if loadbalancer_apiserver.port|string != "443" -%}
      :{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
      {%- endif -%}
      {%- else -%}
      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
      {%- endif %}
  when: inventory_hostname in groups['kube-master']

- name: Contiv | Set necessary facts
  set_fact:
    contiv_config_dir: "{{ contiv_config_dir }}"
    contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
    contiv_fabric_mode: "{{ contiv_fabric_mode }}"
    contiv_fwd_mode: "{{ contiv_fwd_mode }}"
    contiv_netmaster_port: "{{ contiv_netmaster_port }}"
    contiv_networks: "{{ contiv_networks }}"
    contiv_manifests:
      - {name: contiv-config, file: contiv-config.yml, type: configmap}
      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
      - {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
      - {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
      - {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
      - {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
      - {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
      - {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
      - {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
      - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
  when: inventory_hostname in groups['kube-master']

- name: Contiv | Add another manifest if contiv_enable_api_proxy is true
  set_fact:
    contiv_manifests: |-
      {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
      {{ contiv_manifests }}
  when:
    - contiv_enable_api_proxy
    - inventory_hostname in groups['kube-master']

- name: Contiv | Create /var/contiv
  file:
    path: /var/contiv
    state: directory

- name: Contiv | Create contiv config directory
  file:
    dest: "{{ contiv_config_dir }}"
    state: directory
    mode: 0755
    owner: root
    group: root
  when: inventory_hostname in groups['kube-master']

- name: Contiv | Install all Kubernetes resources
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ contiv_config_dir }}/{{ item.file }}"
  with_items: "{{ contiv_manifests }}"
  register: contiv_manifests_results
  when: inventory_hostname in groups['kube-master']

- name: Contiv | Copy certs generation script
  template:
    src: "generate-certificate.sh.j2"
    dest: "/var/contiv/generate-certificate.sh"
    mode: 0700
  when:
    - contiv_enable_api_proxy
    - contiv_generate_certificate
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Contiv | Check for cert key existence
  stat:
    path: /var/contiv/auth_proxy_key.pem
  register: contiv_certificate_key_state
  when:
    - contiv_enable_api_proxy
    - contiv_generate_certificate
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Contiv | Generate contiv-api-proxy certificates
  command: /var/contiv/generate-certificate.sh
  when:
    - contiv_enable_api_proxy
    - contiv_generate_certificate
    - (not contiv_certificate_key_state.stat.exists)
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Contiv | Fetch the generated certificate
  fetch:
    src: "/var/contiv/{{ item }}"
    dest: "/tmp/kubespray-contiv-{{ item }}"
    flat: yes
  with_items:
    - auth_proxy_key.pem
    - auth_proxy_cert.pem
  when:
    - contiv_enable_api_proxy
    - contiv_generate_certificate
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Contiv | Copy the generated certificate on nodes
  copy:
    src: "/tmp/kubespray-contiv-{{ item }}"
    dest: "/var/contiv/{{ item }}"
  with_items:
    - auth_proxy_key.pem
    - auth_proxy_cert.pem
  when:
    - inventory_hostname != groups['kube-master'][0]
    - inventory_hostname in groups['kube-master']
    - contiv_enable_api_proxy
    - contiv_generate_certificate

- name: Contiv | Copy netctl binary from docker container
  command: sh -c "{{ docker_bin_dir }}/docker rm -f netctl-binarycopy;
    {{ docker_bin_dir }}/docker create --name netctl-binarycopy {{ contiv_image_repo }}:{{ contiv_image_tag }} &&
    {{ docker_bin_dir }}/docker cp netctl-binarycopy:/contiv/bin/netctl {{ bin_dir }}/netctl &&
    {{ docker_bin_dir }}/docker rm -f netctl-binarycopy"
  register: contiv_task_result
  until: contiv_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
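The `kube_apiserver_endpoint_for_contiv` fact near the top of the file above resolves to one of three shapes; for illustration (hostnames and the 8383 port are placeholders, 6443 is the usual `kube_apiserver_port`):

```text
https://localhost:6443                      # non-master with a local apiserver loadbalancer
https://lb-apiserver.kubernetes.local:8383  # external loadbalancer on a non-443 port
https://master-1:6443                       # no loadbalancer: first master directly
```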
roles/network_plugin/contiv/tasks/pre-reset.yml
@@ -1,66 +0,0 @@
---
- name: reset | Check that kubectl is still here
  stat:
    path: "{{ bin_dir }}/kubectl"
  register: contiv_kubectl

- name: reset | Delete contiv netplugin and netmaster daemonsets
  kube:
    name: "{{ item }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: absent
  with_items:
    - contiv-netplugin
    - contiv-netmaster
  register: contiv_cleanup_deletion
  tags:
    - network
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

- name: reset | Copy contiv temporary cleanup script
  copy:
    src: ../files/contiv-cleanup.sh  # noqa 404 Not in role_path so we must trick...
    dest: /opt/cni/bin/cleanup
    owner: root
    group: root
    mode: 0750
  when:
    - contiv_kubectl.stat.exists

- name: reset | Lay down contiv cleanup template
  template:
    src: ../templates/contiv-cleanup.yml.j2  # noqa 404 Not in role_path so we must trick...
    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
  register: contiv_cleanup_manifest
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]

- name: reset | Start contiv cleanup resources
  kube:
    name: "contiv-cleanup"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "ds"
    state: latest
    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]
  ignore_errors: true

- name: reset | Wait until contiv cleanup is done
  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
  register: cleanup_done_all_nodes
  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
  retries: 5
  delay: 5
  ignore_errors: true
  changed_when: false
  when:
    - contiv_kubectl.stat.exists
    - inventory_hostname == groups['kube-master'][0]
roles/network_plugin/contiv/tasks/reset.yml
@@ -1,9 +0,0 @@
---
- name: reset | check contiv vxlan_sys network device
  stat:
    path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
  register: contiv_vxlan_sys

- name: reset | remove the vxlan_sys network device created by contiv
  command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
  when: contiv_vxlan_sys.stat.exists
roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -1,62 +0,0 @@
# This manifest deploys the Contiv API Proxy Server on Kubernetes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: contiv-api-proxy
  namespace: kube-system
  labels:
    k8s-app: contiv-api-proxy
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: contiv-api-proxy
  template:
    metadata:
      name: contiv-api-proxy
      namespace: kube-system
      labels:
        k8s-app: contiv-api-proxy
    spec:
      priorityClassName: system-node-critical
      # The API proxy must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - operator: Exists
      serviceAccountName: contiv-netmaster
      containers:
        - name: contiv-api-proxy
          image: {{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}
          args:
            - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
            - --tls-key-file=/var/contiv/auth_proxy_key.pem
            - --tls-certificate=/var/contiv/auth_proxy_cert.pem
            - --data-store-driver=$(STORE_DRIVER)
            - --data-store-address=$(CONTIV_ETCD)
            - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
          env:
            - name: NO_NETMASTER_STARTUP_CHECK
              value: "0"
            - name: STORE_DRIVER
              value: etcd
            - name: CONTIV_ETCD
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_etcd
          securityContext:
            privileged: false
          volumeMounts:
            - mountPath: /var/contiv
              name: var-contiv
              readOnly: false
      volumes:
        - name: var-contiv
          hostPath:
            path: /var/contiv
roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2
@@ -1,58 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-cleanup
  namespace: kube-system
  labels:
    k8s-app: contiv-cleanup
spec:
  selector:
    matchLabels:
      k8s-app: contiv-cleanup
  template:
    metadata:
      labels:
        k8s-app: contiv-cleanup
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      tolerations:
        - operator: Exists
      serviceAccountName: contiv-netplugin
      containers:
        - name: contiv-ovs-cleanup
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/opt/cni/bin/cleanup"]
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /var/run
              name: var-run
              readOnly: false
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
              readOnly: false
          readinessProbe:
            exec:
              command:
                - cat
                - /tmp/cleanup.done
            initialDelaySeconds: 3
            periodSeconds: 3
            successThreshold: 1
      volumes:
        - name: etc-openvswitch
          hostPath:
            path: /etc/openvswitch
        - name: var-run
          hostPath:
            path: /var/run
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -1,31 +0,0 @@
# This ConfigMap is used to configure a self-hosted Contiv installation.
# It can be used with an external cluster store (etcd or consul) or used
# with the etcd instance being installed as contiv-etcd.
kind: ConfigMap
apiVersion: v1
metadata:
  name: contiv-config
  namespace: kube-system
data:
  contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
  contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
  contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
  contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
  contiv_fwdmode: {{ contiv_fwd_mode }}
  contiv_netmode: {{ contiv_net_mode }}
  contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
  contiv_cni_config: |-
    {
      "cniVersion": "{{ contiv_cni_version }}",
      "name": "contiv-net",
      "type": "contivk8s"
    }
  contiv_k8s_config: |-
    {
      "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
      "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
      "K8S_KEY": "",
      "K8S_CERT": "",
      "K8S_TOKEN": "",
      "SVC_SUBNET": "{{ kube_service_addresses }}"
    }
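With the role defaults shown earlier (WARN loglevels, etcd client port 6666, CNI spec 0.3.1, routing/vxlan), the templated `data` section above renders roughly as follows (a sketch; the apiserver endpoint and service subnet are cluster-specific and omitted):

```yaml
data:
  contiv_netmaster_loglevel: WARN
  contiv_netplugin_loglevel: WARN
  contiv_ovsdb_server_extra_flags: "--verbose=warn"
  contiv_ovs_vswitchd_extra_flags: "--verbose=warn"
  contiv_fwdmode: routing
  contiv_netmode: vxlan
  contiv_etcd: "http://127.0.0.1:6666"
  contiv_cni_config: |-
    {
      "cniVersion": "0.3.1",
      "name": "contiv-net",
      "type": "contivk8s"
    }
```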
roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -1,38 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-etcd-proxy
  namespace: kube-system
  labels:
    k8s-app: contiv-etcd-proxy
spec:
  selector:
    matchLabels:
      k8s-app: contiv-etcd-proxy
  template:
    metadata:
      labels:
        k8s-app: contiv-etcd-proxy
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: DoesNotExist
      containers:
        - name: contiv-etcd-proxy
          image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
          env:
            - name: ETCD_LISTEN_CLIENT_URLS
              value: 'http://127.0.0.1:{{ contiv_etcd_listen_port }}'
            - name: ETCD_PROXY
              value: "on"
            - name: ETCD_INITIAL_CLUSTER
              value: '{{ contiv_etcd_endpoints }}'
roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -1,65 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-etcd
  namespace: kube-system
  labels:
    k8s-app: contiv-etcd
spec:
  selector:
    matchLabels:
      k8s-app: contiv-etcd
  template:
    metadata:
      labels:
        k8s-app: contiv-etcd
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - operator: Exists
      initContainers:
        - name: contiv-etcd-init
          image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          env:
            - name: ETCD_INIT_ARGSFILE
              value: '{{ contiv_etcd_conf_dir }}/contiv-etcd-args'
            - name: ETCD_INIT_LISTEN_PORT
              value: '{{ contiv_etcd_listen_port }}'
            - name: ETCD_INIT_PEER_PORT
              value: '{{ contiv_etcd_peer_port }}'
            - name: ETCD_INIT_CLUSTER
              value: '{{ contiv_etcd_endpoints }}'
            - name: ETCD_INIT_DATA_DIR
              value: '{{ contiv_etcd_data_dir }}'
          volumeMounts:
            - name: contiv-etcd-conf-dir
              mountPath: {{ contiv_etcd_conf_dir }}
      containers:
        - name: contiv-etcd
          image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
          command:
            - sh
            - -c
            - "/usr/local/bin/etcd $(cat $ETCD_INIT_ARGSFILE)"
          env:
            - name: ETCD_INIT_ARGSFILE
              value: {{ contiv_etcd_conf_dir }}/contiv-etcd-args
          volumeMounts:
            - name: contiv-etcd-conf-dir
              mountPath: {{ contiv_etcd_conf_dir }}
            - name: contiv-etcd-data-dir
              mountPath: {{ contiv_etcd_data_dir }}
      volumes:
        - name: contiv-etcd-data-dir
          hostPath:
            path: {{ contiv_etcd_data_dir }}
        - name: contiv-etcd-conf-dir
          hostPath:
            path: {{ contiv_etcd_conf_dir }}
roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -1,27 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: contiv-netmaster
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - nodes
      - namespaces
      - networkpolicies
    verbs:
      - get
      - watch
      - list
      - update
  - apiGroups:
      - policy
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: contiv-netmaster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-netmaster
subjects:
  - kind: ServiceAccount
    name: contiv-netmaster
    namespace: kube-system
roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-netmaster
  namespace: kube-system
roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -1,71 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-netmaster
  namespace: kube-system
  labels:
    k8s-app: contiv-netmaster
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: contiv-netmaster
  template:
    metadata:
      name: contiv-netmaster
      namespace: kube-system
      labels:
        k8s-app: contiv-netmaster
    spec:
      priorityClassName: system-node-critical
      # The netmaster must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - operator: Exists
      serviceAccountName: contiv-netmaster
      containers:
        - name: contiv-netmaster
          image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
          env:
            - name: CONTIV_ROLE
              value: netmaster
            - name: CONTIV_NETMASTER_MODE
              value: kubernetes
            - name: CONTIV_NETMASTER_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_etcd
            - name: CONTIV_NETMASTER_FORWARD_MODE
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_fwdmode
            - name: CONTIV_NETMASTER_NET_MODE
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_netmode
            - name: CONTIV_NETMASTER_LOG_LEVEL
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_netmaster_loglevel
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /var/contiv
              name: var-contiv
              readOnly: false
      volumes:
        # Used by contiv-netmaster
        - name: var-contiv
          hostPath:
            path: /var/contiv
roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
@@ -1,29 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: contiv-netplugin
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - endpoints
      - nodes
      - namespaces
      - networkpolicies
      - pods
      - services
    verbs:
      - watch
      - list
      - update
      - get
  - apiGroups:
      - policy
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: contiv-netplugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-netplugin
subjects:
  - kind: ServiceAccount
    name: contiv-netplugin
    namespace: kube-system
roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-netplugin
  namespace: kube-system
roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -1,128 +0,0 @@
---
# This manifest installs the contiv-netplugin container, as well
# as the Contiv CNI plugins and network config, on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-netplugin
  namespace: kube-system
  labels:
    k8s-app: contiv-netplugin
spec:
  selector:
    matchLabels:
      k8s-app: contiv-netplugin
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-netplugin
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      tolerations:
        - operator: Exists
      serviceAccountName: contiv-netplugin
      initContainers:
        - name: contiv-netplugin-init
          image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
          env:
            - name: CONTIV_ROLE
              value: netplugin
            - name: CONTIV_MODE
              value: kubernetes
            - name: CONTIV_K8S_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_k8s_config
            - name: CONTIV_CNI_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_cni_config
          volumeMounts:
            - mountPath: /var/contiv
              name: var-contiv
              readOnly: false
            - mountPath: /etc/cni/net.d/
              name: etc-cni-dir
              readOnly: false
        - name: contiv-cni
          image: {{ contiv_image_repo }}:{{ contiv_version }}
          command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
          volumeMounts:
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
              readOnly: false
      containers:
        # Runs the netplugin container on each Kubernetes node. This
        # container programs network policy and routes on each host.
        - name: contiv-netplugin
          image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
          env:
            - name: VLAN_IF
              value: {{ contiv_vlan_interface }}
            - name: CONTIV_NETPLUGIN_VLAN_UPLINKS
              value: {{ contiv_vlan_interface }}
            - name: CONTIV_NETPLUGIN_VXLAN_PORT
              value: "{{ contiv_vxlan_port }}"
            - name: CONTIV_ROLE
              value: netplugin
            - name: CONTIV_NETPLUGIN_MODE
              value: kubernetes
            - name: CONTIV_NETPLUGIN_VTEP_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_etcd
            - name: CONTIV_NETPLUGIN_FORWARD_MODE
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_fwdmode
            - name: CONTIV_NETPLUGIN_NET_MODE
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_netmode
            - name: CONTIV_NETPLUGIN_LOG_LEVEL
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_netplugin_loglevel
          resources:
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /var/run
              name: var-run
              readOnly: false
            - mountPath: /var/contiv
              name: var-contiv
              readOnly: false
      volumes:
        # Used by contiv-netplugin
        - name: var-run
          hostPath:
            path: /var/run
        - name: var-contiv
          hostPath:
            path: /var/contiv
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: etc-cni-dir
          hostPath:
            path: /etc/cni/net.d/
roles/network_plugin/contiv/templates/contiv-ovs.yml.j2
@@ -1,79 +0,0 @@
---
# This manifest deploys the contiv-ovs pod.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: contiv-ovs
  namespace: kube-system
  labels:
    k8s-app: contiv-ovs
spec:
  selector:
    matchLabels:
      k8s-app: contiv-ovs
  template:
    metadata:
      labels:
        k8s-app: contiv-ovs
    spec:
      priorityClassName: system-node-critical
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      tolerations:
        - operator: Exists
      containers:
        # Runs the ovs containers on each Kubernetes node.
        - name: contiv-ovsdb-server
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/scripts/start-ovsdb-server.sh"]
          securityContext:
            privileged: false
          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is built again
          env:
            - name: OVSDBSERVER_EXTRA_FLAGS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_ovsdb_server_extra_flags
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /var/run
              name: var-run
              readOnly: false
        - name: contiv-ovs-vswitchd
          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
          command: ["/scripts/start-ovs-vswitchd.sh"]
          securityContext:
            privileged: true
          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and the image is built again
          env:
            - name: OVSVSWITCHD_EXTRA_FLAGS
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_ovs_vswitchd_extra_flags
          volumeMounts:
            - mountPath: /etc/openvswitch
              name: etc-openvswitch
              readOnly: false
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run
              name: var-run
              readOnly: false
      volumes:
        # Used by contiv-ovs
        - name: etc-openvswitch
          hostPath:
            path: /etc/openvswitch
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run
          hostPath:
            path: /var/run
roles/network_plugin/contiv/templates/generate-certificate.sh.j2
@@ -1,23 +0,0 @@
#!/bin/bash

set -euo pipefail

PREFIX="/var/contiv"
KEY_PATH="$PREFIX/auth_proxy_key.pem"
CERT_PATH="$PREFIX/auth_proxy_cert.pem"

# if both files exist, just exit
if [[ -f $KEY_PATH && -f $CERT_PATH ]]; then
  exit 0
fi

mkdir -p "$PREFIX"

rm -f "$KEY_PATH"
rm -f "$CERT_PATH"

openssl genrsa -out "$KEY_PATH" {{ certificates_key_size }} >/dev/null 2>&1
openssl req -new -x509 -sha256 -days {{ certificates_duration }} \
  -key "$KEY_PATH" \
  -out "$CERT_PATH" \
  -subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"
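To verify what the script above produced, standard openssl inspection works (not part of the role):

```bash
openssl x509 -in /var/contiv/auth_proxy_cert.pem -noout -subject -dates
openssl rsa -in /var/contiv/auth_proxy_key.pem -check -noout
```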
tests/files/packet_ubuntu16-contiv-sep.yml
@@ -1,9 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate

# Kubespray settings
kube_network_plugin: contiv
deploy_netchecker: true
dns_min_replicas: 1