calico upgrade to v3 (#3086)
* calico upgrade to v3
* update calico_rr version
* add missing file
* change contents of main.yml as it was left old version
* enable network policy by default
* remove unneeded task
* Fix kubelet calico settings
* fix when statement
* switch back to node-kubeconfig.yaml
Aivars Sterns authored 6 years ago, committed by Matthew Mosesohn
27 changed files with 399 additions and 252 deletions
  2  README.md
  9  docs/calico.md
  3  inventory/sample/group_vars/k8s-cluster.yml
 10  roles/download/defaults/main.yml
 12  roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
 23  roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
 15  roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
  8  roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
  9  roles/kubernetes/kubeadm/tasks/main.yml
  1  roles/kubernetes/node/templates/kubelet-container.j2
  7  roles/kubernetes/node/templates/kubelet.rkt.service.j2
  1  roles/kubernetes/preinstall/tasks/main.yml
  2  roles/kubernetes/preinstall/tasks/set_resolv_facts.yml
  2  roles/kubespray-defaults/defaults/main.yaml
 11  roles/network_plugin/calico/defaults/main.yml
 37  roles/network_plugin/calico/tasks/check.yml
189  roles/network_plugin/calico/tasks/install.yml
197  roles/network_plugin/calico/tasks/main.yml
 16  roles/network_plugin/calico/tasks/pre.yml
 26  roles/network_plugin/calico/tasks/upgrade.yml
 14  roles/network_plugin/calico/templates/calico-node.yml.j2
 10  roles/network_plugin/calico/templates/cni-calico.conflist.j2
  9  roles/network_plugin/calico/templates/etcdv2-store.yml.j2
  9  roles/network_plugin/calico/templates/etcdv3-store.yml.j2
  6  roles/network_plugin/canal/templates/canal-node.yaml.j2
  1  roles/reset/tasks/main.yml
 22  upgrade-cluster.yml
@@ -0,0 +1,37 @@ roles/network_plugin/calico/tasks/check.yml
---
- name: "Check vars defined correctly"
  assert:
    that:
      - "calico_pool_name is defined"
      - "calico_pool_name | match('^[a-zA-Z0-9-_\\\\.]{2,63}$')"
      - "ipip_mode is defined"
      - "ipip_mode in ['Always', 'CrossSubnet', 'Never']"
    msg: "Check variable definitions, something seems to be wrong"
  run_once: yes

- name: "Get current calico cluster version"
  shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version' | awk '{ print $3}'"
  register: calico_version_on_server
  run_once: yes
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: "Determine if calico upgrade is needed"
  block:
    - name: "Check that calico version is recent enough for upgrade"
      assert:
        that:
          - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
        msg: "Your version of calico is not fresh enough for upgrade"

    - name: "Set upgrade flag when version needs to be updated"
      set_fact:
        calico_upgrade_needed: True
      when:
        - calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
        - calico_version_on_server.stdout|version_compare('v3.0.0', '<')

  when:
    - 'calico_version_on_server.stdout is defined'
    - 'calico_version_on_server.stdout != ""'
    - inventory_hostname == groups['kube-master'][0]
  run_once: yes
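For orientation, the version probe above only scrapes calicoctl's human-readable output and keeps the third field of the "Cluster Version" line. A minimal manual equivalent, assuming bin_dir is /usr/local/bin (an illustrative path, not fixed by this PR):

    # Hypothetical manual check on the first master; mirrors the shell task above
    CURRENT=$(/usr/local/bin/calicoctl version | grep 'Cluster Version' | awk '{ print $3 }')
    echo "cluster is running calico ${CURRENT}"
    # The upgrade path below only applies to versions >= v2.6.5 and < v3.0.0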
@@ -0,0 +1,189 @@ roles/network_plugin/calico/tasks/install.yml
---

- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/10-calico.conflist"
    owner: kube

- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root

- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    force: yes
  with_items:
    - {s: "ca.pem", d: "ca_cert.crt"}
    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}

- name: Calico | Install calicoctl container script
  template:
    src: calicoctl-container.j2
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    owner: root
    group: root
  changed_when: false

- name: Calico | Copy cni plugins from hyperkube
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  tags:
    - hyperkube
    - upgrade

- name: Calico | Copy cni plugins from calico/cni container
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when:
    - "overwrite_hyperkube_cni|bool"
  tags:
    - hyperkube
    - upgrade

- name: Calico | Set cni directory permissions
  file:
    path: /opt/cni/bin
    state: directory
    owner: kube
    recurse: true
    mode: 0755

- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
    client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true

- name: Calico | Check if calico network pool has already been configured
  shell: >
    {{ bin_dir }}/calicoctl get ippool | grep -w "{{ kube_pods_subnet }}" | wc -l
  register: calico_conf
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  run_once: true

- name: Calico | Configure calico network pool
  shell: >
    echo "
    { "kind": "IPPool",
      "apiVersion": "projectcalico.org/v3",
      "metadata": {
        "name": "{{ calico_pool_name }}",
      },
      "spec": {
        "cidr": "{{ kube_pods_subnet }}",
        "ipipMode": "{{ ipip_mode|capitalize }}",
        "natOutgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }} }} " | {{ bin_dir }}/calicoctl create -f -
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"
  when:
    - 'calico_conf.stdout == "0"'

- name: "Determine nodeToNodeMesh needed state"
  set_fact:
    nodeToNodeMeshEnabled: "false"
  when:
    - peer_with_router|default(false) or peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
  run_once: yes

- name: Calico | Set global as_num
  shell: >
    echo '
    { "kind": "BGPConfiguration",
      "apiVersion": "projectcalico.org/v3",
      "metadata": {
        "name": "default",
      },
      "spec": {
        "logSeverityScreen": "Info",
        "nodeToNodeMeshEnabled": {{ nodeToNodeMeshEnabled|default('true') }} ,
        "asNumber": {{ global_as_num }} }} ' | {{ bin_dir }}/calicoctl create --skip-exists -f -
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Calico | Configure peering with router(s)
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "bgpPeer",
      "metadata": {
        "name": "{{ inventory_hostname }}-bgp"
      },
      "spec": {
        "asNumber": "{{ item.as }}",
        "node": "{{ inventory_hostname }}",
        "scope": "node",
        "peerIP": "{{ item.router_id }}"
      }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ peers|default([]) }}"
  when:
    - peer_with_router|default(false)
    - inventory_hostname in groups['k8s-cluster']

- name: Calico | Configure peering with route reflectors
  shell: >
    echo '{
      "apiVersion": "projectcalico.org/v3",
      "kind": "bgpPeer",
      "metadata": {
        "name": "{{ inventory_hostname }}"
      },
      "spec": {
        "asNumber": "{{ local_as | default(global_as_num)}}",
        "scope": "node",
        "node": "{{ inventory_hostname }}",
        "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"
      }}' | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items:
    - "{{ groups['calico-rr'] | default([]) }}"
  when:
    - peer_with_calico_rr|default(false)
    - inventory_hostname in groups['k8s-cluster']
    - hostvars[item]['cluster_id'] == cluster_id

- name: Calico | Create calico manifests
  template:
    src: "{{item.file}}.j2"
    dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
  register: calico_node_manifests
  when:
    - inventory_hostname in groups['kube-master']
    - rbac_enabled or item.type not in rbac_resources
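For readers comparing the old v1 and new v3 pool schemas, the echo pipeline above effectively hands calicoctl a projectcalico.org/v3 IPPool document. A hand-written equivalent, with example values standing in for the templated calico_pool_name, kube_pods_subnet and ipip_mode variables (values and the /usr/local/bin path are illustrative, not taken from this PR):

    # Sketch of the rendered manifest fed to calicoctl; values are placeholders
    cat <<EOF | /usr/local/bin/calicoctl create -f -
    apiVersion: projectcalico.org/v3
    kind: IPPool
    metadata:
      name: default-pool
    spec:
      cidr: 10.233.64.0/18
      ipipMode: Always
      natOutgoing: true
    EOF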
@@ -1,193 +1,12 @@ roles/network_plugin/calico/tasks/main.yml
---
- name: Calico | Disable calico-node service if it exists
  service:
    name: calico-node
    state: stopped
    enabled: yes
  failed_when: false
- include_tasks: check.yml

- name: Calico | Get kubelet hostname
  shell: >-
    {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
    | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
  register: calico_kubelet_name
  delegate_to: "{{ groups['kube-master'][0] }}"
  when: cloud_provider is defined

- name: Calico | Write Calico cni config
  template:
    src: "cni-calico.conflist.j2"
    dest: "/etc/cni/net.d/10-calico.conflist"
    owner: kube

- name: Calico | Create calico certs directory
  file:
    dest: "{{ calico_cert_dir }}"
    state: directory
    mode: 0750
    owner: root
    group: root

- name: Calico | Link etcd certificates for calico-node
  file:
    src: "{{ etcd_cert_dir }}/{{ item.s }}"
    dest: "{{ calico_cert_dir }}/{{ item.d }}"
    state: hard
    force: yes
  with_items:
    - {s: "ca.pem", d: "ca_cert.crt"}
    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}

- name: Calico | Install calicoctl container script
  template:
    src: calicoctl-container.j2
    dest: "{{ bin_dir }}/calicoctl"
    mode: 0755
    owner: root
    group: root
  changed_when: false

- name: Calico | Copy cni plugins from hyperkube
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -rf /opt/cni/bin/. /cnibindir/"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  tags:
    - hyperkube
    - upgrade

- name: Calico | Copy cni plugins from calico/cni container
  command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ calico_cni_image_repo }}:{{ calico_cni_image_tag }} sh -c 'cp /opt/cni/bin/* /cnibindir/'"
  register: cni_task_result
  until: cni_task_result.rc == 0
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
  when: overwrite_hyperkube_cni|bool
  tags:
    - hyperkube
    - upgrade

- name: Calico | Set cni directory permissions
  file:
    path: /opt/cni/bin
    state: directory
    owner: kube
    recurse: true
    mode: 0755

- name: Calico | wait for etcd
  uri:
    url: "{{ etcd_access_addresses.split(',') | first }}/health"
    validate_certs: no
    client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
    client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  register: result
  until: result.status == 200 or result.status == 401
  retries: 10
  delay: 5
  run_once: true

- name: Calico | Check if calico network pool has already been configured
  command: |-
    curl \
      --cacert {{ etcd_cert_dir }}/ca.pem \
      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
  register: calico_conf
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  run_once: true
  changed_when: false
- include_tasks: pre.yml

- name: Calico | Configure calico network pool
  shell: >
    echo '{
      "kind": "ipPool",
      "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
        "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
      "apiVersion": "v1",
      "metadata": {"cidr": "{{ kube_pods_subnet }}"}
    }'
    | {{ bin_dir }}/calicoctl apply -f -
  environment:
    NO_DEFAULT_POOLS: true
  run_once: true
  when: ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout)

- name: Calico | Get calico configuration from etcd
  command: |-
    curl \
      --cacert {{ etcd_cert_dir }}/ca.pem \
      --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
      --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
      {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool
  register: calico_pools_raw
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  run_once: true

- set_fact:
    calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
  run_once: true

- name: Calico | Set global as_num
  command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
  run_once: true

- name: Calico | Disable node mesh
  shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false))
        and inventory_hostname in groups['k8s-cluster'])
  run_once: true

- name: Calico | Configure peering with router(s)
  shell: >
    echo '{
      "kind": "bgpPeer",
      "spec": {"asNumber": "{{ item.as }}"},
      "apiVersion": "v1",
      "metadata": {"node": "{{ inventory_hostname }}", "scope": "node", "peerIP": "{{ item.router_id }}"}
    }'
    | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ peers|default([]) }}"
  when: peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']

- name: Calico | Configure peering with route reflectors
  shell: >
    echo '{
      "kind": "bgpPeer",
      "spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
      "apiVersion": "v1",
      "metadata": {"node": "{{ inventory_hostname }}",
        "scope": "node",
        "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4"]["address"]) }}"}
    }'
    | {{ bin_dir }}/calicoctl create --skip-exists -f -
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ groups['calico-rr'] | default([]) }}"
  when: (peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
        and hostvars[item]['cluster_id'] == cluster_id)

- name: Calico | Create calico manifests
  template:
    src: "{{item.file}}.j2"
    dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
    - {name: calico-config, file: calico-config.yml, type: cm}
    - {name: calico-node, file: calico-node.yml, type: ds}
    - {name: calico, file: calico-node-sa.yml, type: sa}
    - {name: calico, file: calico-cr.yml, type: clusterrole}
    - {name: calico, file: calico-crb.yml, type: clusterrolebinding}
  register: calico_node_manifests
- include_tasks: upgrade.yml
  when:
    - inventory_hostname in groups['kube-master']
    - calico_upgrade_needed
  run_once: yes
  delegate_to: "{{ groups['kube-master'][0] }}"

- include_tasks: install.yml
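Taken together, the include_tasks lines added in this hunk reduce main.yml to a thin dispatcher over the new task files; roughly (a sketch assembled from the added lines above, not a verbatim copy of the merged file):

    ---
    - include_tasks: check.yml

    - include_tasks: pre.yml

    - include_tasks: upgrade.yml
      when:
        - inventory_hostname in groups['kube-master']
        - calico_upgrade_needed
      run_once: yes
      delegate_to: "{{ groups['kube-master'][0] }}"

    - include_tasks: install.yml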
@@ -0,0 +1,16 @@ roles/network_plugin/calico/tasks/pre.yml
---
- name: Calico | Disable calico-node service if it exists
  service:
    name: calico-node
    state: stopped
    enabled: no
  failed_when: false

- name: Calico | Get kubelet hostname
  shell: >-
    {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
    | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
  register: calico_kubelet_name
  delegate_to: "{{ groups['kube-master'][0] }}"
  when:
    - "cloud_provider is defined"
@@ -0,0 +1,26 @@ roles/network_plugin/calico/tasks/upgrade.yml
---
- name: "Download calico-upgrade tool (force version)"
  get_url:
    url: "https://github.com/projectcalico/calico-upgrade/releases/download/{{ calico_upgrade_version }}/calico-upgrade"
    dest: "{{ bin_dir }}/calico-upgrade"
    mode: 0755
    owner: root
    group: root
    force: yes

- name: "Create etcdv2 and etcdv3 calicoApiConfig"
  template:
    src: "{{ item }}-store.yml.j2"
    dest: "/etc/calico/{{ item }}.yml"
  with_items:
    - "etcdv2"
    - "etcdv3"

- name: "Test data migration (dry-run)"
  shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
  register: calico_upgrade_test_data
  failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'

- name: "If the test migration succeeds, run the real calico data migration"
  shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
  register: calico_upgrade_migration_data
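The two migration tasks above wrap the calico-upgrade tool; invoked by hand with the same flags (the /usr/local/bin path is assumed to match bin_dir, and the apiconfig locations match the templates rendered above), the sequence would be roughly:

    # Dry run first; the task treats any output without "Successfully" as a failure
    /usr/local/bin/calico-upgrade dry-run --output-dir=/tmp \
      --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml
    # Then the real etcdv2 -> etcdv3 data migration
    /usr/local/bin/calico-upgrade start --no-prompts --output-dir=/tmp/calico_upgrade \
      --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml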
@@ -0,0 +1,9 @@ roles/network_plugin/calico/templates/etcdv2-store.yml.j2
apiVersion: v1
kind: calicoApiConfig
metadata:
spec:
  datastoreType: "etcdv2"
  etcdEndpoints: "{{ etcd_access_addresses }}"
  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
@@ -0,0 +1,9 @@ roles/network_plugin/calico/templates/etcdv3-store.yml.j2
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "etcdv3"
  etcdEndpoints: "{{ etcd_access_addresses }}"
  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
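The two apiconfig templates differ only in apiVersion, kind and datastoreType. After the migration, the v3 file can be used to spot-check the converted resources directly; a hedged example, assuming calicoctl v3 is installed at the same illustrative path and accepts its --config option here:

    # Hypothetical post-migration sanity check against the etcdv3 datastore
    /usr/local/bin/calicoctl get ippool -o wide --config /etc/calico/etcdv3.yml
    /usr/local/bin/calicoctl get bgppeer --config /etc/calico/etcdv3.yml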