
Merge pull request #6 from ansibl8s/calico_bgp_peering_opt

Calico bgp peering opt
Smaine Kahlouch · 9 years ago · commit 058ccea9bc (pull/11/head)
8 changed files with 83 additions and 43 deletions
  1. README.md (10 changed lines)
  2. cluster.yml (4 changed lines)
  3. environments/dev/group_vars/k8s-cluster.yml (5 changed lines)
  4. environments/dev/inventory (33 changed lines)
  5. environments/production/group_vars/all.yml (4 changed lines)
  6. environments/production/group_vars/k8s-cluster.yml (5 changed lines)
  7. roles/overlay_network/tasks/calico.yml (23 changed lines)
  8. roles/overlay_network/templates/calico/calico-node.service.j2 (42 changed lines)

README.md (10 changed lines)

@@ -50,7 +50,7 @@ The main variables to change are located in the directory ```environments/[env_n
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
- { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
- { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
- hosts: kube-node
@@ -122,7 +122,7 @@ In order to use this role you'll need the following entries in the file '*requir
path: roles/apps
# version: v1.0
- src: https://github.com/ansibl8s/k8s-skydns.git
- src: https://github.com/ansibl8s/k8s-kubedns.git
path: roles/apps
# version: v1.0
```
@@ -139,10 +139,10 @@ Finally update your playbook with the chosen role, and run it
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
- { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
- { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
...
```
Please refer to the [k8s-skydns readme](https://github.com/ansibl8s/k8s-skydns) for additional info.
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
#### Calico networking
Check if the calico-node container is running
@@ -199,6 +199,6 @@ grep path .gitmodules | sed 's/.*= //'
For instance you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns), as it is **strongly recommended**.
In order to use this role you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-skydns
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
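As a quick sanity check (not part of this commit, and assuming kubectl is configured on a master node and that the role deploys the addon into the kube-system namespace), you can confirm the kubedns pods came up after the playbook run:
```
# List the addon pods; the kubedns pod should be Running
kubectl get pods --namespace=kube-system
```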

cluster.yml (4 changed lines)

@@ -14,8 +14,8 @@
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
# Addons to be installed
# - { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
# Apps to be installed
# - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
# - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
- hosts: kube-node
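Once the desired app roles are uncommented, one possible way to (re)apply only them is to run the playbook restricted to the tags shown above (an illustration, not part of the diff; it assumes the dev inventory path used elsewhere in this PR):
```
ansible-playbook -i environments/dev/inventory cluster.yml --tags apps
```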

environments/dev/group_vars/k8s-cluster.yml (5 changed lines)

@@ -31,6 +31,11 @@ overlay_network_subnet: 10.233.64.0/18
# room for 4096 nodes with 254 pods per node.
overlay_network_host_prefix: 24
# With calico it is possible to distribute routes with the border routers of the datacenter.
peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
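To opt in to the router peering introduced above, the flag would be flipped in this same group_vars file (a hypothetical override, not part of this commit; the routers themselves and any per-node local_as are declared in the inventory, as the next file shows):
```
# Peer each node with the datacenter border routers instead of the default node mesh
peer_with_router: true
```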

environments/dev/inventory (33 changed lines)

@@ -1,19 +1,36 @@
[downloader]
192.168.0.1
172.16.0.1
[kube-master]
# NB: the br_addr must be in the {{ calico_pool }} subnet
# it will assign a /24 subnet per node
192.168.0.1 br_addr=10.233.64.1
[kube-node]
192.168.0.2 br_addr=10.233.65.1
192.168.0.3 br_addr=10.233.66.1
192.168.0.4 br_addr=10.233.67.1
172.16.0.1 br_addr=10.233.64.1
[etcd]
192.168.0.1
172.16.0.1
[kube-node:children]
usa
france
[usa]
172.16.0.1 br_addr=10.233.64.1
# Configure the AS number assigned to each node if BGP peering with border routers is enabled
172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
[france]
192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
[k8s-cluster:children]
kube-node
kube-master
# If you want to configure BGP peering with border routers you'll need to set the following vars
# List of routers and their AS numbers
#[usa:vars]
#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
#
#[france:vars]
#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]
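For illustration (not output from this PR), with peer_with_router enabled the Calico tasks later in this diff would run roughly the following on each node of the usa group, keeping the 65xxx placeholders from the inventory and assuming bgp_peers is the list the role iterates over:
```
calicoctl bgp node-mesh off
calicoctl node bgp peer add 172.16.0.252 as 65xxx
calicoctl node bgp peer add 172.16.0.253 as 65xxx
```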

environments/production/group_vars/all.yml (4 changed lines)

@@ -1,6 +1,6 @@
# Directory where the binaries will be installed
# bin_dir: /usr/local/bin
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
# local_release_dir: "/tmp/releases"
local_release_dir: "/tmp/releases"

environments/production/group_vars/k8s-cluster.yml (5 changed lines)

@@ -31,6 +31,11 @@
# room for 4096 nodes with 254 pods per node.
# overlay_network_host_prefix: 24
# With calico it is possible to distribute routes with the border routers of the datacenter.
# peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,

roles/overlay_network/tasks/calico.yml (23 changed lines)

@@ -1,37 +1,46 @@
---
- name: Install calicoctl bin
- name: Calico | Install calicoctl bin
copy:
src={{ local_release_dir }}/calico/bin/calicoctl
dest={{ bin_dir }}
mode=u+x
notify: restart calico-node
- name: Create calicoctl symlink (needed by kubelet)
- name: Calico | Create calicoctl symlink (needed by kubelet)
file: src=/usr/local/bin/calicoctl dest=/usr/bin/calicoctl state=link
- name: Write calico-node systemd init file
- name: Calico | Write calico-node systemd init file
template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
notify:
- reload systemd
- restart calico-node
- name: Write network-environment
- name: Calico | Write network-environment
template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
notify:
- reload systemd
- restart calico-node
- name: Enable calico-node
- name: Calico | Enable calico-node
service: name=calico-node enabled=yes state=started
- name: Configure calico-node remove default pool
- name: Calico | Configure calico-node remove default pool
shell: calicoctl pool remove 192.168.0.0/16
environment:
ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
run_once: true
- name: Configure calico-node desired pool
- name: Calico | Configure calico-node desired pool
shell: calicoctl pool add {{ overlay_network_subnet }}
environment:
ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
run_once: true
- name: Calico | Disable node mesh
shell: calicoctl bgp node-mesh off
when: peer_with_router and inventory_hostname in groups['kube-node']
- name: Calico | Configure peering with router(s)
shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
with_items: peers
when: peer_with_router and inventory_hostname in groups['kube-node']
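For concreteness (an illustration, not part of the role), here is how the two pool tasks would render in the dev environment shown earlier, with overlay_network_subnet set to 10.233.64.0/18 and the first kube-master at 172.16.0.1:
```
ETCD_AUTHORITY=172.16.0.1:4001 calicoctl pool remove 192.168.0.0/16
ETCD_AUTHORITY=172.16.0.1:4001 calicoctl pool add 10.233.64.0/18
```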

roles/overlay_network/templates/calico/calico-node.service.j2 (42 changed lines)

@@ -1,19 +1,23 @@
[Unit]
Description=calicoctl node
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
{% if inventory_hostname in groups['kube-node'] %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
{% endif %}
RemainAfterExit=yes
Type=oneshot
[Install]
WantedBy=multi-user.target
[Unit]
Description=calicoctl node
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
User=root
PermissionsStartOnly=true
ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
{% if inventory_hostname in groups['kube-node'] %}
{% if peer_with_router %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --as={{ local_as }} --kubernetes
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
{% endif %}
{% else %}
ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
{% endif %}
RemainAfterExit=yes
Type=oneshot
[Install]
WantedBy=multi-user.target
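With peer_with_router enabled on a kube-node, the template above would render the start command along these lines (an illustration assuming bin_dir is left at /usr/local/bin; the AS number keeps the 65xxx placeholder from the example inventory):
```
ExecStart=/usr/local/bin/calicoctl node --ip=${DEFAULT_IPV4} --as=65xxx --kubernetes
```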