From af5e35e938232f9f1064b405b78b555616cfb61b Mon Sep 17 00:00:00 2001
From: Smaine Kahlouch
Date: Thu, 15 Oct 2015 09:40:02 +0200
Subject: [PATCH 1/3] Configure bgp peering with border routers of dc

---
 environments/dev/group_vars/k8s-cluster.yml  |  5 +++
 environments/dev/inventory                   | 33 +++++++++++----
 .../production/group_vars/k8s-cluster.yml    |  5 +++
 roles/overlay_network/tasks/calico.yml       | 23 ++++++----
 .../templates/calico/calico-node.service.j2  | 42 ++++++++++---------
 5 files changed, 74 insertions(+), 34 deletions(-)

diff --git a/environments/dev/group_vars/k8s-cluster.yml b/environments/dev/group_vars/k8s-cluster.yml
index 609e86807..61247c5a8 100644
--- a/environments/dev/group_vars/k8s-cluster.yml
+++ b/environments/dev/group_vars/k8s-cluster.yml
@@ -31,6 +31,11 @@ overlay_network_subnet: 10.233.64.0/18
 # room for 4096 nodes with 254 pods per node.
 overlay_network_host_prefix: 24
 
+# With calico it is possible to distribute routes to the border routers of the datacenter.
+peer_with_router: false
+# Warning: enabling router peering will disable calico's default behavior ('node mesh').
+# The subnet of each node will be distributed by the datacenter router
+
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
 # into appropriate IP addresses. It's highly advisable to run such DNS server,
diff --git a/environments/dev/inventory b/environments/dev/inventory
index 9955305dd..7b67a7a8f 100644
--- a/environments/dev/inventory
+++ b/environments/dev/inventory
@@ -1,19 +1,36 @@
 [downloader]
-192.168.0.1
+172.16.0.1
 
 [kube-master]
 # NB : the br_addr must be in the {{ calico_pool }} subnet
 # it will assign a /24 subnet per node
-192.168.0.1 br_addr=10.233.64.1
-
-[kube-node]
-192.168.0.2 br_addr=10.233.65.1
-192.168.0.3 br_addr=10.233.66.1
-192.168.0.4 br_addr=10.233.67.1
+172.16.0.1 br_addr=10.233.64.1
 
 [etcd]
-192.168.0.1
+172.16.0.1
+
+[kube-node:children]
+usa
+france
+
+[usa]
+172.16.0.1 br_addr=10.233.64.1
+# Configure the AS number assigned to each node if BGP peering with border routers is enabled
+172.16.0.2 br_addr=10.233.65.1 # local_as=65xxx
+172.16.0.3 br_addr=10.233.66.1 # local_as=65xxx
+
+[france]
+192.168.0.1 br_addr=10.233.67.1 # local_as=65xxx
+192.168.0.2 br_addr=10.233.68.1 # local_as=65xxx
 
 [k8s-cluster:children]
 kube-node
 kube-master
+
+# If you want to configure BGP peering with border routers you'll need to set the following vars
+# List of routers and their AS numbers
+#[usa:vars]
+#bgp_peers=[{"router_id": "172.16.0.252", "as": "65xxx"}, {"router_id": "172.16.0.253", "as": "65xxx"}]
+#
+#[france:vars]
+#bgp_peers=[{"router_id": "192.168.0.252", "as": "65xxx"}, {"router_id": "192.168.0.253", "as": "65xxx"}]
diff --git a/environments/production/group_vars/k8s-cluster.yml b/environments/production/group_vars/k8s-cluster.yml
index 4231c4c19..95c89cead 100644
--- a/environments/production/group_vars/k8s-cluster.yml
+++ b/environments/production/group_vars/k8s-cluster.yml
@@ -31,6 +31,11 @@
 # room for 4096 nodes with 254 pods per node.
 # overlay_network_host_prefix: 24
 
+# With calico it is possible to distribute routes to the border routers of the datacenter.
+# peer_with_router: false
+# Warning: enabling router peering will disable calico's default behavior ('node mesh').
+# The subnet of each node will be distributed by the datacenter router
+
 # Internal DNS configuration.
 # Kubernetes can create and mainatain its own DNS server to resolve service names
 # into appropriate IP addresses. It's highly advisable to run such DNS server,
diff --git a/roles/overlay_network/tasks/calico.yml b/roles/overlay_network/tasks/calico.yml
index 2b5e3d040..f09526fb0 100644
--- a/roles/overlay_network/tasks/calico.yml
+++ b/roles/overlay_network/tasks/calico.yml
@@ -1,37 +1,46 @@
 ---
-- name: Install calicoctl bin
+- name: Calico | Install calicoctl bin
   copy: src={{ local_release_dir }}/calico/bin/calicoctl dest={{ bin_dir }} mode=u+x
   notify: restart calico-node
 
-- name: Create calicoctl symlink (needed by kubelet)
+- name: Calico | Create calicoctl symlink (needed by kubelet)
   file: src=/usr/local/bin/calicoctl dest=/usr/bin/calicoctl state=link
 
-- name: Write calico-node systemd init file
+- name: Calico | Write calico-node systemd init file
   template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
   notify:
     - reload systemd
     - restart calico-node
 
-- name: Write network-environment
+- name: Calico | Write network-environment
   template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x
   notify:
     - reload systemd
     - restart calico-node
 
-- name: Enable calico-node
+- name: Calico | Enable calico-node
   service: name=calico-node enabled=yes state=started
 
-- name: Configure calico-node remove default pool
+- name: Calico | Configure calico-node remove default pool
   shell: calicoctl pool remove 192.168.0.0/16
   environment:
     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
   run_once: true
 
-- name: Configure calico-node desired pool
+- name: Calico | Configure calico-node desired pool
   shell: calicoctl pool add {{ overlay_network_subnet }}
   environment:
     ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001"
   run_once: true
+
+- name: Calico | Disable node mesh
+  shell: calicoctl bgp node-mesh off
+  when: peer_with_router and inventory_hostname in groups['kube-node']
+
+- name: Calico | Configure peering with router(s)
+  shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
+  with_items: bgp_peers
+  when: peer_with_router and inventory_hostname in groups['kube-node']
diff --git a/roles/overlay_network/templates/calico/calico-node.service.j2 b/roles/overlay_network/templates/calico/calico-node.service.j2
index 4f51407b0..fe44d0f5e 100644
--- a/roles/overlay_network/templates/calico/calico-node.service.j2
+++ b/roles/overlay_network/templates/calico/calico-node.service.j2
@@ -1,19 +1,23 @@
-[Unit]
-Description=calicoctl node
-After=etcd2.service
-
-[Service]
-EnvironmentFile=/etc/network-environment
-User=root
-PermissionsStartOnly=true
-ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
-{% if inventory_hostname in groups['kube-node'] %}
-ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
-{% else %}
-ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
-{% endif %}
-RemainAfterExit=yes
-Type=oneshot
-
-[Install]
-WantedBy=multi-user.target
+[Unit]
+Description=calicoctl node
+After=etcd2.service
+
+[Service]
+EnvironmentFile=/etc/network-environment
+User=root
+PermissionsStartOnly=true
+ExecStartPre={{ bin_dir }}/calicoctl checksystem --fix
+{% if inventory_hostname in groups['kube-node'] %}
+{% if peer_with_router %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --as={{ local_as }} --kubernetes
+{% else %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4} --kubernetes
+{% endif %}
+{% else %}
+ExecStart={{ bin_dir }}/calicoctl node --ip=${DEFAULT_IPV4}
+{% endif %}
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=multi-user.target
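
Note: to illustrate how the variables introduced in this patch fit together, here is a minimal sketch of enabling BGP peering for the `usa` group through a group_vars file instead of the commented `[usa:vars]` inventory block above. The file name and the AS numbers are placeholders (not values from this patch series); the router addresses follow the commented example in the inventory.

```yaml
# environments/dev/group_vars/usa.yml (hypothetical file)
# Disable calico's node mesh and peer each node with the border routers instead.
peer_with_router: true

# Routers to peer with; the keys match what the
# "Calico | Configure peering with router(s)" task iterates over.
bgp_peers:
  - { router_id: "172.16.0.252", as: "65001" }   # placeholder AS number
  - { router_id: "172.16.0.253", as: "65001" }   # placeholder AS number
```

The per-node AS number is still taken from the `local_as` host variable hinted at in the `[usa]` and `[france]` inventory examples, which the calico-node.service template passes to `calicoctl node --as={{ local_as }}`.
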
From 42613eac91f24506846b04e6c5da4e5dd4fedd1a Mon Sep 17 00:00:00 2001
From: Smaine Kahlouch
Date: Sun, 18 Oct 2015 11:29:02 +0200
Subject: [PATCH 2/3] uncomment all.yml variables

---
 environments/production/group_vars/all.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/environments/production/group_vars/all.yml b/environments/production/group_vars/all.yml
index ef234d256..5cf4c0b54 100644
--- a/environments/production/group_vars/all.yml
+++ b/environments/production/group_vars/all.yml
@@ -1,6 +1,6 @@
 # Directory where the binaries will be installed
-# bin_dir: /usr/local/bin
+bin_dir: /usr/local/bin
 
 # Where the binaries will be downloaded.
 # Note: ensure that you've enough disk space (about 1G)
-# local_release_dir: "/tmp/releases"
+local_release_dir: "/tmp/releases"

From 5d61661850dab386463577d6c78f85ef4945e66d Mon Sep 17 00:00:00 2001
From: Smaine Kahlouch
Date: Sun, 18 Oct 2015 16:23:01 +0200
Subject: [PATCH 3/3] renaming role k8s-skydns to k8s-kubedns

---
 README.md   | 10 +++++-----
 cluster.yml |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 5f65b12f8..17299432c 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,7 @@ The main variables to change are located in the directory ```environments/[env_n
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
-    - { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
+    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
     - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
 
 - hosts: kube-node
@@ -122,7 +122,7 @@ In order to use this role you'll need the following entries in the file '*requir
   path: roles/apps
   # version: v1.0
 
-- src: https://github.com/ansibl8s/k8s-skydns.git
+- src: https://github.com/ansibl8s/k8s-kubedns.git
   path: roles/apps
   # version: v1.0
 ```
@@ -139,10 +139,10 @@ Finally update your playbook with the chosen role, and run it
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
-    - { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
+    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
 ...
 ```
-Please refer to the [k8s-skydns readme](https://github.com/ansibl8s/k8s-skydns) for additionnal info.
+Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
 
 #### Calico networking
 Check if the calico-node container is running
@@ -199,6 +199,6 @@ grep path .gitmodules | sed 's/.*= //'
 
 For instance if you will probably want to install a [dns server](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) as it is **strongly recommanded**. In order to use this role you'll need to follow these steps
 ```
-git submodule init roles/apps/k8s-common roles/apps/k8s-skydns
+git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
 git submodule update
 ```
diff --git a/cluster.yml b/cluster.yml
index 3e5d0a300..2fdb803e1 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -14,8 +14,8 @@
 - hosts: kube-master
   roles:
     - { role: kubernetes/master, tags: master }
-    # Addons to be installed
-    # - { role: apps/k8s-skydns, tags: ['skydns', 'apps'] }
+    # Apps to be installed
+    # - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
     # - { role: apps/k8s-fabric8, tags: ['fabric8', 'apps'] }
 
 - hosts: kube-node
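
As a possible follow-up (not part of the patches above), here is a hedged sketch of a verification task that could be appended to roles/overlay_network/tasks/calico.yml to surface the BGP session state once router peering is configured. It assumes the bundled calicoctl exposes a `status` subcommand, as older calicoctl releases did.

```yaml
# Hypothetical check, not included in the patch series above.
- name: Calico | Show BGP session status
  shell: calicoctl status
  register: calico_status
  changed_when: false
  when: peer_with_router and inventory_hostname in groups['kube-node']

- name: Calico | Print BGP session status
  debug: var=calico_status.stdout_lines
  when: peer_with_router and inventory_hostname in groups['kube-node']
```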