
Flannel running as pod

pull/62/head
Smaine Kahlouch authored 9 years ago, committed by ant31
Parent commit: 8127e8f8e8
31 changed files with 637 additions and 201 deletions
  1. 61   .gitmodules
  2. 57   README.md
  3. 1    roles/apps/k8s-pgbouncer
  4. 9    roles/dnsmasq/tasks/main.yml
  5. 8    roles/docker/tasks/main.yml
  6. 4    roles/docker/vars/centos-6.yml
  7. 4    roles/docker/vars/debian.yml
  8. 2    roles/docker/vars/fedora-20.yml
  9. 14   roles/docker/vars/fedora.yml
  10. 2   roles/docker/vars/redhat.yml
  11. 8   roles/download/defaults/main.yml
  12. 7   roles/etcd/tasks/main.yml
  13. 318 roles/kubernetes/master/files/kubectl_bash_completion.sh
  14. 6   roles/kubernetes/node/handlers/main.yml
  15. 1   roles/kubernetes/node/tasks/install.yml
  16. 8   roles/kubernetes/preinstall/defaults/main.yml
  17. 43  roles/kubernetes/preinstall/tasks/main.yml
  18. 4   roles/kubernetes/preinstall/vars/centos.yml
  19. 4   roles/kubernetes/preinstall/vars/debian.yml
  20. 3   roles/kubernetes/preinstall/vars/fedora.yml
  21. 3   roles/kubernetes/preinstall/vars/redhat.yml
  22. 40  roles/network_plugin/handlers/main.yml
  23. 18  roles/network_plugin/tasks/calico.yml
  24. 83  roles/network_plugin/tasks/flannel.yml
  25. 21  roles/network_plugin/tasks/main.yml
  26. 6   roles/network_plugin/templates/docker
  27. 46  roles/network_plugin/templates/flannel/flannel-pod.yml
  28. 0   roles/network_plugin/templates/flannel/network.json
  29. 17  roles/network_plugin/templates/flannel/systemd-docker.service.j2
  30. 12  roles/network_plugin/templates/flannel/systemd-flannel.service.j2
  31. 28  roles/network_plugin/templates/systemd-docker.service

61
.gitmodules

@@ -1,43 +1,43 @@
[submodule "roles/apps/k8s-kube-ui"]
path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git
branch = v1.0
path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git
branch = v1.0
[submodule "roles/apps/k8s-kubedns"]
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
[submodule "roles/apps/k8s-common"]
path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
[submodule "roles/apps/k8s-redis"]
path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"]
path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git
path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"]
path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
[submodule "roles/apps/k8s-memcached"]
path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git
branch = v1.0
path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git
branch = v1.0
[submodule "roles/apps/k8s-postgres"]
path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git
branch = v1.0
path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git
branch = v1.0
[submodule "roles/apps/k8s-kubedash"]
path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git
@@ -47,3 +47,6 @@
[submodule "roles/apps/k8s-rabbitmq"]
path = roles/apps/k8s-rabbitmq
url = https://github.com/ansibl8s/k8s-rabbitmq.git
[submodule "roles/apps/k8s-pgbouncer"]
path = roles/apps/k8s-pgbouncer
url = https://github.com/ansibl8s/k8s-pgbouncer.git

57
README.md

@@ -8,10 +8,14 @@ This project allows to
- A **set of roles** in order to install applications over the k8s cluster
- A **flexible method** which helps to create new roles for apps.
Linux distributions tested:
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10
* **Fedora** 23
* **CentOS** 7 (Currently with flannel only)
### Requirements
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
Should work on **RedHat/Fedora/Centos** platforms (to be tested)
* The target servers must have access to the Internet in order to pull docker images.
* The target servers must have **access to the Internet** in order to pull docker images.
* The firewalls are not managed: you'll need to implement your own rules the way you used to.
In order to avoid any issues during deployment you should **disable your firewall**.
* **Copy your ssh keys** to all the servers part of your inventory.
@@ -272,6 +276,53 @@ calicoctl pool show
```
calicoctl endpoint show --detail
```
#### Flannel networking
* Flannel configuration file should have been created there
```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```
* Check if the network interface has been created
```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
inet 10.233.16.0/18 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge ip in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its ip address
```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created
kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2
```
```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
inet 10.233.16.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
valid_lft forever preferred_lft forever
```
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html).
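Reviewer's note: the values above come from the flannel network config that the new tasks push into etcd. A minimal sketch of inspecting that key, assuming etcdctl v2 semantics and a hypothetical `cluster_name` of `cluster.local` (the vxlan backend is inferred from the `flannel.1` interface and the 1450 MTU shown above, not stated in the diff):
```
# Hypothetical cluster_name "cluster.local"; the key matches the
# etcd prefix used by the flannel pod manifest in this commit.
etcdctl get /cluster.local/network/config
# Expected shape (backend inferred from the interface/MTU above):
# {"Network": "10.233.0.0/18", "Backend": {"Type": "vxlan"}}
```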

1
roles/apps/k8s-pgbouncer

@@ -0,0 +1 @@
Subproject commit 61c41e80e3da8938c7896c07822c19c060be4491

9
roles/dnsmasq/tasks/main.yml

@@ -34,7 +34,7 @@
state: directory
when: inventory_hostname in groups['kube-master']
- name: configure dnsmasq
- name: Write dnsmasq configuration
template:
src: 01-kube-dns.conf.j2
dest: /etc/dnsmasq.d/01-kube-dns.conf
@@ -42,15 +42,14 @@
backup: yes
when: inventory_hostname in groups['kube-master']
- name: create dnsmasq pod template
- name: Create dnsmasq pod manifest
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
when: inventory_hostname in groups['kube-master']
- name: Check for dnsmasq port
- name: Check for dnsmasq port (pulling image and running container)
wait_for:
port: 53
delay: 5
timeout: 100
when: inventory_hostname in groups['kube-master']
- name: check resolvconf
@@ -67,7 +66,7 @@
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
dest: "{{resolvconffile}}"
state: present
insertafter: EOF
insertbefore: BOF
backup: yes
follow: yes
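The switch from `insertafter: EOF` to `insertbefore: BOF` places the search line at the top of resolv.conf instead of appending it. A quick manual check of both changes in this file, assuming `dns_domain` is `cluster.local`:
```
# The search line should now be the first line of the file:
head -n1 /etc/resolv.conf
# search default.svc.cluster.local svc.cluster.local cluster.local

# Manual equivalent of the wait_for task (dnsmasq answering on :53):
nc -z -w5 127.0.0.1 53 && echo "dnsmasq is up"
```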

8
roles/docker/tasks/main.yml

@@ -20,14 +20,6 @@
{{ ansible_distribution }}-{{ ansible_distribution_version }}
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
- name: ensure docker requirements packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args:
name: "{{item}}"
state: latest
update_cache: yes
with_items: docker_package_info.pre_pkgs
when: docker_package_info.pre_pkgs|length > 0
- name: ensure docker repository public key is installed
action: "{{ docker_repo_key_info.pkg_key }}"

4
roles/docker/vars/centos-6.yml

@@ -2,10 +2,6 @@ docker_kernel_min_version: '2.6.32-431'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- epel-release
- curl
- device-mapper-libs
pkgs:
- docker-io

4
roles/docker/vars/debian.yml

@@ -2,10 +2,6 @@ docker_kernel_min_version: '3.2'
docker_package_info:
pkg_mgr: apt
pre_pkgs:
- apt-transport-https
- curl
- software-properties-common
pkgs:
- docker-engine

2
roles/docker/vars/fedora-20.yml

@@ -2,8 +2,6 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- curl
pkgs:
- docker-io

14
roles/docker/vars/fedora.yml

@@ -0,0 +1,14 @@
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: dnf
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
repo_keys: []
docker_repo_info:
pkg_repo: ''
repos: []

2
roles/docker/vars/redhat.yml

@@ -2,8 +2,6 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- curl
pkgs:
- docker

8
roles/download/defaults/main.yml

@@ -1,7 +1,6 @@
---
local_release_dir: /tmp
flannel_version: 0.5.5
calico_version: v0.13.0
calico_plugin_version: v0.7.0
kube_version: v1.1.3
@@ -11,8 +10,6 @@ kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a"
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
@@ -26,11 +23,6 @@ downloads:
dest: calico/bin/calico
url: "{{calico_plugin_download_url}}"
- name: flannel
dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
url: "{{flannel_download_url}}"
unarchive: yes
- name: kubernetes-kubelet
dest: kubernetes/bin/kubelet
sha256: "{{kubelet_checksum}}"

7
roles/etcd/tasks/main.yml

@@ -1,13 +1,12 @@
---
- name: ETCD2 | Stop etcd2 service
- name: Stop etcd2 service
service: name=etcd state=stopped
ignore_errors: yes
- name: ETCD2 | create etcd pod template
- name: Create etcd pod manifest
template: src=etcd-pod.yml dest=/etc/kubernetes/manifests/etcd-pod.manifest
- name: ETCD2 | Check for etcd2 port
- name: Check for etcd2 port (pulling image and running container)
wait_for:
port: 2379
delay: 5
timeout: 100
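A hedged manual equivalent of the `wait_for` check above, using etcd's v2 HTTP API (the exact output format varies across etcd releases):
```
# Once the kubelet starts the etcd static pod, port 2379 should answer:
curl -s http://127.0.0.1:2379/version
```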

318
roles/kubernetes/master/files/kubectl_bash_completion.sh

@@ -265,6 +265,7 @@ _kubectl_get()
flags_completion=()
flags+=("--all-namespaces")
flags+=("--export")
flags+=("--filename=")
flags_with_completion+=("--filename")
flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
@@ -401,10 +402,204 @@ _kubectl_describe()
must_have_one_noun+=("serviceaccount")
}
_kubectl_create_namespace()
{
last_command="kubectl_create_namespace"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret_docker-registry()
{
last_command="kubectl_create_secret_docker-registry"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--docker-email=")
flags+=("--docker-password=")
flags+=("--docker-server=")
flags+=("--docker-username=")
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_flag+=("--docker-email=")
must_have_one_flag+=("--docker-password=")
must_have_one_flag+=("--docker-username=")
must_have_one_noun=()
}
_kubectl_create_secret_generic()
{
last_command="kubectl_create_secret_generic"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--from-file=")
flags+=("--from-literal=")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--type=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret()
{
last_command="kubectl_create_secret"
commands=()
commands+=("docker-registry")
commands+=("generic")
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create()
{
last_command="kubectl_create"
commands=()
commands+=("namespace")
commands+=("secret")
flags=()
two_word_flags=()
@@ -945,6 +1140,125 @@ _kubectl_scale()
must_have_one_noun=()
}
_kubectl_cordon()
{
last_command="kubectl_cordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_drain()
{
last_command="kubectl_drain"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--force")
flags+=("--grace-period=")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_uncordon()
{
last_command="kubectl_uncordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_attach()
{
last_command="kubectl_attach"
@@ -1164,6 +1478,7 @@ _kubectl_run()
two_word_flags+=("-r")
flags+=("--requests=")
flags+=("--restart=")
flags+=("--rm")
flags+=("--save-config")
flags+=("--service-generator=")
flags+=("--service-overrides=")
@@ -2045,6 +2360,9 @@ _kubectl()
commands+=("logs")
commands+=("rolling-update")
commands+=("scale")
commands+=("cordon")
commands+=("drain")
commands+=("uncordon")
commands+=("attach")
commands+=("exec")
commands+=("port-forward")
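To exercise the new completions, a sketch assuming the file is sourced into the current shell; the commit only shows the role's source file, so the install path below is hypothetical:
```
# Hypothetical install location for the completion script:
source /etc/bash_completion.d/kubectl_bash_completion.sh
kubectl cord<TAB>       # now completes to "cordon"
kubectl create sec<TAB> # "secret" is a new create subcommand
```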

6
roles/kubernetes/node/handlers/main.yml

@@ -1,13 +1,13 @@
---
- name: reload systemd
command: systemctl daemon-reload
- name: restart systemd-kubelet
command: /bin/true
notify:
- reload systemd
- restart kubelet
- name: reload systemd
command: systemctl daemon-reload
- name: restart kubelet
service:
name: kubelet
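The notify chain above (`restart systemd-kubelet` -> `reload systemd` -> `restart kubelet`) amounts to this manual sequence after a unit-file change:
```
# Manual equivalent of the kubelet handler chain:
systemctl daemon-reload
systemctl restart kubelet
```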

1
roles/kubernetes/node/tasks/install.yml

@@ -46,3 +46,4 @@
- name: install | Perms calico plugin binary
file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico owner=kube mode=0755 state=file
when: kube_network_plugin == "calico"

8
roles/kubernetes/preinstall/defaults/main.yml

@@ -3,13 +3,7 @@ common_required_pkgs:
- python-httplib2
- openssl
- curl
debian_required_pkgs:
- python-apt
- python-pip
rh_required_pkgs:
- libselinux-python
- rsync
pypy_version: 2.4.0
python_pypy_url: "https://bitbucket.org/pypy/pypy/downloads/pypy-{{ pypy_version }}.tar.bz2"

43
roles/kubernetes/preinstall/tasks/main.yml

@@ -1,4 +1,17 @@
---
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
- name: "Identify init system"
shell: >
$(pgrep systemd > /dev/null && systemctl status > /dev/null);
@@ -14,27 +27,29 @@
- set_fact:
init_system: "{{ init_system_output.stdout }}"
- name: Install packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
with_items: common_required_pkgs
- name: Install debian packages requirements
apt:
name: "{{ item }}"
state: latest
- name: Install python-apt for Debian distribs
shell: apt-get install -y python-apt
when: ansible_os_family == "Debian"
with_items: debian_required_pkgs
changed_when: False
- name: Install python-dnf for latest RedHat versions
shell: dnf install -y python-dnf yum
when: ansible_distribution == "Fedora" and
ansible_distribution_major_version > 21
changed_when: False
- name: Install redhat packages requirements
- name: Install packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
with_items: "{{required_pkgs | union(common_required_pkgs)}}"
# Todo : selinux configuration
- name: Set selinux policy to permissive
selinux: policy=targeted state=permissive
when: ansible_os_family == "RedHat"
with_items: rh_required_pkgs
changed_when: False
- include: python-bootstrap.yml
when: ansible_os_family not in [ "Debian", "RedHat" ]
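The `with_first_found` lookup resolves the most specific vars file that exists, falling back to the OS family. With the four vars files added by this commit, a CentOS 7 host ends up on `centos.yml` (the candidate names below are illustrative):
```
# Candidates tried in order for a CentOS 7 host, e.g.:
#   centos-7.2.1511.yml, centos-core.yml, centos-7.yml, centos.yml, ...
# The first one that exists in ../vars wins:
ls roles/kubernetes/preinstall/vars/
# centos.yml  debian.yml  fedora.yml  redhat.yml
```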

4
roles/kubernetes/preinstall/vars/centos.yml

@@ -0,0 +1,4 @@
required_pkgs:
- epel-release
- libselinux-python
- device-mapper-libs

4
roles/kubernetes/preinstall/vars/debian.yml

@@ -0,0 +1,4 @@
required_pkgs:
- python-apt
- apt-transport-https
- software-properties-common

3
roles/kubernetes/preinstall/vars/fedora.yml

@@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs

3
roles/kubernetes/preinstall/vars/redhat.yml

@@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs

40
roles/network_plugin/handlers/main.yml

@@ -1,36 +1,30 @@
---
- name : reload systemd
shell: systemctl daemon-reload
- name: restart systemd-calico-node
command: /bin/true
notify:
- reload systemd
- restart calico-node
- name: restart calico-node
service:
name: calico-node
state: restarted
- name: restart docker
service: name=docker state=restarted
- name: restart flannel
service: name=flannel state=restarted
- name: restart systemd-docker
command: /bin/true
notify:
- reload systemd
- stop docker
- delete docker0
- start docker
when: inventory_hostname in groups['kube-node']
- name: stop docker
service: name=docker state=stopped
- restart docker
- name: delete docker0
- name: delete default docker bridge
command: ip link delete docker0
ignore_errors: yes
notify: restart docker
- name: start docker
service: name=docker state=started
- name : reload systemd
shell: systemctl daemon-reload
- name: restart calico-node
service:
name: calico-node
state: restarted
- name: restart docker
service:
name: docker
state: restarted

18
roles/network_plugin/tasks/calico.yml

@@ -1,5 +1,4 @@
---
- name: Calico | Install calicoctl bin
synchronize:
src: "{{ local_release_dir }}/calico/bin/calicoctl"
@@ -18,6 +17,10 @@
dest: /usr/bin/calicoctl
state: link
- wait_for:
port: 2379
when: inventory_hostname in groups['kube-master']
- name: Calico | Check if calico network pool has already been configured
uri:
url: "http://127.0.0.1:2379/v2/keys/calico/v1/ipam/v4/pool"
@@ -71,3 +74,16 @@
- name: Calico | Enable calico-node
service: name=calico-node enabled=yes state=started
- name: Calico | Disable node mesh
shell: calicoctl bgp node-mesh off
environment:
ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379"
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
- name: Calico | Configure peering with router(s)
shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
environment:
ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379"
with_items: peers
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
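A hedged rendering of the new BGP tasks for a hypothetical inventory where `peers` contains one router and `groups['etcd'][0]` resolves to `etcd1`:
```
# peers: [{ router_id: 10.99.0.1, as: "64512" }]  (hypothetical data)
ETCD_AUTHORITY=etcd1:2379 calicoctl bgp node-mesh off
ETCD_AUTHORITY=etcd1:2379 calicoctl node bgp peer add 10.99.0.1 as 64512
```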

83
roles/network_plugin/tasks/flannel.yml

@@ -1,57 +1,34 @@
---
- name: Create flannel user
user: name=flannel shell=/bin/nologin
- name: Install flannel binaries
synchronize:
src: "{{ local_release_dir }}/flannel/bin/flanneld"
dest: "{{ bin_dir }}/flanneld"
archive: no
times: yes
delegate_to: "{{ groups['downloader'][0] }}"
notify:
- restart flannel
- name: Perms flannel binary
file: path={{ bin_dir }}/flanneld owner=flannel mode=0755 state=file
- name: Write flannel.service systemd file
- name: Flannel | Write flannel configuration
template:
src: flannel/systemd-flannel.service.j2
dest: /etc/systemd/system/flannel.service
notify: restart flannel
src: flannel/network.json
dest: /etc/flannel-network.json
backup: yes
when: inventory_hostname in groups['kube-node']
- name: Write docker.service systemd file
- name: Flannel | Create flannel pod manifest
template:
src: flannel/systemd-docker.service.j2
dest: /lib/systemd/system/docker.service
notify: restart docker
- name: Set fact for etcd command conf file location
set_fact:
conf_file: "/tmp/flannel-conf.json"
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Create flannel config file to go in etcd
template: src=flannel/flannel-conf.json.j2 dest={{ conf_file }}
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Flannel configuration into etcd
shell: "{{ bin_dir }}/etcdctl set /{{ cluster_name }}/network/config < {{ conf_file }}"
delegate_to: "{{ groups['kube-master'][0] }}"
notify: restart flannel
- name: Clean up the flannel config file
file: path=/tmp/flannel-config.json state=absent
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Launch Flannel
service: name=flannel state=started enabled=yes
notify:
- restart flannel
- name: Enable Docker
service: name=docker enabled=yes state=started
src: flannel/flannel-pod.yml
dest: /etc/kubernetes/manifests/flannel-pod.manifest
notify: delete default docker bridge
- name: Flannel | Wait for flannel subnet.env file presence
wait_for:
path: /run/flannel/subnet.env
delay: 5
- name: Get flannel_subnet from subnet.env
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}'
register: flannel_subnet_output
changed_when: false
- set_fact:
flannel_subnet: "{{ flannel_subnet_output.stdout }}"
- name: Get flannel_mtu from subnet.env
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}'
register: flannel_mtu_output
changed_when: false
- set_fact:
flannel_mtu: "{{ flannel_mtu_output.stdout }}"
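What the two awk tasks extract, given the subnet.env contents shown in the README hunk above:
```
awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}' /run/flannel/subnet.env
# 10.233.16.1/24
awk -F'=' '$1 == "FLANNEL_MTU" {print $2}' /run/flannel/subnet.env
# 1450
```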

21
roles/network_plugin/tasks/main.yml

@@ -7,7 +7,24 @@
- include: flannel.yml
when: kube_network_plugin == "flannel"
- include: calico.yml
when: kube_network_plugin == "calico"
- name: Set docker daemon options
template:
src: docker
dest: "/etc/default/docker"
owner: root
group: root
mode: 0644
notify:
- restart docker
- name: Write docker.service systemd file
template:
src: systemd-docker.service
dest: /lib/systemd/system/docker.service
notify: restart systemd-docker
when: init_system == "systemd"
- meta: flush_handlers
- include: calico.yml
when: kube_network_plugin == "calico"

6
roles/network_plugin/templates/docker

@@ -0,0 +1,6 @@
# Deployed by Ansible
{% if init_system == "sysvinit" and kube_network_plugin == "flannel" and ansible_os_family == "Debian" %}
DOCKER_OPTS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% elif kube_network_plugin == "flannel" %}
OPTIONS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% endif %}
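A sketch of the rendered `/etc/default/docker` on a systemd flannel node (the non-Debian-sysvinit branch), substituting the facts gathered in flannel.yml with the values from the README example:
```
# Deployed by Ansible
OPTIONS="--bip=10.233.16.1/24 --mtu=1450"
```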

46
roles/network_plugin/templates/flannel/flannel-pod.yml

@@ -0,0 +1,46 @@
---
kind: "Pod"
apiVersion: "v1"
metadata:
name: "flannel"
namespace: "kube-system"
labels:
app: "flannel"
version: "v0.1"
spec:
volumes:
- name: "subnetenv"
hostPath:
path: "/run/flannel"
- name: "networkconfig"
hostPath:
path: "/etc/flannel-network.json"
containers:
- name: "flannel-server-helper"
image: "gcr.io/google_containers/flannel-server-helper:0.1"
args:
- "--network-config=/etc/flannel-network.json"
- "--etcd-prefix=/{{ cluster_name }}/network"
- "--etcd-server=http://{{ groups['etcd'][0] }}:2379"
volumeMounts:
- name: "networkconfig"
mountPath: "/etc/flannel-network.json"
imagePullPolicy: "Always"
- name: "flannel-container"
image: "quay.io/coreos/flannel:0.5.5"
command:
- "/bin/sh"
- "-c"
- "/opt/bin/flanneld -etcd-endpoints {% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} -etcd-prefix /{{ cluster_name }}/network 1>>/var/log/flannel_server.log 2>&1"
ports:
- hostPort: 10253
containerPort: 10253
resources:
limits:
cpu: "100m"
volumeMounts:
- name: "subnetenv"
mountPath: "/run/flannel"
securityContext:
privileged: true
hostNetwork: true
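Once the kubelet reads the manifest directory, flannel surfaces as a static pod. A quick check, assuming kubectl is pointed at the cluster:
```
# The flannel static pod should appear in kube-system on each node:
kubectl get pods --namespace=kube-system | grep flannel
# and the container's declared hostPort should answer locally:
nc -z -w5 127.0.0.1 10253 && echo "flannel hostPort is open"
```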

roles/network_plugin/templates/flannel/flannel-conf.json.j2 → roles/network_plugin/templates/flannel/network.json

17
roles/network_plugin/templates/flannel/systemd-docker.service.j2

@@ -1,17 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket flannel.service
Requires=docker.socket
[Service]
EnvironmentFile=/run/flannel/subnet.env
EnvironmentFile=-/etc/default/docker
ExecStart=/usr/bin/docker -d -H fd:// --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target

12
roles/network_plugin/templates/flannel/systemd-flannel.service.j2

@@ -1,12 +0,0 @@
[Unit]
Description=Flannel Network Overlay
Documentation=https://coreos.com/flannel/docs/latest
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/flanneld \
$FLANNEL_ETCD_PREFIX
Restart=on-failure
[Install]
WantedBy=multi-user.target

28
roles/network_plugin/templates/systemd-docker.service

@@ -0,0 +1,28 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
{% if ansible_os_family == "RedHat" %}
After=network.target
Wants=docker-storage-setup.service
{% elif ansible_os_family == "Debian" %}
After=network.target docker.socket
Requires=docker.socket
{% endif %}
[Service]
Type=notify
EnvironmentFile=-/etc/default/docker
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/docker daemon \
$OPTIONS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
$INSECURE_REGISTRY
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
MountFlags=slave
TimeoutStartSec=1min
[Install]
WantedBy=multi-user.target
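The `restart systemd-docker` handler chain in the new network_plugin handlers amounts to this sequence on a kube-node, forcing docker to rebuild its bridge inside the flannel subnet:
```
# Manual equivalent: reload systemd, stop docker, drop the old
# bridge, start docker with the new --bip/--mtu options.
systemctl daemon-reload
systemctl stop docker
ip link delete docker0 || true   # ignore_errors: yes in the handler
systemctl start docker
```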