Browse Source

Add api runtime config option, review kubernetes handlers

pull/21/head
Smaine Kahlouch 9 years ago
parent
commit
97c4edc028
7 changed files with 61 additions and 16 deletions
  1. 6
      roles/kubernetes/common/defaults/main.yml
  2. 32
      roles/kubernetes/master/handlers/main.yml
  3. 4
      roles/kubernetes/master/tasks/config.yml
  4. 8
      roles/kubernetes/master/tasks/install.yml
  5. 3
      roles/kubernetes/master/templates/apiserver.j2
  6. 1
      roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2
  7. 23
      roles/kubernetes/node/handlers/main.yml

6
roles/kubernetes/common/defaults/main.yml

@@ -31,10 +31,14 @@ kube_cert_group: kube-cert
dns_domain: "{{ cluster_name }}"
kube_proxy_mode: iptables
kube_master_port: 443
# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 10th ip address in the kube_service_addresses range and use that.
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"
# kube_api_runtime_config:
# - extensions/v1beta1/daemonsets=true
# - extensions/v1beta1/deployments=true

32
roles/kubernetes/master/handlers/main.yml

@@ -3,30 +3,54 @@
command: /bin/true
notify:
- reload systemd
- restart apiserver
- restart controller-manager
- restart scheduler
- restart proxy
- restart reloaded-scheduler
- restart reloaded-controller-manager
- restart reloaded-apiserver
- restart reloaded-proxy
- name: reload systemd
command: systemctl daemon-reload
- name: restart apiserver
command: /bin/true
notify:
- reload systemd
- restart reloaded-apiserver
- name: restart reloaded-apiserver
service:
name: kube-apiserver
state: restarted
- name: restart controller-manager
command: /bin/true
notify:
- reload systemd
- restart reloaded-controller-manager
- name: restart reloaded-controller-manager
service:
name: kube-controller-manager
state: restarted
- name: restart scheduler
command: /bin/true
notify:
- reload systemd
- restart reloaded-scheduler
- name: restart reloaded-scheduler
service:
name: kube-scheduler
state: restarted
- name: restart proxy
command: /bin/true
notify:
- reload systemd
- restart reloaded-proxy
- name: restart reloaded-proxy
service:
name: kube-proxy
state: restarted

4
roles/kubernetes/master/tasks/config.yml

@@ -20,7 +20,7 @@
- name: write the config files for api server
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes
notify:
- restart daemons
- restart apiserver
- name: write config file for controller-manager
template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes
@@ -48,7 +48,7 @@
- name: write the config files for proxy
template: src=proxy.j2 dest={{ kube_config_dir }}/proxy backup=yes
notify:
- restart daemons
- restart proxy
- name: write the kubecfg (auth) file for proxy
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes

8
roles/kubernetes/master/tasks/install.yml

@@ -1,19 +1,19 @@
---
- name: Write kube-apiserver systemd init file
template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes
notify: restart daemons
notify: restart apiserver
- name: Write kube-controller-manager systemd init file
template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes
notify: restart daemons
notify: restart controller-manager
- name: Write kube-scheduler systemd init file
template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes
notify: restart daemons
notify: restart scheduler
- name: Write kube-proxy systemd init file
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes
notify: restart daemons
notify: restart proxy
- name: Install kubernetes binaries
copy:

3
roles/kubernetes/master/templates/apiserver.j2

@@ -21,5 +21,8 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
# Add your own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"

1
roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2

@@ -19,6 +19,7 @@ ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_RUNTIME_CONFIG \
$KUBE_API_ARGS
Restart=on-failure
Type=notify

23
roles/kubernetes/node/handlers/main.yml

@@ -2,18 +2,31 @@
- name: restart daemons
command: /bin/true
notify:
- restart kubelet
- restart proxy
- reload systemd
- restart reloaded-kubelet
- restart reloaded-proxy
- name: reload systemd
command: systemctl daemon-reload
- name: restart kubelet
command: /bin/true
notify:
- reload systemd
- restart reloaded-kubelet
- name: restart reloaded-kubelet
service:
name: kubelet
state: restarted
- name: restart proxy
command: /bin/true
notify:
- reload systemd
- restart reloaded-proxy
- name: restart reloaded-proxy
service:
name: kube-proxy
state: restarted
- name: reload systemd
command: systemctl daemon-reload
Loading…
Cancel
Save