Browse Source

Move cluster roles and system namespace to new role

This should be done after kubeconfig is set for admin and
before network plugins are up.
pull/1880/head
Matthew Mosesohn 7 years ago
parent
commit
ec53b8b66a
10 changed files with 64 additions and 48 deletions
  1. 1
      .gitlab-ci.yml
  2. 3
      cluster.yml
  3. 2
      extra_playbooks/upgrade-only-k8s.yml
  4. 19
      roles/kubernetes-apps/ansible/tasks/main.yml
  5. 56
      roles/kubernetes-apps/cluster_roles/tasks/main.yml
  6. 0
      roles/kubernetes-apps/cluster_roles/templates/namespace.j2
  7. 0
      roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2
  8. 28
      roles/kubernetes/master/tasks/static-pod-setup.yml
  9. 1
      tests/ansible.cfg
  10. 2
      upgrade-cluster.yml

1
.gitlab-ci.yml

@@ -20,7 +20,6 @@ variables:
before_script: before_script:
- pip install -r tests/requirements.txt - pip install -r tests/requirements.txt
- mkdir -p /.ssh - mkdir -p /.ssh
- cp tests/ansible.cfg .
.job: &job .job: &job
tags: tags:

3
cluster.yml

@@ -68,6 +68,8 @@
roles: roles:
- { role: kubespray-defaults} - { role: kubespray-defaults}
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -83,7 +85,6 @@
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" } - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
- { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller } - { role: kubernetes-apps/policy_controller, tags: policy-controller }
- { role: kubernetes/client, tags: client }
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}" any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

2
extra_playbooks/upgrade-only-k8s.yml

@@ -47,6 +47,8 @@
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- { role: upgrade/post-upgrade, tags: post-upgrade } - { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size #Finally handle worker upgrades, based on given batch size

19
roles/kubernetes-apps/ansible/tasks/main.yml

@@ -5,26 +5,9 @@
register: result register: result
until: result.status == 200 until: result.status == 200
retries: 10 retries: 10
delay: 6
delay: 2
when: inventory_hostname == groups['kube-master'][0] when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
template:
src: "node-crb.yml.j2"
dest: "{{ kube_config_dir }}/node-crb.yml"
register: node_crb_manifest
when: rbac_enabled
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
kube:
name: "system:node"
kubectl: "{{bin_dir}}/kubectl"
resource: "clusterrolebinding"
filename: "{{ kube_config_dir }}/node-crb.yml"
when:
- rbac_enabled
- node_crb_manifest.changed
- name: Kubernetes Apps | Delete old kubedns resources - name: Kubernetes Apps | Delete old kubedns resources
kube: kube:
name: "kubedns" name: "kubedns"

56
roles/kubernetes-apps/cluster_roles/tasks/main.yml

@@ -0,0 +1,56 @@
---
# Cluster-bootstrap tasks that must run after the admin kubeconfig is set on
# masters and before network plugins come up: wait for the apiserver, admit
# nodes via an RBAC ClusterRoleBinding, and ensure the system namespace exists.

- name: Kubernetes Apps | Wait for kube-apiserver
  uri:
    url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
  register: result
  until: result.status == 200
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
  template:
    src: "node-crb.yml.j2"
    dest: "{{ kube_config_dir }}/node-crb.yml"
  register: node_crb_manifest
  when: rbac_enabled

# Only (re)applied when the rendered manifest changed above.
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
  kube:
    name: "system:node"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "clusterrolebinding"
    filename: "{{ kube_config_dir }}/node-crb.yml"
    state: latest
  when:
    - rbac_enabled
    - node_crb_manifest.changed

# This is not a cluster role, but should be run after kubeconfig is set on master
- name: Write kube system namespace manifest
  template:
    src: namespace.j2
    dest: "{{ kube_config_dir }}/{{ system_namespace }}-ns.yml"
  when: inventory_hostname == groups['kube-master'][0]
  tags:
    - apps

# failed_when: false — a nonzero rc here just means the namespace is absent;
# the next task keys off kubesystem.rc.
- name: Check if kube system namespace exists
  command: "{{ bin_dir }}/kubectl get ns {{ system_namespace }}"
  register: kubesystem
  changed_when: false
  failed_when: false
  when: inventory_hostname == groups['kube-master'][0]
  tags:
    - apps

- name: Create kube system namespace
  command: "{{ bin_dir }}/kubectl create -f {{ kube_config_dir }}/{{ system_namespace }}-ns.yml"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  register: create_system_ns
  until: create_system_ns.rc == 0
  changed_when: false
  when:
    - inventory_hostname == groups['kube-master'][0]
    - kubesystem.rc != 0
  tags:
    - apps

roles/kubernetes/master/templates/namespace.j2 → roles/kubernetes-apps/cluster_roles/templates/namespace.j2

roles/kubernetes-apps/ansible/templates/node-crb.yml.j2 → roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2

28
roles/kubernetes/master/tasks/static-pod-setup.yml

@@ -9,34 +9,6 @@
- meta: flush_handlers - meta: flush_handlers
- name: Write kube system namespace manifest
template:
src: namespace.j2
dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
when: inventory_hostname == groups['kube-master'][0]
tags:
- apps
- name: Check if kube system namespace exists
command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
register: 'kubesystem'
changed_when: False
failed_when: False
when: inventory_hostname == groups['kube-master'][0]
tags:
- apps
- name: Create kube system namespace
command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
register: create_system_ns
until: create_system_ns.rc == 0
changed_when: False
when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
tags:
- apps
- name: Write kube-scheduler kubeconfig - name: Write kube-scheduler kubeconfig
template: template:
src: kube-scheduler-kubeconfig.yaml.j2 src: kube-scheduler-kubeconfig.yaml.j2

1
tests/ansible.cfg

@@ -8,4 +8,5 @@ gathering = smart
fact_caching = jsonfile fact_caching = jsonfile
fact_caching_connection = /tmp fact_caching_connection = /tmp
stdout_callback = skippy stdout_callback = skippy
library = ./library:../library
callback_whitelist = profile_tasks callback_whitelist = profile_tasks

2
upgrade-cluster.yml

@@ -67,6 +67,8 @@
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes/client, tags: client }
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade } - { role: upgrade/post-upgrade, tags: post-upgrade }

Loading…
Cancel
Save