
Remove rotate_tokens logic

kubeadm never rotates sa.key/sa.pub, so there is no need to delete tokens or restart pods

Signed-off-by: Etienne Champetier <e.champetier@ateme.com>
(cherry picked from commit 8800b5c01d)
pull/7394/head · Etienne Champetier, 3 years ago (committed by Kubernetes Prow Robot) · commit 9ecbf75cb4
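
The premise of the change can be spot-checked on a control-plane node: kubeadm's certificate renewal regenerates the certificates it manages but never touches the service account key pair. A minimal sketch, assuming the default kubeadm layout under /etc/kubernetes/pki (older kubeadm releases spell the subcommand `kubeadm alpha certs renew all`):

# Checksum the service account key pair, renew everything kubeadm manages,
# and confirm the key pair is untouched.
sha256sum /etc/kubernetes/pki/sa.key /etc/kubernetes/pki/sa.pub
kubeadm certs renew all
sha256sum /etc/kubernetes/pki/sa.key /etc/kubernetes/pki/sa.pub   # same checksums as before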
7 changed files with 0 additions and 109 deletions

  1. cluster.yml (1 deletion)
  2. docs/upgrades.md (14 deletions)
  3. roles/kubernetes-apps/rotate_tokens/tasks/main.yml (48 deletions)
  4. roles/kubernetes/master/handlers/main.yml (18 deletions)
  5. roles/kubernetes/master/tasks/kubeadm-secondary.yml (4 deletions)
  6. roles/kubernetes/master/tasks/kubeadm-setup.yml (23 deletions)
  7. upgrade-cluster.yml (1 deletion)

cluster.yml (1 deletion)

@@ -100,7 +100,6 @@
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: kube-master
docs/upgrades.md (14 deletions)

@@ -284,20 +284,6 @@ follows:
 * kube-apiserver, kube-scheduler, and kube-controller-manager
 * Add-ons (such as KubeDNS)
 
-## Upgrade considerations
-
-Kubespray supports rotating certificates used for etcd and Kubernetes
-components, but some manual steps may be required. If you have a pod that
-requires use of a service token and is deployed in a namespace other than
-`kube-system`, you will need to manually delete the affected pods after
-rotating certificates. This is because all service account tokens are dependent
-on the apiserver token that is used to generate them. When the certificate
-rotates, all service account tokens must be rotated as well. During the
-kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
-recreated. All other invalidated service account tokens are cleaned up
-automatically, but other pods are not deleted out of an abundance of caution
-for impact to user deployed pods.
-
 ### Component-based upgrades
 
 A deployer may want to upgrade specific components in order to minimize risk
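
For anyone still rotating tokens on older clusters by hand, the manual step this removed paragraph described boiled down to recreating the pods whose mounted tokens had been invalidated. A hedged example, where my-namespace is a placeholder for any namespace outside kube-system:

# Hypothetical manual step: recreate pods in a user namespace so they
# remount freshly generated service account tokens
kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n my-namespace --all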

roles/kubernetes-apps/rotate_tokens/tasks/main.yml (48 deletions)

@@ -1,48 +0,0 @@
----
-- name: Rotate Tokens | Get default token name  # noqa 306
-  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
-  register: default_token
-  changed_when: false
-  until: default_token.rc == 0
-  delay: 4
-  retries: 10
-
-- name: Rotate Tokens | Get default token data
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
-  register: default_token_data
-  changed_when: false
-
-- name: Rotate Tokens | Test if default certificate is expired
-  uri:
-    url: https://{{ kube_apiserver_ip }}/api/v1/nodes
-    method: GET
-    return_content: no
-    validate_certs: no
-    headers:
-      Authorization: "Bearer {{ (default_token_data.stdout|from_json)['data']['token']|b64decode }}"
-  register: check_secret
-  failed_when: false
-
-- name: Rotate Tokens | Determine if certificate is expired
-  set_fact:
-    needs_rotation: '{{ check_secret.status not in [200, 403] }}'
-
-# FIXME(mattymo): Exclude built in secrets that were automatically rotated,
-# instead of filtering manually
-- name: Rotate Tokens | Get all serviceaccount tokens to expire  # noqa 306
-  shell: >-
-    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
-    -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
-    | grep kubernetes.io/service-account-token
-    | egrep 'default-token|kube-proxy|coredns|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
-  register: tokens_to_delete
-  when: needs_rotation
-
-- name: Rotate Tokens | Delete expired tokens
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
-  with_items: "{{ tokens_to_delete.stdout_lines }}"
-  when: needs_rotation
-
-- name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all --grace-period=0 --force"
-  when: needs_rotation
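
The `uri` task was the crux of this role: it replayed the cluster's default token against the API server and treated any status other than 200 or 403 (typically a 401 Unauthorized) as evidence the token was signed by a stale sa.key. A rough curl equivalent, with $APISERVER and $DEFAULT_TOKEN_NAME standing in for the templated values:

# Probe the API with the default token; 200/403 means the signature still
# verifies, 401 means the signing key changed and rotation would be needed
TOKEN=$(kubectl --kubeconfig /etc/kubernetes/admin.conf get secret "$DEFAULT_TOKEN_NAME" \
  -o jsonpath='{.data.token}' | base64 -d)
curl -k -s -o /dev/null -w '%{http_code}\n' \
  -H "Authorization: Bearer $TOKEN" "https://$APISERVER/api/v1/nodes"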

roles/kubernetes/master/handlers/main.yml (18 deletions)

@@ -121,21 +121,3 @@
   until: result.status == 200
   retries: 60
   delay: 1
-
-- name: Master | set secret_changed
-  command: /bin/true
-  notify:
-    - Master | set secret_changed to true
-    - Master | Copy new kubeconfig for root user
-
-- name: Master | set secret_changed to true
-  set_fact:
-    secret_changed: true
-
-- name: Master | Copy new kubeconfig for root user
-  copy:
-    src: "{{ kube_config_dir }}/admin.conf"
-    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
-    remote_src: yes
-    mode: "0600"
-    backup: yes
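
The last handler in the removed chain only refreshed root's kubeconfig after a key change. Its manual equivalent is a one-liner, assuming root's home directory:

# Replace root's kubeconfig with the regenerated admin.conf, keeping a backup
install -b -m 0600 /etc/kubernetes/admin.conf /root/.kube/config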

roles/kubernetes/master/tasks/kubeadm-secondary.yml (4 deletions)

@@ -66,7 +66,3 @@
   when:
     - inventory_hostname != groups['kube-master']|first
     - kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
-
-- name: Set secret_changed to false to avoid extra token rotation
-  set_fact:
-    secret_changed: false

roles/kubernetes/master/tasks/kubeadm-setup.yml (23 deletions)

@@ -10,15 +10,6 @@
     - kube_oidc_auth
     - kube_oidc_ca_cert is defined
 
-- name: kubeadm | Check serviceaccount key
-  stat:
-    path: "{{ kube_cert_dir }}/sa.key"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
-  register: sa_key_before
-  run_once: true
-
 - name: kubeadm | Check if kubeadm has already run
   stat:
     path: "/var/lib/kubelet/config.yaml"
@@ -180,20 +171,6 @@
     - upgrade_cluster_setup
     - kubeadm_already_run.stat.exists
 
-- name: kubeadm | Check serviceaccount key again
-  stat:
-    path: "{{ kube_cert_dir }}/sa.key"
-    get_attributes: no
-    get_checksum: yes
-    get_mime: no
-  register: sa_key_after
-  run_once: true
-
-- name: kubeadm | Set secret_changed if service account key was updated
-  command: /bin/true
-  notify: Master | set secret_changed
-  when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum
-
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
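
Together, the two removed stat tasks formed a simple change detector: checksum sa.key before kubeadm runs, checksum it again afterwards, and notify the secret_changed handler only when the values differ. The shell equivalent of that comparison, assuming the default certificate directory:

# Detect a rotated service account key by comparing checksums across a run
before=$(sha256sum /etc/kubernetes/pki/sa.key | cut -d' ' -f1)
# ... kubeadm init / upgrade happens here ...
after=$(sha256sum /etc/kubernetes/pki/sa.key | cut -d' ' -f1)
[ "$before" != "$after" ] && echo "service account key was rotated"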

upgrade-cluster.yml (1 deletion)

@@ -134,7 +134,6 @@
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
 
 - hosts: calico-rr
