
Use kube_config_dir for kubeconfig (#7996)

The path of the kubeconfig file should be configurable, and its default
value is /etc/kubernetes/admin.conf. Most references to this path were
already configurable, but a few were still hardcoded. This commit makes those configurable as well.
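For context, a minimal sketch of how the variable behind this change would be defined and overridden. The default below matches the hardcoded path being replaced; the exact defaults file and the override location are assumptions for illustration, not part of this diff.

  # roles/kubespray-defaults/defaults/main.yaml (assumed location of the default):
  kube_config_dir: /etc/kubernetes

  # inventory/mycluster/group_vars/all/all.yml (hypothetical override):
  # with this set, every task in this diff renders
  # "{{ kube_config_dir }}/admin.conf" as /opt/kubernetes/admin.conf
  kube_config_dir: /opt/kubernetes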
Kenichi Omichi 4 years ago
committed by GitHub
commit 843252c968
5 changed files with 7 additions and 7 deletions
  1. roles/kubernetes/control-plane/tasks/kubeadm-setup.yml (6 changes)
  2. roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml (2 changes)
  3. roles/remove-node/pre-remove/tasks/main.yml (2 changes)
  4. roles/upgrade/post-upgrade/tasks/main.yml (2 changes)
  5. roles/upgrade/pre-upgrade/tasks/main.yml (2 changes)

roles/kubernetes/control-plane/tasks/kubeadm-setup.yml (6 changes)

@@ -150,8 +150,8 @@
 - name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
   shell: >-
-    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm_token }} || :;
-    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token delete {{ kubeadm_token }} || :;
+    {{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
   changed_when: false
   when:
     - inventory_hostname == groups['kube_control_plane']|first
@@ -161,7 +161,7 @@
     - kubeadm_token

 - name: Create kubeadm token for joining nodes with 24h expiration (default)
-  command: "{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create"
+  command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
   changed_when: false
   register: temp_token
   retries: 5

roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml (2 changes)

@@ -62,7 +62,7 @@
 - name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
     {{ bin_dir }}/kubectl
-    --kubeconfig /etc/kubernetes/admin.conf
+    --kubeconfig {{ kube_config_dir }}/admin.conf
     -n kube-system
     scale deployment/coredns --replicas 0
   register: scale_down_coredns

roles/remove-node/pre-remove/tasks/main.yml (2 changes)

@@ -9,7 +9,7 @@
 - name: remove-node | Drain node except daemonsets resource  # noqa 301
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
+    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
     --force
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}

roles/upgrade/post-upgrade/tasks/main.yml (2 changes)

@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)

roles/upgrade/pre-upgrade/tasks/main.yml (2 changes)

@@ -84,7 +84,7 @@
     delay: "{{ drain_retry_delay_seconds }}"
   rescue:
     - name: Set node back to schedulable
-      command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ inventory_hostname }}"
+      command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
       when: upgrade_node_uncordon_after_drain_failure
     - name: Fail after rescue
       fail:
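As a usage sketch (not part of this commit), a check like the following could confirm that admin.conf really lives where kube_config_dir points after an override; both task names are hypothetical:

  - name: Check that admin.conf follows kube_config_dir  # hypothetical task
    stat:
      path: "{{ kube_config_dir }}/admin.conf"
    register: admin_conf

  - name: Fail if admin.conf is missing  # hypothetical task
    fail:
      msg: "admin.conf not found under {{ kube_config_dir }}"
    when: not admin_conf.stat.exists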
