From 092bf07cbf57602a289330d0a1971f16f5d8e38e Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Tue, 27 Jun 2017 12:27:25 +0800 Subject: [PATCH 01/19] basic rbac support --- docs/vars.md | 4 ++ .../kubernetes-apps/ansible/defaults/main.yml | 4 ++ roles/kubernetes-apps/ansible/tasks/main.yml | 30 ++++++++++++-- .../kubedns-autoscaler-clusterrole.yml | 32 +++++++++++++++ .../kubedns-autoscaler-clusterrolebinding.yml | 27 ++++++++++++ .../templates/kubedns-autoscaler-sa.yml | 19 +++++++++ .../ansible/templates/kubedns-autoscaler.yml | 8 ++-- .../ansible/templates/kubedns-deploy.yml | 4 +- .../ansible/templates/kubedns-sa.yml | 7 ++++ roles/kubernetes-apps/helm/tasks/main.yml | 26 ++++++++++++ .../templates/tiller-clusterrolebinding.yml | 13 ++++++ .../helm/templates/tiller-sa.yml | 7 ++++ roles/kubernetes/master/defaults/main.yml | 2 +- roles/kubernetes/master/tasks/main.yml | 22 +++++++--- ...kube-controller-manager-kubeconfig.yaml.j2 | 18 ++++++++ .../kube-scheduler-kubeconfig.yaml.j2 | 18 ++++++++ .../manifests/kube-apiserver.manifest.j2 | 3 ++ .../kube-controller-manager.manifest.j2 | 35 ++++++++++++---- .../manifests/kube-scheduler.manifest.j2 | 28 ++++++++++++- roles/kubernetes/node/tasks/install.yml | 2 +- roles/kubernetes/node/tasks/main.yml | 7 +++- .../templates/kube-proxy-kubeconfig.yaml.j2 | 18 ++++++++ .../manifests/kube-proxy.manifest.j2 | 27 ++++++------ .../kubernetes/preinstall/tasks/set_facts.yml | 2 +- roles/kubernetes/secrets/files/make-ssl.sh | 41 +++++++++++++------ .../secrets/tasks/gen_certs_script.yml | 23 +++++++++-- roles/kubespray-defaults/defaults/main.yaml | 6 +++ 27 files changed, 374 insertions(+), 59 deletions(-) create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml create mode 100644 roles/kubernetes-apps/ansible/templates/kubedns-sa.yml create mode 100644 roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml create mode 100644 roles/kubernetes-apps/helm/templates/tiller-sa.yml create mode 100644 roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 create mode 100644 roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 create mode 100644 roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 diff --git a/docs/vars.md b/docs/vars.md index 4b9da186e..46684395f 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,6 +67,10 @@ following default cluster paramters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes +* *authorization_modes* - A list of [authorization mode]( +https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) + that the cluster should be configured for. Defaults to `['AlwaysAllow']`. + Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested. 
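+
+  A minimal sketch of setting this variable from the inventory (the
+  group_vars file below is an illustrative location, not a requirement):
+
+  ```
+  # e.g. inventory/group_vars/k8s-cluster.yml (hypothetical path)
+  authorization_modes: ['RBAC']
+  ```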
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 2787472c8..97d1bcdc4 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -41,3 +41,7 @@ netchecker_server_memory_requests: 64M etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" +kubedns_rbac_resources: + - clusterrole + - clusterrolebinding + - sa diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 4e7236df6..00a1fd74d 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -13,11 +13,34 @@ src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" with_items: - - {name: kube-dns, file: kubedns-deploy.yml, type: deployment} - - {name: kube-dns, file: kubedns-svc.yml, type: svc} + - {name: kubedns, file: kubedns-sa.yml, type: sa} + - {name: kubedns, file: kubedns-deploy.yml, type: deployment} + - {name: kubedns, file: kubedns-svc.yml, type: svc} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole} + - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} register: manifests - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + when: + - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in kubedns_rbac_resources + tags: dnsmasq + +# see https://github.com/kubernetes/kubernetes/issues/45084 +# TODO: this is only needed for "old" kube-dns +- name: Kubernetes Apps | Patch system:kube-dns ClusterRole + command: > + {{bin_dir}}/kubectl patch clusterrole system:kube-dns + --patch='{ + "rules": [ + { + "apiGroups" : [""], + "resources" : ["endpoints", "services"], + "verbs": ["list", "watch", "get"] + } + ] + }' + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled tags: dnsmasq - name: Kubernetes Apps | Start Resources @@ -29,6 +52,7 @@ filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] tags: dnsmasq diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml new file mode 100644 index 000000000..a194426c6 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -0,0 +1,32 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml new file mode 100644 index 000000000..a368ae333 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -0,0 +1,27 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +subjects: + - kind: ServiceAccount + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} +roleRef: + kind: ClusterRole + name: cluster-proportional-autoscaler + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml new file mode 100644 index 000000000..9544a7dd9 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -0,0 +1,19 @@ +# Copyright 2016 The Kubernetes Authors. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
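+# Note (editorial sketch, not upstream text): when rbac_enabled is true the
+# autoscaler Deployment runs as this ServiceAccount, and the ClusterRoleBinding
+# above binds it to the cluster-proportional-autoscaler ClusterRole so it can
+# list nodes and scale the kube-dns Deployment.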
+ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: cluster-proportional-autoscaler + namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml index a1d5455ad..9e0462290 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml @@ -16,7 +16,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: kubedns-autoscaler - namespace: kube-system + namespace: {{ system_namespace }} labels: k8s-app: kubedns-autoscaler kubernetes.io/cluster-service: "true" @@ -39,11 +39,13 @@ spec: memory: "10Mi" command: - /cluster-proportional-autoscaler - - --namespace=kube-system + - --namespace={{ system_namespace }} - --configmap=kubedns-autoscaler # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - --target=Deployment/kube-dns - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - --logtostderr=true - --v=2 - +{% if rbac_enabled %} + serviceAccountName: cluster-proportional-autoscaler +{% endif %} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml index 3f07aa905..7e4615676 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml @@ -151,4 +151,6 @@ spec: memory: 20Mi cpu: 10m dnsPolicy: Default # Don't use cluster DNS. - +{% if rbac_enabled %} + serviceAccountName: kube-dns +{% endif %} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml new file mode 100644 index 000000000..e520ccbfc --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index f12875da2..2d26c5a0f 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -10,10 +10,36 @@ mode: 0755 register: helm_container +- name: Helm | Lay Down Helm Manifests (RBAC) + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: tiller, file: tiller-sa.yml, type: sa} + - {name: tiller, file: tiller-clusterrolebinding.yml, type: clusterrolebinding} + register: manifests + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled + +- name: Helm | Apply Helm Manifests (RBAC) + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled + - name: Helm | Install/upgrade helm command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}" when: helm_container.changed +- name: Helm | Patch tiller deployment for RBAC + command: kubectl patch deployment 
tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n {{ system_namespace }} + when: rbac_enabled + - name: Helm | Set up bash completion shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh" when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] ) diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml new file mode 100644 index 000000000..0ac9341ee --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: tiller + namespace: {{ system_namespace }} +subjects: + - kind: ServiceAccount + name: tiller + namespace: {{ system_namespace }} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml new file mode 100644 index 000000000..c840f57f8 --- /dev/null +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tiller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 785ef43af..7cfe9cc9a 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -64,4 +64,4 @@ apiserver_custom_flags: [] controller_mgr_custom_flags: [] -scheduler_custom_flags: [] \ No newline at end of file +scheduler_custom_flags: [] diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index dadef4bf5..6922e6a51 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -60,12 +60,11 @@ when: kubesystem|failed and inventory_hostname == groups['kube-master'][0] tags: apps -- name: Write kube-controller-manager manifest +- name: Write kube-scheduler kubeconfig template: - src: manifests/kube-controller-manager.manifest.j2 - dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" - notify: Master | wait for kube-controller-manager - tags: kube-controller-manager + src: kube-scheduler-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + tags: kube-scheduler - name: Write kube-scheduler manifest template: @@ -74,6 +73,19 @@ notify: Master | wait for kube-scheduler tags: kube-scheduler +- name: Write kube-controller-manager kubeconfig + template: + src: kube-controller-manager-kubeconfig.yaml.j2 + dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + tags: kube-controller-manager + +- name: Write kube-controller-manager manifest + template: + src: manifests/kube-controller-manager.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" + notify: Master | wait for kube-controller-manager + tags: kube-controller-manager + - include: post-upgrade.yml tags: k8s-post-upgrade diff --git a/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 new file mode 100644 index 000000000..887d022c1 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-controller-manager-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- 
name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-controller-manager + user: + client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem + client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem +contexts: +- context: + cluster: local + user: kube-controller-manager + name: kube-controller-manager-{{ cluster_name }} +current-context: kube-controller-manager-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 new file mode 100644 index 000000000..974b72427 --- /dev/null +++ b/roles/kubernetes/master/templates/kube-scheduler-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-scheduler + user: + client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem + client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem +contexts: +- context: + cluster: local + user: kube-scheduler + name: kube-scheduler-{{ cluster_name }} +current-context: kube-scheduler-{{ cluster_name }} diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index bf4979596..24094fefb 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -81,6 +81,9 @@ spec: {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - --anonymous-auth={{ kube_api_anonymous_auth }} {% endif %} +{% if authorization_modes %} + - --authorization-mode={{ authorization_modes|join(',') }} +{% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} {% else %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index d3f8a23a5..a6b69fa14 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -24,7 +24,7 @@ spec: command: - /hyperkube - controller-manager - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem @@ -35,6 +35,9 @@ spec: - --node-monitor-period={{ kube_controller_node_monitor_period }} - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }} - --v={{ kube_log_level }} +{% if rbac_enabled %} + - --use-service-account-credentials +{% endif %} {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - --cloud-provider={{cloud_provider}} - --cloud-config={{ kube_config_dir }}/cloud_config @@ -61,20 +64,36 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{kube_config_dir}}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" + name: kubeconfig readOnly: true {% if cloud_provider 
is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %} - - mountPath: {{ kube_config_dir }}/cloud_config + - mountPath: "{{ kube_config_dir }}/cloud_config" name: cloudconfig readOnly: true {% endif %} volumes: - - hostPath: - path: {{ kube_cert_dir }} - name: ssl-certs-kubernetes + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml" {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - hostPath: - path: {{ kube_config_dir }}/cloud_config + path: "{{ kube_config_dir }}/cloud_config" name: cloudconfig {% endif %} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 441f991eb..fdc16bf7f 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-scheduler - namespace: kube-system + namespace: {{ system_namespace }} labels: k8s-app: kube-scheduler spec: @@ -25,7 +25,7 @@ spec: - /hyperkube - scheduler - --leader-elect=true - - --master={{ kube_apiserver_endpoint }} + - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --v={{ kube_log_level }} {% if scheduler_custom_flags is string %} - {{ scheduler_custom_flags }} @@ -41,3 +41,27 @@ spec: port: 10251 initialDelaySeconds: 30 timeoutSeconds: 10 + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl + readOnly: true + - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" + name: kubeconfig + readOnly: true + volumes: + - name: ssl-certs-host + hostPath: +{% if ansible_os_family == 'RedHat' %} + path: /etc/pki/tls +{% else %} + path: /usr/share/ca-certificates +{% endif %} + - name: etc-kube-ssl + hostPath: + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig + hostPath: + path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml" diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index cb7a10c65..ad4cbacf1 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -16,7 +16,7 @@ - include: "install_{{ kubelet_deployment_type }}.yml" - name: install | Write kubelet systemd init file - template: + template: src: "kubelet.{{ kubelet_deployment_type }}.service.j2" dest: "/etc/systemd/system/kubelet.service" backup: "yes" diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index f09845f76..e0558f8cd 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -30,9 +30,12 @@ - name: write the kubecfg (auth) file for kubelet template: - src: node-kubeconfig.yaml.j2 - dest: "{{ kube_config_dir }}/node-kubeconfig.yaml" + src: "{{ item }}-kubeconfig.yaml.j2" + dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml" backup: yes + with_items: + - node + - kube-proxy notify: restart kubelet tags: kubelet diff --git a/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 new file mode 100644 index 
000000000..18c47cd3e --- /dev/null +++ b/roles/kubernetes/node/templates/kube-proxy-kubeconfig.yaml.j2 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem + server: {{ kube_apiserver_endpoint }} +users: +- name: kube-proxy + user: + client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem + client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem +contexts: +- context: + cluster: local + user: kube-proxy + name: kube-proxy-{{ cluster_name }} +current-context: kube-proxy-{{ cluster_name }} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 9b7d53857..65feeee65 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -25,10 +25,7 @@ spec: - /hyperkube - proxy - --v={{ kube_log_level }} - - --master={{ kube_apiserver_endpoint }} -{% if not is_kube_master %} - - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml -{% endif %} + - --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} - --proxy-mode={{ kube_proxy_mode }} @@ -41,14 +38,14 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml - name: "kubeconfig" + - mountPath: "{{ kube_config_dir }}/ssl" + name: etc-kube-ssl readOnly: true - - mountPath: {{kube_config_dir}}/ssl - name: "etc-kube-ssl" + - mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + name: kubeconfig readOnly: true - mountPath: /var/run/dbus - name: "var-run-dbus" + name: var-run-dbus readOnly: false volumes: - name: ssl-certs-host @@ -58,12 +55,12 @@ spec: {% else %} path: /usr/share/ca-certificates {% endif %} - - name: "kubeconfig" + - name: etc-kube-ssl hostPath: - path: "{{kube_config_dir}}/node-kubeconfig.yaml" - - name: "etc-kube-ssl" + path: "{{ kube_config_dir }}/ssl" + - name: kubeconfig hostPath: - path: "{{kube_config_dir}}/ssl" - - name: "var-run-dbus" + path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml" + - name: var-run-dbus hostPath: - path: "/var/run/dbus" + path: /var/run/dbus diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 03057829d..edfac2e2e 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -23,7 +23,7 @@ {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%} https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }} {%- elif is_kube_master -%} - http://127.0.0.1:{{ kube_apiserver_insecure_port }} + https://127.0.0.1:{{ kube_apiserver_port }} {%- else -%} {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%} https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }} diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 55ea13d1e..e8574cc6b 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -72,32 +72,47 @@ else openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 fi 
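+# Editorial sketch of the refactor below: gen_key_and_cert() bundles the three
+# openssl steps repeated for every component certificate -- generate an RSA
+# key, build a CSR with the given subject, and sign it with the cluster CA
+# (ca.pem / ca-key.pem) using the v3_req extensions from ${CONFIG}.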
+gen_key_and_cert() { + local name=$1 + local subject=$2 + openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 +} + if [ ! -e "$SSLDIR/ca-key.pem" ]; then - # kube-apiserver key - openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 + # kube-apiserver + gen_key_and_cert "apiserver" "/CN=kube-apiserver" cat ca.pem >> apiserver.pem + # kube-scheduler + gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler" + # kube-controller-manager + gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager" fi +# Admins if [ -n "$MASTERS" ]; then for host in $MASTERS; do cn="${host%%.*}" - # admin key - openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1 - openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1 + # admin + gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters" + done +fi + +# Nodes +if [ -n "$HOSTS" ]; then + for host in $HOSTS; do + cn="${host%%.*}" + gen_key_and_cert "node-${host}" "/CN=system:node:${cn}/O=system:nodes" done fi -# Nodes and Admin +# system:kube-proxy if [ -n "$HOSTS" ]; then for host in $HOSTS; do cn="${host%%.*}" - # node key - openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 - openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=kube-node-${cn}" > /dev/null 2>&1 - openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 > /dev/null 2>&1 + # kube-proxy + gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy" done fi diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 8df2195bf..61d9c7826 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -56,24 +56,39 @@ - set_fact: all_master_certs: "['ca-key.pem', + 'apiserver.pem', + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', - 'apiserver.pem', - 'apiserver-key.pem', {% endfor %}]" my_master_certs: ['ca-key.pem', 'admin-{{ inventory_hostname }}.pem', 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', - 'apiserver-key.pem' + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', ] all_node_certs: "['ca.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', 'node-{{ node }}-key.pem', + 'kube-proxy-{{ node }}.pem', + 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" - my_node_certs: ['ca.pem', 'node-{{ inventory_hostname }}.pem', 'node-{{ inventory_hostname 
}}-key.pem'] + my_node_certs: ['ca.pem', + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem', + 'kube-proxy-{{ inventory_hostname }}.pem', + 'kube-proxy-{{ inventory_hostname }}-key.pem', + ] tags: facts - name: Gen_certs | Gather master certs diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c18afd39b..db5fc1997 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -114,3 +114,9 @@ vault_deployment_type: docker k8s_image_pull_policy: IfNotPresent efk_enabled: false enable_network_policy: false + +## List of authorization modes that must be configured for +## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and +## 'RBAC' modes are tested. +authorization_modes: ['AlwaysAllow'] +rbac_enabled: "{{ 'RBAC' in authorization_modes }}" From 8d2fc883360d54ed91bf5e2afe86ed54b0b57b6d Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Thu, 29 Jun 2017 11:11:22 +0800 Subject: [PATCH 02/19] add ci test for rbac --- .gitlab-ci.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 259c45614..ddeae95b6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -62,6 +62,7 @@ before_script: KUBELET_DEPLOYMENT: "docker" VAULT_DEPLOYMENT: "docker" WEAVE_CPU_LIMIT: "100m" + AUTHORIZATION_MODES: "{ 'authorization_modes': ['AlwaysAllow'] }" MAGIC: "ci check this" .gce: &gce @@ -132,6 +133,7 @@ before_script: -e local_release_dir=${PWD}/downloads -e resolvconf_mode=${RESOLVCONF_MODE} -e vault_deployment_type=${VAULT_DEPLOYMENT} + -e "${AUTHORIZATION_MODES}" --limit "all:!fake_hosts" cluster.yml @@ -373,6 +375,15 @@ before_script: CLUSTER_MODE: separate STARTUP_SCRIPT: "" +.ubuntu_calico_rbac_variables: &ubuntu_calico_rbac_variables +# stage: deploy-gce-special + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" + KUBE_NETWORK_PLUGIN: calico + CLOUD_IMAGE: ubuntu-1604-xenial + CLOUD_REGION: europe-west1-b + CLUSTER_MODE: separate + STARTUP_SCRIPT: "" + # Builds for PRs only (premoderated by unit-tests step) and triggers (auto) coreos-calico-sep: stage: deploy-gce-part1 @@ -598,6 +609,17 @@ ubuntu-vault-sep: except: ['triggers'] only: ['master', /^pr-.*$/] +ubuntu-calico-rbac-sep: + stage: deploy-gce-special + <<: *job + <<: *gce + variables: + <<: *gce_variables + <<: *ubuntu_calico_rbac_variables + when: manual + except: ['triggers'] + only: ['master', /^pr-.*$/] + # Premoderated with manual actions ci-authorized: <<: *job From fd01377f12d22aa0bd5f9fccc8307e7373ad0ba3 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Thu, 29 Jun 2017 11:44:52 +0800 Subject: [PATCH 03/19] remove more bins when reset --- roles/reset/tasks/main.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index af3e66601..77140ba6a 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -79,11 +79,10 @@ - /etc/dnsmasq.d-available - /etc/etcd.env - /etc/calico + - /etc/weave.env - /opt/cni - /etc/dhcp/dhclient.d/zdnsupdate.sh - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate - - "{{ bin_dir }}/kubelet" - - "{{ bin_dir }}/kubernetes-scripts" - /run/flannel - /etc/flannel - /run/kubernetes @@ -92,6 +91,15 @@ - /etc/ssl/certs/kube-ca.pem - /etc/ssl/certs/etcd-ca.pem - /var/log/pods/ + - "{{ bin_dir }}/kubelet" + - "{{ bin_dir }}/etcd-scripts" + - "{{ bin_dir }}/etcd" + - "{{ bin_dir }}/etcdctl" + - "{{ bin_dir 
}}/kubernetes-scripts" + - "{{ bin_dir }}/kubectl" + - "{{ bin_dir }}/helm" + - "{{ bin_dir }}/calicoctl" + - "{{ bin_dir }}/weave" tags: ['files'] From c9734b6d7bcb8ba69295bba07bb54697fb992a93 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Tue, 4 Jul 2017 20:03:55 +0800 Subject: [PATCH 04/19] run calico-policy-controller with proper sa/role/rolebinding --- .../policy_controller/calico/tasks/main.yml | 25 +++++++++++++++++++ .../calico-policy-controller-clusterrole.yml | 16 ++++++++++++ ...o-policy-controller-clusterrolebinding.yml | 12 +++++++++ .../templates/calico-policy-controller-sa.yml | 7 ++++++ .../templates/calico-policy-controller.yml.j2 | 3 +++ 5 files changed, 63 insertions(+) create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 8b4271d6a..02aac8988 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -3,6 +3,31 @@ when: kube_network_plugin == 'canal' tags: [facts, canal] +- name: Lay Down calico-policy-controller Template + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-policy-controller, file: calico-policy-controller-sa.yml, type: sa} + - {name: calico-policy-controller, file: calico-policy-controller-clusterrole.yml, type: clusterrole} + - {name: calico-policy-controller, file: calico-policy-controller-clusterrolebinding.yml, type: clusterrolebinding} + register: manifests + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + tags: canal + +- name: Create calico-policy-controller Resources + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + tags: canal + - name: Write calico-policy-controller yaml template: src: calico-policy-controller.yml.j2 diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml new file mode 100644 index 000000000..3b71b9001 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml @@ -0,0 +1,16 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + verbs: + - watch + - list diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml new file mode 100644 index 
000000000..535865f01 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-policy-controller +subjects: +- kind: ServiceAccount + name: calico-policy-controller + namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml new file mode 100644 index 000000000..388f12977 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 index 322d3a37b..9639fed82 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 @@ -60,3 +60,6 @@ spec: - hostPath: path: {{ calico_cert_dir }} name: etcd-certs +{% if rbac_enabled %} + serviceAccountName: calico-policy-controller +{% endif %} From 2cda982345405739dd56180ffe301188d44f235f Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 7 Jul 2017 15:43:48 +0800 Subject: [PATCH 05/19] binding group system:nodes to clusterrole calico-role --- roles/download/tasks/main.yml | 2 +- .../policy_controller/calico/tasks/main.yml | 4 ++-- roles/network_plugin/calico/tasks/main.yml | 22 +++++++++++++++++++ .../templates/calico-node-clusterrole.yml | 12 ++++++++++ .../calico-node-clusterrolebinding.yml | 12 ++++++++++ 5 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 roles/network_plugin/calico/templates/calico-node-clusterrole.yml create mode 100644 roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 24d1b5bca..cc244619e 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -191,7 +191,7 @@ src: "{{ fname }}" dest: "{{ fname }}" mode: push - delegate_to: localhost + #delegate_to: localhost become: false register: get_task until: get_task|succeeded diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 02aac8988..18ac8c18c 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -3,7 +3,7 @@ when: kube_network_plugin == 'canal' tags: [facts, canal] -- name: Lay Down calico-policy-controller Template +- name: Lay Down calico-policy-controller RBAC Template template: src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" @@ -15,7 +15,7 @@ when: inventory_hostname == groups['kube-master'][0] and rbac_enabled tags: canal -- name: Create calico-policy-controller Resources +- name: Create calico-policy-controller RBAC Resources kube: name: "{{item.item.name}}" namespace: "{{ system_namespace }}" diff --git 
a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 38d3ad5db..59ae25e17 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -195,6 +195,28 @@ when: secret_changed|default(false) or etcd_secret_changed|default(false) notify: restart calico-node +- name: Lay Down calico-node RBAC Template + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-node, file: calico-node-clusterrole.yml, type: clusterrole} + - {name: calico-node, file: calico-node-clusterrolebinding.yml, type: clusterrolebinding} + register: manifests + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + +- name: Create calico-node RBAC Resources + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + - meta: flush_handlers - name: Calico | Enable calico-node diff --git a/roles/network_plugin/calico/templates/calico-node-clusterrole.yml b/roles/network_plugin/calico/templates/calico-node-clusterrole.yml new file mode 100644 index 000000000..b48c74735 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node-clusterrole.yml @@ -0,0 +1,12 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get diff --git a/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml b/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml new file mode 100644 index 000000000..cdbd15685 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: Group + name: system:nodes + namespace: kube-system From 1eaf0e1c637db528a7facbb1ecc43236fd063348 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 7 Jul 2017 17:30:41 +0800 Subject: [PATCH 06/19] rename task --- roles/network_plugin/calico/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 59ae25e17..a67cb7fca 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -195,7 +195,7 @@ when: secret_changed|default(false) or etcd_secret_changed|default(false) notify: restart calico-node -- name: Lay Down calico-node RBAC Template +- name: Calico | Lay Down calico-node RBAC Template template: src: "{{item.file}}" dest: "{{kube_config_dir}}/{{item.file}}" @@ -205,7 +205,7 @@ register: manifests when: inventory_hostname == groups['kube-master'][0] and rbac_enabled -- name: Create calico-node RBAC Resources +- name: Calico | Create calico-node RBAC Resources kube: name: "{{item.item.name}}" namespace: "{{ system_namespace }}" From cea3e224aa7b6ac028a35051c9b4f18ae05342d5 Mon Sep 17 00:00:00 2001 From: 
jwfang <54740235@qq.com> Date: Fri, 7 Jul 2017 17:31:11 +0800 Subject: [PATCH 07/19] change authorization_modes default value --- roles/kubespray-defaults/defaults/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index db5fc1997..ed827d27b 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -118,5 +118,5 @@ enable_network_policy: false ## List of authorization modes that must be configured for ## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and ## 'RBAC' modes are tested. -authorization_modes: ['AlwaysAllow'] +authorization_modes: [] rbac_enabled: "{{ 'RBAC' in authorization_modes }}" From 0b3badf3d853f5de0a83bf8effaef0b0423def3b Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 10 Jul 2017 16:53:11 +0800 Subject: [PATCH 08/19] revert calico-related changes --- .../policy_controller/calico/tasks/main.yml | 25 ------------------- .../calico-policy-controller-clusterrole.yml | 16 ------------ ...o-policy-controller-clusterrolebinding.yml | 12 --------- .../templates/calico-policy-controller-sa.yml | 7 ------ .../templates/calico-policy-controller.yml.j2 | 3 --- roles/kubespray-defaults/defaults/main.yaml | 2 +- roles/network_plugin/calico/tasks/main.yml | 22 ---------------- .../templates/calico-node-clusterrole.yml | 12 --------- .../calico-node-clusterrolebinding.yml | 12 --------- 9 files changed, 1 insertion(+), 110 deletions(-) delete mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml delete mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml delete mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml delete mode 100644 roles/network_plugin/calico/templates/calico-node-clusterrole.yml delete mode 100644 roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 18ac8c18c..8b4271d6a 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -3,31 +3,6 @@ when: kube_network_plugin == 'canal' tags: [facts, canal] -- name: Lay Down calico-policy-controller RBAC Template - template: - src: "{{item.file}}" - dest: "{{kube_config_dir}}/{{item.file}}" - with_items: - - {name: calico-policy-controller, file: calico-policy-controller-sa.yml, type: sa} - - {name: calico-policy-controller, file: calico-policy-controller-clusterrole.yml, type: clusterrole} - - {name: calico-policy-controller, file: calico-policy-controller-clusterrolebinding.yml, type: clusterrolebinding} - register: manifests - when: inventory_hostname == groups['kube-master'][0] and rbac_enabled - tags: canal - -- name: Create calico-policy-controller RBAC Resources - kube: - name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.item.type}}" - filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" - with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: inventory_hostname == groups['kube-master'][0] and rbac_enabled - tags: 
canal - - name: Write calico-policy-controller yaml template: src: calico-policy-controller.yml.j2 diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml deleted file mode 100644 index 3b71b9001..000000000 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrole.yml +++ /dev/null @@ -1,16 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-policy-controller - namespace: {{ system_namespace }} -rules: - - apiGroups: - - "" - - extensions - resources: - - pods - - namespaces - - networkpolicies - verbs: - - watch - - list diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml deleted file mode 100644 index 535865f01..000000000 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-clusterrolebinding.yml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-policy-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-policy-controller -subjects: -- kind: ServiceAccount - name: calico-policy-controller - namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml deleted file mode 100644 index 388f12977..000000000 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller-sa.yml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-policy-controller - namespace: {{ system_namespace }} - labels: - kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 index 9639fed82..322d3a37b 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 @@ -60,6 +60,3 @@ spec: - hostPath: path: {{ calico_cert_dir }} name: etcd-certs -{% if rbac_enabled %} - serviceAccountName: calico-policy-controller -{% endif %} diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index ed827d27b..db5fc1997 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -118,5 +118,5 @@ enable_network_policy: false ## List of authorization modes that must be configured for ## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and ## 'RBAC' modes are tested. 
-authorization_modes: [] +authorization_modes: ['AlwaysAllow'] rbac_enabled: "{{ 'RBAC' in authorization_modes }}" diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index a67cb7fca..38d3ad5db 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -195,28 +195,6 @@ when: secret_changed|default(false) or etcd_secret_changed|default(false) notify: restart calico-node -- name: Calico | Lay Down calico-node RBAC Template - template: - src: "{{item.file}}" - dest: "{{kube_config_dir}}/{{item.file}}" - with_items: - - {name: calico-node, file: calico-node-clusterrole.yml, type: clusterrole} - - {name: calico-node, file: calico-node-clusterrolebinding.yml, type: clusterrolebinding} - register: manifests - when: inventory_hostname == groups['kube-master'][0] and rbac_enabled - -- name: Calico | Create calico-node RBAC Resources - kube: - name: "{{item.item.name}}" - namespace: "{{ system_namespace }}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.item.type}}" - filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" - with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: inventory_hostname == groups['kube-master'][0] and rbac_enabled - - meta: flush_handlers - name: Calico | Enable calico-node diff --git a/roles/network_plugin/calico/templates/calico-node-clusterrole.yml b/roles/network_plugin/calico/templates/calico-node-clusterrole.yml deleted file mode 100644 index b48c74735..000000000 --- a/roles/network_plugin/calico/templates/calico-node-clusterrole.yml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node - namespace: {{ system_namespace }} -rules: - - apiGroups: [""] - resources: - - pods - - nodes - verbs: - - get diff --git a/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml b/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml deleted file mode 100644 index cdbd15685..000000000 --- a/roles/network_plugin/calico/templates/calico-node-clusterrolebinding.yml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: Group - name: system:nodes - namespace: kube-system From 552b2f06357a2add22d63e824ba365f8d2ad0439 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Fri, 7 Jul 2017 17:31:11 +0800 Subject: [PATCH 09/19] change authorization_modes default value --- .gitlab-ci.yml | 2 +- docs/vars.md | 2 +- roles/kubespray-defaults/defaults/main.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ddeae95b6..d51c6887c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -62,7 +62,7 @@ before_script: KUBELET_DEPLOYMENT: "docker" VAULT_DEPLOYMENT: "docker" WEAVE_CPU_LIMIT: "100m" - AUTHORIZATION_MODES: "{ 'authorization_modes': ['AlwaysAllow'] }" + AUTHORIZATION_MODES: "{ 'authorization_modes': [] }" MAGIC: "ci check this" .gce: &gce diff --git a/docs/vars.md b/docs/vars.md index 46684395f..dd9988715 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -69,7 +69,7 @@ following default cluster paramters: Kubernetes * *authorization_modes* - A list of [authorization mode]( 
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
-  that the cluster should be configured for. Defaults to `['AlwaysAllow']`.
+  that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
   Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested.
 
 Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index db5fc1997..ed827d27b 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -118,5 +118,5 @@ enable_network_policy: false
 ## List of authorization modes that must be configured for
 ## the k8s cluster. Only 'AlwaysAllow','AlwaysDeny', and
 ## 'RBAC' modes are tested.
-authorization_modes: ['AlwaysAllow']
+authorization_modes: []
 rbac_enabled: "{{ 'RBAC' in authorization_modes }}"

From a5b84a47b00f78bc389a9f2a3cf74b7d4e0ff0b7 Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Mon, 10 Jul 2017 17:18:37 +0800
Subject: [PATCH 10/19] docs: experimental, no calico/vault

---
 docs/vars.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/vars.md b/docs/vars.md
index dd9988715..a14ce08c0 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -70,7 +70,8 @@ following default cluster paramters:
 * *authorization_modes* - A list of [authorization mode](
 https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
   that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
-  Note: Only `AlwaysAllow`, `AlwaysDeny` and `RBAC` are tested.
+  Note: `RBAC` is currently in an experimental phase and does not support either
+  calico or vault.
 
 Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
 private addresses, make sure to pick another values for ``kube_service_addresses``

From d24520161416cc586cfe0968fe66afc4722c4c0b Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Mon, 10 Jul 2017 17:19:34 +0800
Subject: [PATCH 11/19] test: change ubuntu_calico_rbac to ubuntu_flannel_rbac

---
 .gitlab-ci.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d51c6887c..7e9a407bf 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -375,10 +375,10 @@ before_script:
     CLUSTER_MODE: separate
     STARTUP_SCRIPT: ""
 
-.ubuntu_calico_rbac_variables: &ubuntu_calico_rbac_variables
+.ubuntu_flannel_rbac_variables: &ubuntu_flannel_rbac_variables
 # stage: deploy-gce-special
   AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
-  KUBE_NETWORK_PLUGIN: calico
+  KUBE_NETWORK_PLUGIN: flannel
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: separate
   STARTUP_SCRIPT: ""
@@ -609,13 +609,13 @@ ubuntu-vault-sep:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
 
-ubuntu-calico-rbac-sep:
+ubuntu-flannel-rbac-sep:
   stage: deploy-gce-special
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_calico_rbac_variables
+    <<: *ubuntu_flannel_rbac_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]

From 5fa31eaead72e005843ceeaa0481f35a505bb43f Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Mon, 10 Jul 2017 17:33:42 +0800
Subject: [PATCH 12/19] add '-e "${AUTHORIZATION_MODES}"' for all cluster.yml

---
 .gitlab-ci.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 7e9a407bf..b7f3f7f4a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -162,6 
+162,7 @@ before_script: -e resolvconf_mode=${RESOLVCONF_MODE} -e weave_cpu_requests=${WEAVE_CPU_LIMIT} -e weave_cpu_limit=${WEAVE_CPU_LIMIT} + -e "${AUTHORIZATION_MODES}" --limit "all:!fake_hosts" $PLAYBOOK; fi @@ -192,6 +193,7 @@ before_script: -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + -e "${AUTHORIZATION_MODES}" --limit "all:!fake_hosts" cluster.yml; fi @@ -234,6 +236,7 @@ before_script: -e etcd_deployment_type=${ETCD_DEPLOYMENT} -e kubedns_min_replicas=1 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT} + -e "${AUTHORIZATION_MODES}" --limit "all:!fake_hosts" cluster.yml; fi From d8dcb8f6e028ad8fa487fae8eb85ef7516f55d07 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 10 Jul 2017 18:53:59 +0800 Subject: [PATCH 13/19] no need to patch system:kube-dns --- roles/kubernetes-apps/ansible/tasks/main.yml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 00a1fd74d..421cdec79 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -26,23 +26,6 @@ - rbac_enabled or item.type not in kubedns_rbac_resources tags: dnsmasq -# see https://github.com/kubernetes/kubernetes/issues/45084 -# TODO: this is only needed for "old" kube-dns -- name: Kubernetes Apps | Patch system:kube-dns ClusterRole - command: > - {{bin_dir}}/kubectl patch clusterrole system:kube-dns - --patch='{ - "rules": [ - { - "apiGroups" : [""], - "resources" : ["endpoints", "services"], - "verbs": ["list", "watch", "get"] - } - ] - }' - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled - tags: dnsmasq - - name: Kubernetes Apps | Start Resources kube: name: "{{item.item.name}}" From 83deecb9e9a02c8e2252cea864a33fb583c9c58b Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 10 Jul 2017 19:05:42 +0800 Subject: [PATCH 14/19] Revert "no need to patch system:kube-dns" This reverts commit c2ea8c588aa5c3879f402811d3599a7bb3ccab24. 
--- roles/kubernetes-apps/ansible/tasks/main.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 421cdec79..00a1fd74d 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -26,6 +26,23 @@ - rbac_enabled or item.type not in kubedns_rbac_resources tags: dnsmasq +# see https://github.com/kubernetes/kubernetes/issues/45084 +# TODO: this is only needed for "old" kube-dns +- name: Kubernetes Apps | Patch system:kube-dns ClusterRole + command: > + {{bin_dir}}/kubectl patch clusterrole system:kube-dns + --patch='{ + "rules": [ + { + "apiGroups" : [""], + "resources" : ["endpoints", "services"], + "verbs": ["list", "watch", "get"] + } + ] + }' + when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled + tags: dnsmasq + - name: Kubernetes Apps | Start Resources kube: name: "{{item.item.name}}" From e1386ba6047ad99e530bdefbc50791beff2526ea Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 10 Jul 2017 19:14:28 +0800 Subject: [PATCH 15/19] only patch system:kube-dns role for old dns --- roles/kubernetes-apps/ansible/tasks/main.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 00a1fd74d..b76ec5b07 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -26,8 +26,7 @@ - rbac_enabled or item.type not in kubedns_rbac_resources tags: dnsmasq -# see https://github.com/kubernetes/kubernetes/issues/45084 -# TODO: this is only needed for "old" kube-dns +# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns - name: Kubernetes Apps | Patch system:kube-dns ClusterRole command: > {{bin_dir}}/kubectl patch clusterrole system:kube-dns @@ -40,7 +39,9 @@ } ] }' - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled + when: + - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True) tags: dnsmasq - name: Kubernetes Apps | Start Resources From a8e6a0763d3594579d5c824dcfb37301c747fe30 Mon Sep 17 00:00:00 2001 From: jwfang <54740235@qq.com> Date: Mon, 17 Jul 2017 19:28:09 +0800 Subject: [PATCH 16/19] run netchecker-server with list pods --- roles/kubernetes-apps/ansible/defaults/main.yml | 4 ++-- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- roles/kubernetes-apps/ansible/tasks/netchecker.yml | 8 +++++++- .../templates/netchecker-server-clusterrole.yml.j2 | 9 +++++++++ .../netchecker-server-clusterrolebinding.yml.j2 | 13 +++++++++++++ .../templates/netchecker-server-deployment.yml.j2 | 3 +++ .../ansible/templates/netchecker-server-sa.yml.j2 | 7 +++++++ 7 files changed, 42 insertions(+), 4 deletions(-) create mode 100644 roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 97d1bcdc4..d42b2ffed 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -41,7 +41,7 
@@ netchecker_server_memory_requests: 64M etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" -kubedns_rbac_resources: +rbac_resources: + - sa - clusterrole - clusterrolebinding - - sa diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index b76ec5b07..e7bd934de 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -23,7 +23,7 @@ register: manifests when: - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] - - rbac_enabled or item.type not in kubedns_rbac_resources + - rbac_enabled or item.type not in rbac_resources tags: dnsmasq # see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index aae75d091..d4bfb7a4f 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -5,10 +5,15 @@ with_items: - {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent} - {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet} + - {file: netchecker-server-sa.yml.j2, type: sa, name: netchecker-server} + - {file: netchecker-server-clusterrole.yml.j2, type: clusterrole, name: netchecker-server} + - {file: netchecker-server-clusterrolebinding.yml.j2, type: clusterrolebinding, name: netchecker-server} - {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server} - {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service} register: manifests - when: inventory_hostname == groups['kube-master'][0] + when: + - inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in rbac_resources #FIXME: remove if kubernetes/features#124 is implemented - name: Kubernetes Apps | Purge old Netchecker daemonsets @@ -31,4 +36,5 @@ filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" + failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 new file mode 100644 index 000000000..7a8c1d273 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2 @@ -0,0 +1,9 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["list"] diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..54c1eaf94 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2 @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: netchecker-server + namespace: {{ netcheck_namespace }} +subjects: + - kind: ServiceAccount + name: netchecker-server + namespace: {{ netcheck_namespace }} +roleRef: + kind: ClusterRole + name: netchecker-server + apiGroup: rbac.authorization.k8s.io diff --git 
a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
index 6c52352fb..c3dbf3cb5 100644
--- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2
@@ -31,3 +31,6 @@ spec:
         - "-logtostderr"
         - "-kubeproxyinit"
         - "-endpoint=0.0.0.0:8081"
+{% if rbac_enabled %}
+      serviceAccountName: netchecker-server
+{% endif %}
diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2
new file mode 100644
index 000000000..26d15f3a8
--- /dev/null
+++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: netchecker-server
+  namespace: {{ netcheck_namespace }}
+  labels:
+    kubernetes.io/cluster-service: "true"

From 789910d8ebe31201873814c699b400f0ccee63f4 Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Mon, 17 Jul 2017 19:29:06 +0800
Subject: [PATCH 17/19] remove unused netchecker-agent-hostnet-ds.j2

---
 .../templates/netchecker-agent-hostnet-ds.j2  | 43 -------------------
 1 file changed, 43 deletions(-)
 delete mode 100644 roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2

diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2
deleted file mode 100644
index 10a74da84..000000000
--- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  labels:
-    app: netchecker-agent-hostnet
-  name: netchecker-agent-hostnet
-  namespace: {{ netcheck_namespace }}
-spec:
-  template:
-    metadata:
-      name: netchecker-agent-hostnet
-      labels:
-        app: netchecker-agent-hostnet
-    spec:
-      hostNetwork: True
-{% if kube_version | version_compare('v1.6', '>=') %}
-      dnsPolicy: ClusterFirstWithHostNet
-{% endif %}
-      containers:
-        - name: netchecker-agent
-          image: "{{ agent_img }}"
-          env:
-            - name: MY_POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-            - name: MY_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-          args:
-            - "-v=5"
-            - "-alsologtostderr=true"
-            - "-serverendpoint=netchecker-service:8081"
-            - "-reportinterval={{ agent_report_interval }}"
-          imagePullPolicy: {{ k8s_image_pull_policy }}
-          resources:
-            limits:
-              cpu: {{ netchecker_agent_cpu_limit }}
-              memory: {{ netchecker_agent_memory_limit }}
-            requests:
-              cpu: {{ netchecker_agent_cpu_requests }}
-              memory: {{ netchecker_agent_memory_requests }}

From 3d87f23bf5b7286817c101f668e7d0cc7f6dad8f Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Wed, 19 Jul 2017 12:11:47 +0800
Subject: [PATCH 18/19] uncomment unintended local changes

---
 roles/download/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml
index cc244619e..24d1b5bca 100644
--- a/roles/download/tasks/main.yml
+++ b/roles/download/tasks/main.yml
@@ -191,7 +191,7 @@
     src: "{{ fname }}"
     dest: "{{ fname }}"
     mode: push
-    #delegate_to: localhost
+    delegate_to: localhost
   become: false
   register: get_task
   until: get_task|succeeded

From 805d9f22ce61f9888aab162bcfe5f1fc62c49c2e Mon Sep 17 00:00:00 2001
From: jwfang <54740235@qq.com>
Date: Mon, 24 Jul 2017 19:11:41 +0800
Subject: [PATCH 19/19]
note upgrade from non-RBAC not supported

---
 docs/vars.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/vars.md b/docs/vars.md
index a14ce08c0..537aa5753 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -71,7 +71,7 @@ following default cluster paramters:
 https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
   that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
   Note: `RBAC` is currently in an experimental phase and does not support either calico or
-  vault.
+  vault. Upgrading from non-RBAC to RBAC is not tested.
 
 Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
 private addresses, make sure to pick another values for ``kube_service_addresses``
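
With the series applied, RBAC is strictly opt-in: `authorization_modes` defaults to `[]`, and `rbac_enabled` is derived from it in kubespray-defaults, so no separate flag is needed. A minimal sketch of enabling it from an inventory override follows; the `inventory/group_vars/k8s-cluster.yml` path is assumed for illustration and is not part of these patches:

```yaml
# inventory/group_vars/k8s-cluster.yml (illustrative path, not from this series)
# Enable RBAC. rbac_enabled is computed by roles/kubespray-defaults/defaults/main.yaml
# as "{{ 'RBAC' in authorization_modes }}", so it needs no separate override.
authorization_modes: ['RBAC']
```

The same override can be passed on the command line, mirroring how the CI jobs feed `AUTHORIZATION_MODES` through `-e`, e.g. `ansible-playbook -i <inventory> -e "{ 'authorization_modes': [ 'RBAC' ] }" cluster.yml`.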
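
To confirm the mode took effect after a converge, a task in the style of the roles above can query the apiserver's authorizer. This is a sketch only, reusing the series' own assumptions (`bin_dir`, `netcheck_namespace`, and the netchecker-server ClusterRole granting `list` on pods); it also assumes `kubectl auth can-i` is available in the kubectl release deployed:

```yaml
# Sketch, not part of the patch series: ask the authorizer whether the
# netchecker-server service account may list pods, which its ClusterRole grants.
- name: Kubernetes Apps | Verify RBAC authorizes netchecker-server
  command: >-
    {{ bin_dir }}/kubectl auth can-i list pods
    --as=system:serviceaccount:{{ netcheck_namespace }}:netchecker-server
  register: rbac_check
  changed_when: false
  # A "no" answer exits non-zero, so judge on stdout rather than the return code.
  failed_when: rbac_check.stdout != "yes"
  when: rbac_enabled and inventory_hostname == groups['kube-master'][0]
```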