Add CoreDNS support with various fixes
Added CoreDNS to downloads.
Updated with labels; should now work without RBAC too.
Fixed DNS settings on hosts.
Renamed the CoreDNS service from kube-dns to coredns.
Added rotate to resolv.conf based on http://edgeofsanity.net/rant/2017/12/20/systemd-resolved-is-broken.html
Updated docs with CoreDNS info.
Added labels and fixed minor settings from the official yaml file: https://github.com/kubernetes/kubernetes/blob/release-1.9/cluster/addons/dns/coredns.yaml.sed
Added a secondary deployment and a secondary service IP to mitigate DNS timeouts and improve resiliency against failures; see the discussion at https://github.com/coreos/coreos-kubernetes/issues/641#issuecomment-281174806
Set the DNS server list correctly (thanks to @whereismyjetpack).
Only download KubeDNS or CoreDNS if selected.
Moved DNS cleanup to its own file and import tasks based on the DNS mode.
Fixed install of KubeDNS when the dnsmasq_kubedns mode is selected.
Added a new DNS option, coredns_dual, for dual-stack deployment, plus a variable to configure the number of replicas deployed; updated docs for dual-stack deployment.
Removed the rotate option in resolv.conf.
Run DNS manifests for CoreDNS and KubeDNS.
Set skydns servers on dual-stack deployment.
Use only one template for the CoreDNS dual deployment.
Set the correct cluster IP for the DNS server.
Committed by Andreas Kruger
20 changed files with 369 additions and 67 deletions
Changed files (lines changed per file):
  docs/dns-stack.md (8)
  docs/vars.md (7)
  inventory/sample/group_vars/k8s-cluster.yml (8)
  roles/docker/tasks/set_facts_dns.yml (6)
  roles/download/defaults/main.yml (17)
  roles/kubernetes-apps/ansible/defaults/main.yml (3)
  roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml (54)
  roles/kubernetes-apps/ansible/tasks/coredns.yml (39)
  roles/kubernetes-apps/ansible/tasks/kubedns.yml (41)
  roles/kubernetes-apps/ansible/tasks/main.yml (69)
  roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 (19)
  roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 (18)
  roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 (22)
  roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 (81)
  roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 (9)
  roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 (22)
  roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 (4)
  roles/kubernetes/node/templates/kubelet.standard.env.j2 (4)
  roles/kubernetes/preinstall/tasks/set_resolv_facts.yml (4)
  roles/kubespray-defaults/defaults/main.yaml (1)
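The commit wires the new behaviour through inventory variables. Below is a minimal sketch of how the relevant group_vars could be set for a dual-stack CoreDNS deployment; the variable names come from this diff, but the values shown are illustrative assumptions rather than the defaults added by the commit:

  # inventory/sample/group_vars/k8s-cluster.yml (illustrative values only)
  # Pick one of the modes handled by this change: kubedns, dnsmasq_kubedns, coredns, coredns_dual
  dns_mode: coredns_dual
  # Number of replicas per CoreDNS deployment (variable introduced by this commit)
  coredns_replicas: 2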
roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -0,0 +1,54 @@
---
- name: Kubernetes Apps | Delete old CoreDNS resources
  kube:
    name: "coredns"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    - 'deploy'
    - 'configmap'
    - 'svc'
  tags:
    - upgrade

- name: Kubernetes Apps | Delete kubeadm CoreDNS
  kube:
    name: "coredns"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "deploy"
    state: absent
  when:
    - kubeadm_enabled|default(false)
    - kubeadm_init.changed|default(false)
    - inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Delete old KubeDNS resources
  kube:
    name: "kube-dns"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    - 'deploy'
    - 'svc'
  tags:
    - upgrade

- name: Kubernetes Apps | Delete kubeadm KubeDNS
  kube:
    name: "kube-dns"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item }}"
    state: absent
  with_items:
    - 'deploy'
    - 'svc'
  when:
    - kubeadm_enabled|default(false)
    - kubeadm_init.changed|default(false)
    - inventory_hostname == groups['kube-master'][0]
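Per the commit message, this cleanup lives in its own file and the DNS task files are imported based on the selected mode; the modified roles/kubernetes-apps/ansible/tasks/main.yml (69 lines changed) is not included in this view. A rough sketch of what that wiring could look like, with conditions and tags assumed from the task files in this diff:

  # Hypothetical sketch of the tasks/main.yml wiring; the real hunk is not shown here.
  - import_tasks: cleanup_dns.yml
    when:
      - inventory_hostname == groups['kube-master'][0]
    tags:
      - upgrade

  - import_tasks: coredns.yml
    when:
      - dns_mode in ['coredns', 'coredns_dual']
    tags:
      - coredns

  - import_tasks: kubedns.yml
    when:
      - dns_mode in ['kubedns', 'dnsmasq_kubedns']
    tags:
      - dnsmasq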
roles/kubernetes-apps/ansible/tasks/coredns.yml
@@ -0,0 +1,39 @@
---
- name: Kubernetes Apps | Lay Down CoreDNS Template
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - { name: coredns, file: coredns-config.yml, type: configmap }
    - { name: coredns, file: coredns-sa.yml, type: sa }
    - { name: coredns, file: coredns-deployment.yml, type: deployment }
    - { name: coredns, file: coredns-svc.yml, type: svc }
    - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole }
    - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding }
  register: coredns_manifests
  vars:
    clusterIP: "{{ skydns_server }}"
  when:
    - dns_mode in ['coredns', 'coredns_dual']
    - inventory_hostname == groups['kube-master'][0]
    - rbac_enabled or item.type not in rbac_resources
  tags:
    - coredns

- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template
  template:
    src: "{{ item.src }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment }
    - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc }
  register: coredns_secondary_manifests
  vars:
    clusterIP: "{{ skydns_server_secondary }}"
    coredns_ordinal_suffix: "-secondary"
  when:
    - dns_mode == 'coredns_dual'
    - inventory_hostname == groups['kube-master'][0]
    - rbac_enabled or item.type not in rbac_resources
  tags:
    - coredns
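The secondary task above gives its Service the clusterIP from skydns_server_secondary, which is defined in the defaults and inventory files touched by this commit but not shown in this view. A sketch of how the two DNS ClusterIPs could be derived from the service network, assuming the same ipaddr-filter convention kubespray uses for skydns_server (the actual expressions are an assumption here):

  # Assumed derivation; the real expressions live in the changed defaults files.
  skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}"
  skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}"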
roles/kubernetes-apps/ansible/tasks/kubedns.yml
@@ -0,0 +1,41 @@
---

- name: Kubernetes Apps | Lay Down KubeDNS Template
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - { name: kube-dns, file: kubedns-sa.yml, type: sa }
    - { name: kube-dns, file: kubedns-deploy.yml, type: deployment }
    - { name: kube-dns, file: kubedns-svc.yml, type: svc }
    - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa }
    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole }
    - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding }
    - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment }
  register: kubedns_manifests
  when:
    - dns_mode in ['kubedns','dnsmasq_kubedns']
    - inventory_hostname == groups['kube-master'][0]
    - rbac_enabled or item.type not in rbac_resources
  tags:
    - dnsmasq

# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns
- name: Kubernetes Apps | Patch system:kube-dns ClusterRole
  command: >
    {{ bin_dir }}/kubectl patch clusterrole system:kube-dns
    --patch='{
      "rules": [
        {
          "apiGroups" : [""],
          "resources" : ["endpoints", "services"],
          "verbs": ["list", "watch", "get"]
        }
      ]
    }'
  when:
    - dns_mode in ['kubedns', 'dnsmasq_kubedns']
    - inventory_hostname == groups['kube-master'][0]
    - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
  tags:
    - dnsmasq
roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2
@@ -0,0 +1,19 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
@@ -0,0 +1,18 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: {{ system_namespace }}
roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -0,0 +1,22 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: {{ system_namespace }}
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -0,0 +1,81 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns{{ coredns_ordinal_suffix | default('') }}
  namespace: {{ system_namespace }}
  labels:
    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: {{ coredns_replicas }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 10%
  selector:
    matchLabels:
      k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
  template:
    metadata:
      labels:
        k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
{% if rbac_enabled %}
      serviceAccountName: coredns
{% endif %}
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}"
        imagePullPolicy: {{ k8s_image_pull_policy }}
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: {{ dns_memory_limit }}
          requests:
            cpu: {{ dns_cpu_requests }}
            memory: {{ dns_memory_requests }}
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: {{ system_namespace }}
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
@@ -0,0 +1,22 @@
---
apiVersion: v1
kind: Service
metadata:
  name: coredns{{ coredns_ordinal_suffix | default('') }}
  namespace: {{ system_namespace }}
  labels:
    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
  clusterIP: {{ clusterIP }}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP