Browse Source

Merge pull request #652 from kubernetes-incubator/debug_mode

Tune dnsmasq/kubedns limits, replicas, logging
pull/656/head
Bogdan Dobrelya 8 years ago
committed by GitHub
parent
commit
fbdda81515
11 changed files with 47 additions and 12 deletions
  1. 5
      docs/large-deployments.md
  2. 6
      roles/dnsmasq/defaults/main.yml
  3. 3
      roles/dnsmasq/templates/01-kube-dns.conf.j2
  4. 7
      roles/dnsmasq/templates/dnsmasq-ds.yml
  5. 7
      roles/kubernetes-apps/ansible/defaults/main.yml
  6. 21
      roles/kubernetes-apps/ansible/templates/kubedns-rc.yml
  7. 2
      roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
  8. 2
      roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
  9. 2
      roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
  10. 2
      roles/kubernetes/node/templates/kubelet.j2
  11. 2
      roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2

5
docs/large-deployments.md

@@ -21,5 +21,10 @@ For a large scaled deployments, consider the following configuration changes:
load on a delegate (the first K8s master node) then retrying failed
push or download operations.
* Tune parameters for DNS related applications (dnsmasq daemon set, kubedns
replication controller). Those are ``dns_replicas``, ``dns_cpu_limit``,
``dns_cpu_requests``, ``dns_memory_limit``, ``dns_memory_requests``.
Please note that limits must always be greater than or equal to requests.
For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.

6
roles/dnsmasq/defaults/main.yml

@@ -27,3 +27,9 @@ skip_dnsmasq: false
# Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi

3
roles/dnsmasq/templates/01-kube-dns.conf.j2

@@ -20,6 +20,9 @@ server=169.254.169.254
server=8.8.4.4
{% endif %}
{% if kube_log_level == 4 %}
log-queries
{% endif %}
bogus-priv
no-resolv
no-negcache

7
roles/dnsmasq/templates/dnsmasq-ds.yml

@@ -29,8 +29,11 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 256M
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
ports:
- name: dns
containerPort: 53

7
roles/kubernetes-apps/ansible/defaults/main.yml

@@ -3,6 +3,13 @@ kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
dns_cpu_requests: 70m
dns_memory_requests: 70Mi
dns_replicas: 1
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"

21
roles/kubernetes-apps/ansible/templates/kubedns-rc.yml

@@ -8,7 +8,7 @@ metadata:
version: v19
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
replicas: {{ dns_replicas }}
selector:
k8s-app: kubedns
version: v19
@@ -29,11 +29,11 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 170Mi
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: 100m
memory: 70Mi
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
livenessProbe:
httpGet:
path: /healthz
@@ -56,6 +56,7 @@ spec:
# command = "/kube-dns"
- --domain={{ dns_domain }}.
- --dns-port=10053
- --v={{ kube_log_level }}
ports:
- containerPort: 10053
name: dns-local
@@ -66,11 +67,21 @@ spec:
- name: dnsmasq
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ dns_cpu_limit }}
memory: {{ dns_memory_limit }}
requests:
cpu: {{ dns_cpu_requests }}
memory: {{ dns_memory_requests }}
args:
- --log-facility=-
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
{% if kube_log_level == 4 %}
- --log-queries
{% endif %}
ports:
- containerPort: 53
name: dns

2
roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2

@@ -41,7 +41,7 @@ spec:
{% if enable_network_policy is defined and enable_network_policy == True %}
- --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --allow-privileged=true
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{ cloud_provider }}

2
roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2

@@ -19,7 +19,7 @@ spec:
- --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
- --root-ca-file={{ kube_cert_dir }}/ca.pem
- --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
{% if cloud_provider is defined and cloud_provider == "openstack" %}
- --cloud-provider={{cloud_provider}}
- --cloud-config={{ kube_config_dir }}/cloud_config

2
roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2

@@ -16,7 +16,7 @@ spec:
- scheduler
- --leader-elect=true
- --master={{ kube_apiserver_endpoint }}
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
livenessProbe:
httpGet:
host: 127.0.0.1

2
roles/kubernetes/node/templates/kubelet.j2

@@ -5,7 +5,7 @@ KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
# The port for the info server to serve on

2
roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2

@@ -14,7 +14,7 @@ spec:
command:
- /hyperkube
- proxy
- --v={{ kube_log_level | default('2') }}
- --v={{ kube_log_level }}
- --master={{ kube_apiserver_endpoint }}
{% if not is_kube_master %}
- --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml

Loading…
Cancel
Save