Browse Source

Significantly reduce memory requirements

Canal runs more pods and upgrades need a bit of extra
room to load new pods in and get the old ones out.
pull/1153/head
Matthew Mosesohn 8 years ago
parent
commit
e9a294fd9c
5 changed files with 9 additions and 9 deletions
  1. 2
      roles/dnsmasq/defaults/main.yml
  2. 6
      roles/kubernetes/master/defaults/main.yml
  3. 2
      roles/kubernetes/master/tasks/pre-upgrade.yml
  4. 2
      roles/kubernetes/node/defaults/main.yml
  5. 6
      roles/network_plugin/canal/defaults/main.yml

2
roles/dnsmasq/defaults/main.yml

@@ -24,7 +24,7 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}"
# Limits for dnsmasq/kubedns apps
dns_cpu_limit: 100m
dns_memory_limit: 170Mi
- dns_cpu_requests: 70m
+ dns_cpu_requests: 40m
dns_memory_requests: 50Mi
# Autoscaler parameters

6
roles/kubernetes/master/defaults/main.yml

@@ -19,7 +19,7 @@ kube_apiserver_storage_backend: etcd3
# Limits for kube components
kube_controller_memory_limit: 512M
kube_controller_cpu_limit: 250m
- kube_controller_memory_requests: 170M
+ kube_controller_memory_requests: 100M
kube_controller_cpu_requests: 100m
kube_controller_node_monitor_grace_period: 40s
kube_controller_node_monitor_period: 5s
@@ -27,11 +27,11 @@ kube_controller_pod_eviction_timeout: 5m0s
kube_scheduler_memory_limit: 512M
kube_scheduler_cpu_limit: 250m
kube_scheduler_memory_requests: 170M
- kube_scheduler_cpu_requests: 100m
+ kube_scheduler_cpu_requests: 80m
kube_apiserver_memory_limit: 2000M
kube_apiserver_cpu_limit: 800m
kube_apiserver_memory_requests: 256M
- kube_apiserver_cpu_requests: 300m
+ kube_apiserver_cpu_requests: 100m
## Enable/Disable Kube API Server Authentication Methods

2
roles/kubernetes/master/tasks/pre-upgrade.yml

@@ -38,7 +38,7 @@
environment:
ETCDCTL_API: 2
register: old_data_exists
- delegate_to: "{{groups['kube-master'][0]}}"
+ delegate_to: "{{groups['etcd'][0]}}"
when: kube_apiserver_storage_backend == "etcd3"
failed_when: false

2
roles/kubernetes/node/defaults/main.yml

@@ -21,7 +21,7 @@ kube_proxy_cpu_requests: 150m
nginx_memory_limit: 512M
nginx_cpu_limit: 300m
nginx_memory_requests: 32M
- nginx_cpu_requests: 50m
+ nginx_cpu_requests: 25m
# kube_api_runtime_config:
# - extensions/v1beta1/daemonsets=true

6
roles/network_plugin/canal/defaults/main.yml

@@ -21,13 +21,13 @@ canal_policy_dir: /etc/kubernetes/policy
calico_node_memory_limit: 500M
calico_node_cpu_limit: 200m
calico_node_memory_requests: 64M
- calico_node_cpu_requests: 100m
+ calico_node_cpu_requests: 50m
flannel_memory_limit: 500M
flannel_cpu_limit: 200m
flannel_memory_requests: 64M
- flannel_cpu_requests: 100m
+ flannel_cpu_requests: 50m
calicoctl_memory_limit: 170M
calicoctl_cpu_limit: 100m
calicoctl_memory_requests: 32M
- calicoctl_cpu_requests: 50m
+ calicoctl_cpu_requests: 25m
Loading…
Cancel
Save