---
# Use ProxyCommand if a bastion host is in group 'all'
# This change obsoletes editing the ansible.cfg file depending on bastion existence
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p -p {{ hostvars['bastion']['ansible_port'] | default(22) }} {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
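# Illustrative only: the ProxyCommand above is activated by adding a host named 'bastion' to the
# inventory. A minimal sketch (address and user are hypothetical), e.g. in inventory/mycluster/hosts.ini:
#   [bastion]
#   bastion ansible_host=203.0.113.10 ansible_user=ubuntu ansible_port=22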
# selinux state
preinstall_selinux_state: permissive
# Setting this value to false will cause the deployment to fail
# For details, read this comment https://github.com/kubernetes-sigs/kubespray/pull/11016#issuecomment-2004985001
kube_api_anonymous_auth: true
# Default value, but will be set to true automatically if detected
is_fedora_coreos: false
# Swap settings
kubelet_fail_swap_on: true
kubelet_swap_behavior: LimitedSwap
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.30.4
## The minimum working Kubernetes version
kube_version_min_required: v1.28.0
## Kube-proxy mode: one of ['iptables', 'ipvs']
kube_proxy_mode: ipvs
## The timeout for initializing the first control plane
kubeadm_init_timeout: 300s
# TODO: remove this
kube_reserved_cgroups_for_service_slice: kube.slice
## List of kubeadm init phases that should be skipped during control plane setup
## By default 'addon/coredns' is skipped
## 'addon/kube-proxy' gets skipped for some network plugins
kubeadm_init_phases_skip_default: [ "addon/coredns" ]
kubeadm_init_phases_skip: >-
  {%- if kube_network_plugin == 'kube-router' and (kube_router_run_service_proxy is defined and kube_router_run_service_proxy) -%}
  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
  {%- elif kube_network_plugin == 'cilium' and (cilium_kube_proxy_replacement is defined and cilium_kube_proxy_replacement == 'strict') -%}
  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
  {%- elif kube_network_plugin == 'calico' and (calico_bpf_enabled is defined and calico_bpf_enabled) -%}
  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
  {%- elif kube_proxy_remove is defined and kube_proxy_remove -%}
  {{ kubeadm_init_phases_skip_default + ["addon/kube-proxy"] }}
  {%- else -%}
  {{ kubeadm_init_phases_skip_default }}
  {%- endif -%}
# List of kubeadm phases that should be skipped when joining a new node
# You may need to set this to ['preflight'] for air-gapped deployments to avoid failing connectivity tests.
kubeadm_join_phases_skip_default: []
kubeadm_join_phases_skip: >-
  {{ kubeadm_join_phases_skip_default }}
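# For example, in the air-gapped scenario described above, this could be overridden as:
# kubeadm_join_phases_skip: ['preflight']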
# Set to true to remove the role binding to anonymous users created by kubeadm
remove_anonymous_access: false
# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
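# Illustrative sketch (hypothetical CIDR): setting the legacy variable
# kube_proxy_nodeport_addresses_cidr: 10.0.0.0/24
# would render kube_proxy_nodeport_addresses as [10.0.0.0/24].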
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
kube_vip_enabled: false
# nginx-proxy configuration
nginx_config_dir: "/etc/nginx"
# haproxy configuration
haproxy_config_dir: "/etc/haproxy"
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
containerd_bin_dir: "{{ bin_dir }}"
etcd_data_dir: /var/lib/etcd
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# Install the EPEL repo on CentOS/RHEL
epel_enabled: false
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Default resolv.conf options
docker_dns_options:
  - ndots:{{ ndots }}
  - timeout:2
  - attempts:2
# Can be coredns, coredns_dual, manual, or none
dns_mode: coredns
# Enable dns autoscaler
enable_dns_autoscaler: true
# Enable nodelocal dns cache
enable_nodelocaldns: true
enable_nodelocaldns_secondary: false
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_second_health_port: 9256
nodelocaldns_bind_metrics_host_ip: false
nodelocaldns_secondary_skew_seconds: 5
# Should be set to a cluster IP if using a custom cluster DNS
manual_dns_server: ""
# Can be host_resolvconf, docker_dns or none
resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes DNS service (called skydns for historical reasons)
skydns_server: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(3) | ansible.utils.ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(4) | ansible.utils.ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
docker_dns_search_domains:
  - 'default.svc.{{ dns_domain }}'
  - 'svc.{{ dns_domain }}'
kube_dns_servers:
  coredns: ["{{ skydns_server }}"]
  coredns_dual: "{{ [skydns_server] + [skydns_server_secondary] }}"
  manual: ["{{ manual_dns_server }}"]
dns_servers: "{{ kube_dns_servers[dns_mode] }}"
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
enable_coredns_k8s_endpoint_pod_names: false
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# Kubectl command
# This is for consistency when using the kubectl command in roles, and to ensure the right kubeconfig is used
kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# compatibility directory for kubeadm
kube_cert_compat_dir: "/etc/kubernetes/pki"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is the user that owns the cluster installation.
kube_owner: kube
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Set to true when the CAs are managed externally.
# When true, disables all tasks manipulating certificates. Ensure before the kubespray run that:
# - Certificates and CAs are present in kube_cert_dir
# - Kubeconfig files are present in kube_config_dir
kube_external_ca_mode: false
# Cluster Loglevel configuration
kube_log_level: 2
# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
kube_network_plugin_multus: false
# Determines if calico_rr group exists
peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr'] | length > 0 }}"
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
calico_datastore: "kdd"
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# Internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# Internal network node size allocation (optional). This is the size allocated
# to each node for pod IP address allocation. Note that the number of pods per node is
# also limited by the kubelet_max_pods variable which defaults to 110.
#
# Example:
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
# - kube_pods_subnet: 10.233.64.0/18
# - kube_network_node_prefix: 24
# - kubelet_max_pods: 110
#
# Example:
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
# - kube_pods_subnet: 10.233.64.0/18
# - kube_network_node_prefix: 25
# - kubelet_max_pods: 110
kube_network_node_prefix: 24
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
enable_dual_stack_networks: false
# Kubernetes internal network for IPv6 services, unused block of space.
# This is only used if enable_dual_stack_networks is set to true
# This provides 4096 IPv6 IPs
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
# This network must not already be in your network infrastructure!
# This is only used if enable_dual_stack_networks is set to true.
# This provides room for 256 nodes with 254 pods per node.
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# IPv6 subnet size allocated to each node for pods.
# This is only used if enable_dual_stack_networks is set to true
# This provides room for 254 pods per node.
kube_network_node_prefix_ipv6: 120
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
# NOTE: If you specify an address/interface and use loadbalancer_apiserver_localhost,
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
# https
kube_apiserver_port: 6443
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}
# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# Aggregator
kube_api_aggregator_routing: false
# Profiling
kube_profiling: false
# Graceful Node Shutdown
kubelet_shutdown_grace_period: 60s
# kubelet_shutdown_grace_period_critical_pods should be less than kubelet_shutdown_grace_period
# to give normal pods time to be gracefully evacuated
kubelet_shutdown_grace_period_critical_pods: 20s
# Whether to deploy the container engine
deploy_container_engine: "{{ 'k8s_cluster' in group_names or etcd_deployment_type == 'docker' }}"
# Container runtime
container_manager: containerd
# Enable Node Resource Interface in containerd or CRI-O. Requires crio_version >= v1.26.0
# or containerd_version >= 1.7.0.
nri_enabled: false
# Enable Kata Containers as additional container runtime
# When enabled, it requires `container_manager` different than Docker
kata_containers_enabled: false
# Enable gVisor as an additional container runtime
# gVisor is only supported with container_manager Docker or containerd
gvisor_enabled: false
# Enable crun as additional container runtime
# When enabled, it requires container_manager=crio
crun_enabled: false
# Enable youki as additional container runtime
# When enabled, it requires container_manager=crio
youki_enabled: false
# Container manager on localhost (used to download images when download_localhost is true)
container_manager_on_localhost: "{{ container_manager }}"
# CRI socket path
cri_socket: >-
  {%- if container_manager == 'crio' -%}
  unix:///var/run/crio/crio.sock
  {%- elif container_manager == 'containerd' -%}
  unix:///var/run/containerd/containerd.sock
  {%- elif container_manager == 'docker' -%}
  unix:///var/run/cri-dockerd.sock
  {%- endif -%}
crio_insecure_registries: []
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2
## Only set this if you have more than 3 nameservers:
## If true Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## A list of insecure docker registries (IP address or domain name), for example
## to allow insecure-registry access to self-hosted registries. Empty by default.
# docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11
docker_insecure_registries: []
## A list of additional registry mirrors, for example China registry mirror. Empty by default.
# docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com
docker_registry_mirrors: []
## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:
## A string of extra options to pass to the docker daemon.
# docker_options: ""
## A list of plugins to install using 'docker plugin install --grant-all-permissions'
## Empty by default so no plugins will be installed.
docker_plugins: []
# Containerd options - these are relevant when container_manager == 'containerd'
containerd_use_systemd_cgroup: true
# Containerd default directories
containerd_storage_dir: "/var/lib/containerd"
containerd_state_dir: "/run/containerd"
containerd_systemd_dir: "/etc/systemd/system/containerd.service.d"
containerd_cfg_dir: "/etc/containerd"
# Settings for containerized control plane (etcd/kubelet/secrets)
# deployment type for legacy etcd mode
etcd_deployment_type: host
cert_management: script
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
kubectl_localhost: false
# Define credentials_dir here so it can be overridden
credentials_dir: "{{ inventory_dir }}/credentials"
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: false
# Addons which can be enabled
helm_enabled: false
krew_enabled: false
registry_enabled: false
metrics_server_enabled: false
enable_network_policy: true
local_path_provisioner_enabled: false
local_volume_provisioner_enabled: false
local_volume_provisioner_directory_mode: "0700"
cinder_csi_enabled: false
aws_ebs_csi_enabled: false
azure_csi_enabled: false
gcp_pd_csi_enabled: false
vsphere_csi_enabled: false
upcloud_csi_enabled: false
csi_snapshot_controller_enabled: false
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
rbd_provisioner_enabled: false
ingress_nginx_enabled: false
ingress_alb_enabled: false
cert_manager_enabled: false
expand_persistent_volumes: false
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
argocd_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
openstack_blockstorage_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}"
# set max volumes per node (cinder-csi), default not set
# node_volume_attach_limit: 25
# Cinder CSI topology, when false volumes can be cross-mounted between availability zones
# cinder_topology: false
# Set Cinder topology zones (can be multiple zones, default not set)
# cinder_topology_zones:
# - nova
cinder_csi_ignore_volume_az: "{{ volume_cross_zone_attachment | default('false') }}"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
openstack_lbaas_enabled: false
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
openstack_lbaas_create_monitor: "yes"
openstack_lbaas_monitor_delay: "1m"
openstack_lbaas_monitor_timeout: "30s"
openstack_lbaas_monitor_max_retries: "3"
openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
# Default values for the external OpenStack Cloud Controller
external_openstack_lbaas_enabled: true
external_openstack_network_ipv6_disabled: false
external_openstack_network_internal_networks: []
external_openstack_network_public_networks: []
# Default values for the external Hcloud Cloud Controller
external_hcloud_cloud:
  hcloud_api_token: ""
  token_secret_name: hcloud
  service_account_name: cloud-controller-manager
  controller_image_tag: "latest"
  ## A dictionary of extra arguments to add to the hcloud cloud controller manager daemonset
  ## Format:
  ## external_hcloud_cloud.controller_extra_args:
  ##   arg1: "value1"
  ##   arg2: "value2"
  controller_extra_args: {}
## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet's HTTPS endpoint
kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: true
# kubelet uses certificates for authenticating to the Kubernetes API
# Automatically generate a new key and request a new certificate from the Kubernetes API as the current certificate approaches expiration
kubelet_rotate_certificates: true
# kubelet can also request a new server certificate from the Kubernetes API
kubelet_rotate_server_certificates: false
# If set to true, kubelet errors out if any kernel tunable differs from the kubelet defaults
kubelet_protect_kernel_defaults: true
# Set additional sysctl variables to modify Linux kernel variables, for example:
# additional_sysctl:
# - { name: kernel.pid_max, value: 131072 }
#
additional_sysctl: []
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates: []
kube_apiserver_feature_gates: []
kube_controller_feature_gates: []
kube_scheduler_feature_gates: []
kube_proxy_feature_gates: []
kubelet_feature_gates: []
kubeadm_feature_gates: []
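# Illustrative sketch of the key=value list format described above (the gate name below is just
# an example, not a recommendation):
# kube_feature_gates:
#   - "RotateKubeletServerCertificate=true"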
# Local volume provisioner storage classes
# Leverages Ansible's string to Python datatype casting. Otherwise the dict_key isn't substituted
# see https://github.com/ansible/ansible/issues/17324
local_volume_provisioner_storage_classes: |
  {
    "{{ local_volume_provisioner_storage_class | default('local-storage') }}": {
      "host_dir": "{{ local_volume_provisioner_base_dir | default('/mnt/disks') }}",
      "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}",
      "volume_mode": "Filesystem",
      "fs_type": "ext4"
    }
  }
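# A minimal override sketch keeping the same structure as the default above (class name and
# paths are hypothetical, and this assumes the variable can also be supplied directly as a mapping):
# local_volume_provisioner_storage_classes:
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     volume_mode: Filesystem
#     fs_type: ext4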
# weave's network password for encryption
# if null then no network encryption
# you can use --extra-vars to pass the password on the command line
weave_password: EnterPasswordHere
ssl_ca_dirs: |-
  [
  {% if ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] -%}
  '/usr/share/ca-certificates',
  {% elif ansible_os_family == 'RedHat' -%}
  '/etc/pki/tls',
  '/etc/pki/ca-trust',
  {% elif ansible_os_family == 'Debian' -%}
  '/usr/share/ca-certificates',
  {% endif -%}
  ]
# Vars for pointing to kubernetes api endpoints
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_control_plane_address: "{{ hostvars[groups['kube_control_plane'][0]]['access_ip'] | default(hostvars[groups['kube_control_plane'][0]]['ip'] | default(fallback_ips[groups['kube_control_plane'][0]])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
loadbalancer_apiserver_type: "nginx"
# applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
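# Illustrative sketch (hypothetical address) of an external load balancer definition; defining
# loadbalancer_apiserver switches the endpoint templates below to apiserver_loadbalancer_domain_name,
# and its 'port' key overrides kube_apiserver_port there:
# loadbalancer_apiserver:
#   address: 203.0.113.20
#   port: 8443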
kube_apiserver_global_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
  https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  {%- elif loadbalancer_apiserver_localhost and (loadbalancer_apiserver_port is not defined or loadbalancer_apiserver_port == kube_apiserver_port) -%}
  https://localhost:{{ kube_apiserver_port }}
  {%- else -%}
  https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
  {%- endif %}
kube_apiserver_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
  https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
  {%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
  {%- elif 'kube_control_plane' in group_names -%}
  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
  https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
  {%- endif %}
kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
# Set to true to deploy etcd-events cluster
etcd_events_cluster_enabled: false
# etcd group can be empty when kubeadm manages etcd
etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
# Vars for pointing to etcd endpoints
etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address }}:2383"
etcd_access_addresses: |-
  {% for item in etcd_hosts -%}
  https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2379{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_access_addresses_list: |-
  [
  {% for item in etcd_hosts -%}
  'https://{{ hostvars[item]['etcd_events_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:2383'{% if not loop.last %},{% endif %}
  {%- endfor %}
  ]
etcd_metrics_addresses: |-
  {% for item in etcd_hosts -%}
  https://{{ hostvars[item]['etcd_access_address'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}:{{ etcd_metrics_port | default(2381) }}{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_access_addresses: "{{ etcd_events_access_addresses_list | join(',') }}"
etcd_events_access_addresses_semicolon: "{{ etcd_events_access_addresses_list | join(';') }}"
# user should set etcd_member_name in inventory/mycluster/hosts.ini
etcd_member_name: |-
  {% for host in groups['etcd'] %}
  {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index | string) }}{% endif %}
  {% endfor %}
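# Illustrative hosts.ini sketch (hypothetical host name and address) for setting etcd_member_name
# per host, as the note above suggests:
#   [etcd]
#   node1 ansible_host=192.0.2.11 etcd_member_name=etcd1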
etcd_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}=https://{{ hostvars[item].etcd_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2380{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
etcd_snapshot_count: "10000"
certificates_key_size: 2048
certificates_duration: 36500
etcd_config_dir: /etc/ssl/etcd
etcd_events_data_dir: "/var/lib/etcd-events"
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
typha_enabled: false
calico_apiserver_enabled: false
_host_architecture_groups:
  x86_64: amd64
  aarch64: arm64
  armv7l: arm
host_architecture: >-
  {%- if ansible_architecture in _host_architecture_groups -%}
  {{ _host_architecture_groups[ansible_architecture] }}
  {%- else -%}
  {{ ansible_architecture }}
  {%- endif -%}
_host_os_groups:
  Linux: linux
  Darwin: darwin
  Win32NT: windows
host_os: >-
  {%- if ansible_system in _host_os_groups -%}
  {{ _host_os_groups[ansible_system] }}
  {%- else -%}
  {{ ansible_system }}
  {%- endif -%}
# Sets the eventRecordQPS parameter in kubelet-config.yaml.
# Setting it to 0 allows unlimited requests per second.
kubelet_event_record_qps: 50
proxy_env_defaults:
  http_proxy: "{{ http_proxy | default('') }}"
  HTTP_PROXY: "{{ http_proxy | default('') }}"
  https_proxy: "{{ https_proxy | default('') }}"
  HTTPS_PROXY: "{{ https_proxy | default('') }}"
  no_proxy: "{{ no_proxy | default('') }}"
  NO_PROXY: "{{ no_proxy | default('') }}"
# Using SSL_CERT_FILE: {{ omit }} would render the value as __omit_place_holder__ and break environments.
# Combining dicts avoids the problem with the omit placeholder. Maybe there is a better solution?
proxy_env: "{{ proxy_env_defaults | combine({'SSL_CERT_FILE': https_proxy_cert_file}) if https_proxy_cert_file is defined else proxy_env_defaults }}"
proxy_disable_env:
  ALL_PROXY: ''
  FTP_PROXY: ''
  HTTPS_PROXY: ''
  HTTP_PROXY: ''
  NO_PROXY: ''
  all_proxy: ''
  ftp_proxy: ''
  http_proxy: ''
  https_proxy: ''
  no_proxy: ''
# krew root dir
krew_root_dir: "/usr/local/krew"
# sysctl_file_path to add sysctl conf to
sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"
system_upgrade: false
system_upgrade_reboot: on-upgrade # never, always
# Enables or disables the scheduler plugins.
scheduler_plugins_enabled: false