---
# Cluster configuration defaults (Ansible group_vars).
  1. # Valid bootstrap options (required): ubuntu, coreos, centos, none
  2. bootstrap_os: none
  3. # Directory where the binaries will be installed
  4. bin_dir: /usr/local/bin
  5. # Kubernetes configuration dirs and system namespace.
  6. # Those are where all the additional config stuff goes
  7. # the kubernetes normally puts in /srv/kubernets.
  8. # This puts them in a sane location and namespace.
  9. # Editing those values will almost surely break something.
  10. kube_config_dir: /etc/kubernetes
  11. kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
  12. kube_manifest_dir: "{{ kube_config_dir }}/manifests"
  13. system_namespace: kube-system
  14. # This is where all the cert scripts and certs will be located
  15. kube_cert_dir: "{{ kube_config_dir }}/ssl"
  16. # This is where all of the bearer tokens will be stored
  17. kube_token_dir: "{{ kube_config_dir }}/tokens"
  18. # This is where to save basic auth file
  19. kube_users_dir: "{{ kube_config_dir }}/users"
  20. ## Change this to use another Kubernetes version, e.g. a current beta release
  21. kube_version: v1.5.3
  22. # Where the binaries will be downloaded.
  23. # Note: ensure that you've enough disk space (about 1G)
  24. local_release_dir: "/tmp/releases"
  25. # Random shifts for retrying failed ops like pushing/downloading
  26. retry_stagger: 5
  27. # Uncomment this line for CoreOS only.
  28. # Directory where python binary is installed
  29. # ansible_python_interpreter: "/opt/bin/python"
  30. # This is the group that the cert creation scripts chgrp the
  31. # cert files to. Not really changable...
  32. kube_cert_group: kube-cert
  33. # Cluster Loglevel configuration
  34. kube_log_level: 2
  35. # Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previos versions, anonymous auth was
  36. # not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
  37. # 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
  38. kube_api_anonymous_auth: false
  39. #
  40. # For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
  41. # for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
  42. # processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
  43. # modules.
  44. #
  45. kubelet_load_modules: false
  46. # Users to create for basic auth in Kubernetes API via HTTP
  47. kube_api_pwd: "changeme"
  48. kube_users:
  49. kube:
  50. pass: "{{kube_api_pwd}}"
  51. role: admin
  52. root:
  53. pass: "{{kube_api_pwd}}"
  54. role: admin
  55. # Kubernetes cluster name, also will be used as DNS domain
  56. cluster_name: cluster.local
  57. # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
  58. ndots: 2
  59. # Deploy netchecker app to verify DNS resolve as an HTTP service
  60. deploy_netchecker: false
  61. # For some environments, each node has a publicly accessible
  62. # address and an address it should bind services to. These are
  63. # really inventory level variables, but described here for consistency.
  64. #
  65. # When advertising access, the access_ip will be used, but will defer to
  66. # ip and then the default ansible ip when unspecified.
  67. #
  68. # When binding to restrict access, the ip variable will be used, but will
  69. # defer to the default ansible ip when unspecified.
  70. #
  71. # The ip variable is used for specific address binding, e.g. listen address
  72. # for etcd. This is use to help with environments like Vagrant or multi-nic
  73. # systems where one address should be preferred over another.
  74. # ip: 10.2.2.2
  75. #
  76. # The access_ip variable is used to define how other nodes should access
  77. # the node. This is used in flannel to allow other flannel nodes to see
  78. # this node for example. The access_ip is really useful AWS and Google
  79. # environments where the nodes are accessed remotely by the "public" ip,
  80. # but don't know about that address themselves.
  81. # access_ip: 1.1.1.1
  82. # Etcd access modes:
  83. # Enable multiaccess to configure clients to access all of the etcd members directly
  84. # as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
  85. # This may be the case if clients support and loadbalance multiple etcd servers natively.
  86. etcd_multiaccess: true
  87. # Assume there are no internal loadbalancers for apiservers exist and listen on
  88. # kube_apiserver_port (default 443)
  89. loadbalancer_apiserver_localhost: true
  90. # Choose network plugin (calico, canal, weave or flannel)
  91. # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
  92. kube_network_plugin: calico
  93. # Kubernetes internal network for services, unused block of space.
  94. kube_service_addresses: 10.233.0.0/18
  95. # internal network. When used, it will assign IP
  96. # addresses from this range to individual pods.
  97. # This network must be unused in your network infrastructure!
  98. kube_pods_subnet: 10.233.64.0/18
  99. # internal network total size (optional). This is the prefix of the
  100. # entire network. Must be unused in your environment.
  101. # kube_network_prefix: 18
  102. # internal network node size allocation (optional). This is the size allocated
  103. # to each node on your network. With these defaults you should have
  104. # room for 4096 nodes with 254 pods per node.
  105. kube_network_node_prefix: 24
  106. # With calico it is possible to distributed routes with border routers of the datacenter.
  107. peer_with_router: false
  108. # Warning : enabling router peering will disable calico's default behavior ('node mesh').
  109. # The subnets of each nodes will be distributed by the datacenter router
  110. # API Server service IP address in Kubernetes internal network.
  111. kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
  112. # The port the API Server will be listening on.
  113. kube_apiserver_port: 443 # (https)
  114. kube_apiserver_insecure_port: 8080 # (http)
  115. # local loadbalancer should use this port instead - default to kube_apiserver_port
  116. nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"
  117. # Internal DNS configuration.
  118. # Kubernetes can create and mainatain its own DNS server to resolve service names
  119. # into appropriate IP addresses. It's highly advisable to run such DNS server,
  120. # as it greatly simplifies configuration of your applications - you can use
  121. # service names instead of magic environment variables.
  122. # Can be dnsmasq_kubedns, kubedns or none
  123. dns_mode: dnsmasq_kubedns
  124. # Can be docker_dns, host_resolvconf or none
  125. resolvconf_mode: docker_dns
  126. ## Upstream dns servers used by dnsmasq
  127. #upstream_dns_servers:
  128. # - 8.8.8.8
  129. # - 8.8.4.4
  130. dns_domain: "{{ cluster_name }}"
  131. # Ip address of the kubernetes skydns service
  132. skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
  133. dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
  134. # There are some changes specific to the cloud providers
  135. # for instance we need to encapsulate packets with some network plugins
  136. # If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
  137. # When openstack is used make sure to source in the openstack credentials
  138. # like you would do when using nova-client before starting the playbook.
  139. # When azure is used, you need to also set the following variables.
  140. # cloud_provider:
  141. # see docs/azure.md for details on how to get these values
  142. #azure_tenant_id:
  143. #azure_subscription_id:
  144. #azure_aad_client_id:
  145. #azure_aad_client_secret:
  146. #azure_resource_group:
  147. #azure_location:
  148. #azure_subnet_name:
  149. #azure_security_group_name:
  150. #azure_vnet_name:
  151. #azure_route_table_name:
  152. ## Set these proxy values in order to update docker daemon to use proxies
  153. # http_proxy: ""
  154. # https_proxy: ""
  155. # no_proxy: ""
  156. # Path used to store Docker data
  157. docker_daemon_graph: "/var/lib/docker"
  158. ## A string of extra options to pass to the docker daemon.
  159. ## This string should be exactly as you wish it to appear.
  160. ## An obvious use case is allowing insecure-registry access
  161. ## to self hosted registries like so:
  162. docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}"
  163. docker_bin_dir: "/usr/bin"
  164. ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
  165. ## Please note that overlay2 is only supported on newer kernels
  166. #docker_storage_options: -s overlay2
  167. # K8s image pull policy (imagePullPolicy)
  168. k8s_image_pull_policy: IfNotPresent
  169. # default packages to install within the cluster
  170. kpm_packages: []
  171. # - name: kube-system/grafana
  172. # Settings for containerized control plane (etcd/kubelet)
  173. rkt_version: 1.21.0
  174. etcd_deployment_type: docker
  175. kubelet_deployment_type: docker
  176. vault_deployment_type: docker
  177. efk_enabled: false
  178. ## Certificate Management
  179. ## This setting determines whether certs are generated via scripts or whether a
  180. ## cluster of Hashicorp's Vault is started to issue certificates (using etcd
  181. ## as a backend). Options are "script" or "vault"
  182. cert_management: script
  183. # Please specify true if you want to perform a kernel upgrade
  184. kernel_upgrade: false