---
  1. # Valid bootstrap options (required): ubuntu, coreos, centos, none
  2. bootstrap_os: none
  3. #Directory where etcd data stored
  4. etcd_data_dir: /var/lib/etcd
  5. # Directory where the binaries will be installed
  6. bin_dir: /usr/local/bin
  7. ## The access_ip variable is used to define how other nodes should access
  8. ## the node. This is used in flannel to allow other flannel nodes to see
  9. ## this node for example. The access_ip is really useful AWS and Google
  10. ## environments where the nodes are accessed remotely by the "public" ip,
  11. ## but don't know about that address themselves.
  12. #access_ip: 1.1.1.1
  13. ### LOADBALANCING AND ACCESS MODES
  14. ## Enable multiaccess to configure etcd clients to access all of the etcd members directly
  15. ## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
  16. ## This may be the case if clients support and loadbalance multiple etcd servers natively.
  17. #etcd_multiaccess: true
  18. ## External LB example config
  19. ## apiserver_loadbalancer_domain_name: "elb.some.domain"
  20. #loadbalancer_apiserver:
  21. # address: 1.2.3.4
  22. # port: 1234
  23. ## Internal loadbalancers for apiservers
  24. #loadbalancer_apiserver_localhost: true
  25. ## Local loadbalancer should use this port instead, if defined.
  26. ## Defaults to kube_apiserver_port (6443)
  27. #nginx_kube_apiserver_port: 8443
  28. ### OTHER OPTIONAL VARIABLES
  29. ## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
  30. ## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
  31. ## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
  32. ## modules.
  33. # kubelet_load_modules: false
  34. ## Internal network total size. This is the prefix of the
  35. ## entire network. Must be unused in your environment.
  36. #kube_network_prefix: 18
  37. ## With calico it is possible to distributed routes with border routers of the datacenter.
  38. ## Warning : enabling router peering will disable calico's default behavior ('node mesh').
  39. ## The subnets of each nodes will be distributed by the datacenter router
  40. #peer_with_router: false
  41. ## Upstream dns servers used by dnsmasq
  42. #upstream_dns_servers:
  43. # - 8.8.8.8
  44. # - 8.8.4.4
  45. ## There are some changes specific to the cloud providers
  46. ## for instance we need to encapsulate packets with some network plugins
  47. ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
  48. ## When openstack is used make sure to source in the openstack credentials
  49. ## like you would do when using nova-client before starting the playbook.
  50. #cloud_provider:
  51. ## When azure is used, you need to also set the following variables.
  52. ## see docs/azure.md for details on how to get these values
  53. #azure_tenant_id:
  54. #azure_subscription_id:
  55. #azure_aad_client_id:
  56. #azure_aad_client_secret:
  57. #azure_resource_group:
  58. #azure_location:
  59. #azure_subnet_name:
  60. #azure_security_group_name:
  61. #azure_vnet_name:
  62. #azure_route_table_name:
  63. ## Set these proxy values in order to update docker daemon to use proxies
  64. #http_proxy: ""
  65. #https_proxy: ""
  66. #no_proxy: ""
  67. ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
  68. ## Please note that overlay2 is only supported on newer kernels
  69. #docker_storage_options: -s overlay2
  70. # Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
  71. #docker_dns_servers_strict: false
  72. ## Default packages to install within the cluster, f.e:
  73. #kpm_packages:
  74. # - name: kube-system/grafana
  75. ## Certificate Management
  76. ## This setting determines whether certs are generated via scripts or whether a
  77. ## cluster of Hashicorp's Vault is started to issue certificates (using etcd
  78. ## as a backend). Options are "script" or "vault"
  79. #cert_management: script
  80. ## Please specify true if you want to perform a kernel upgrade
  81. kernel_upgrade: false
  82. ## Etcd auto compaction retention for mvcc key value store in hour
  83. #etcd_compaction_retention: 0
  84. ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
  85. #etcd_metrics: basic