You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

124 lines
4.8 KiB

  1. # Directory where the binaries will be installed
  2. bin_dir: /usr/local/bin
  3. # Where the binaries will be downloaded.
  4. # Note: ensure that you've enough disk space (about 1G)
  5. local_release_dir: "/tmp/releases"
  6. # Uncomment this line for CoreOS only.
  7. # Directory where python binary is installed
  8. # ansible_python_interpreter: "/opt/bin/python"
  9. # This is the group that the cert creation scripts chgrp the
  10. # cert files to. Not really changable...
  11. kube_cert_group: kube-cert
  12. # Cluster Loglevel configuration
  13. kube_log_level: 2
  14. # Users to create for basic auth in Kubernetes API via HTTP
  15. kube_users:
  16. kube:
  17. pass: changeme
  18. role: admin
  19. # root:
  20. # pass: changeme
  21. # role: admin
  22. # Kubernetes cluster name, also will be used as DNS domain
  23. cluster_name: cluster.local
  24. # For some environments, each node has a pubilcally accessible
  25. # address and an address it should bind services to. These are
  26. # really inventory level variables, but described here for consistency.
  27. #
  28. # When advertising access, the access_ip will be used, but will defer to
  29. # ip and then the default ansible ip when unspecified.
  30. #
  31. # When binding to restrict access, the ip variable will be used, but will
  32. # defer to the default ansible ip when unspecified.
  33. #
  34. # The ip variable is used for specific address binding, e.g. listen address
  35. # for etcd. This is use to help with environments like Vagrant or multi-nic
  36. # systems where one address should be preferred over another.
  37. # ip: 10.2.2.2
  38. #
  39. # The access_ip variable is used to define how other nodes should access
  40. # the node. This is used in flannel to allow other flannel nodes to see
  41. # this node for example. The access_ip is really useful AWS and Google
  42. # environments where the nodes are accessed remotely by the "public" ip,
  43. # but don't know about that address themselves.
  44. # access_ip: 1.1.1.1
  45. # Choose network plugin (calico, weave or flannel)
  46. kube_network_plugin: flannel
  47. # Kubernetes internal network for services, unused block of space.
  48. kube_service_addresses: 10.233.0.0/18
  49. # internal network. When used, it will assign IP
  50. # addresses from this range to individual pods.
  51. # This network must be unused in your network infrastructure!
  52. kube_pods_subnet: 10.233.64.0/18
  53. # internal network total size (optional). This is the prefix of the
  54. # entire network. Must be unused in your environment.
  55. # kube_network_prefix: 18
  56. # internal network node size allocation (optional). This is the size allocated
  57. # to each node on your network. With these defaults you should have
  58. # room for 4096 nodes with 254 pods per node.
  59. kube_network_node_prefix: 24
  60. # With calico it is possible to distributed routes with border routers of the datacenter.
  61. peer_with_router: false
  62. # Warning : enabling router peering will disable calico's default behavior ('node mesh').
  63. # The subnets of each nodes will be distributed by the datacenter router
  64. # The port the API Server will be listening on.
  65. kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
  66. kube_apiserver_port: 443 # (https)
  67. kube_apiserver_insecure_port: 8080 # (http)
  68. # Internal DNS configuration.
  69. # Kubernetes can create and mainatain its own DNS server to resolve service names
  70. # into appropriate IP addresses. It's highly advisable to run such DNS server,
  71. # as it greatly simplifies configuration of your applications - you can use
  72. # service names instead of magic environment variables.
  73. # You still must manually configure all your containers to use this DNS server,
  74. # Kubernetes won't do this for you (yet).
  75. # Upstream dns servers used by dnsmasq
  76. upstream_dns_servers:
  77. - 8.8.8.8
  78. - 4.4.8.8
  79. #
  80. # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
  81. dns_setup: true
  82. dns_domain: "{{ cluster_name }}"
  83. #
  84. # # Ip address of the kubernetes skydns service
  85. skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
  86. dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
# There are some changes specific to the cloud providers —
# for instance we need to encapsulate packets with some network plugins.
# If set, the possible values are either 'gce', 'aws' or 'openstack'.
# When openstack is used, make sure to source the openstack credentials
# like you would do when using nova-client before starting the playbook.
# cloud_provider:
# For multi-master architectures:
# kube-proxy doesn't support multiple apiservers for the time being, so you'll
# need to configure your own loadbalancer.
# This domain name will be inserted into the /etc/hosts file of all servers.
# Configuration example with haproxy:
#   listen kubernetes-apiserver-https
#     bind 10.99.0.21:8383
#     option ssl-hello-chk
#     mode tcp
#     timeout client 3h
#     timeout server 3h
#     server master1 10.99.0.26:443
#     server master2 10.99.0.27:443
#     balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"