# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where the basic auth file is saved
kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.9.2
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1 GiB)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for a user (see the commented example below)
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
    groups:
      - system:masters
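## Illustrative example of an additional basic-auth user. The name, password and
## group below are assumptions, not defaults; the structure mirrors the "kube" entry above.
#  alice:
#    pass: "changeme"
#    role: admin
#    groups:
#      - system:basic-user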
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
#kube_basic_auth: false
#kube_token_auth: false
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_groups_claim: groups
# Choose network plugin (calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
# Weave's network password for encryption
# If null, network encryption is disabled
# You can use --extra-vars to pass the password on the command line (see the example below)
weave_password: EnterPasswordHere
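## Illustrative example of passing the password on the command line instead of
## storing it here (the inventory path is an assumption):
#   ansible-playbook -i inventory/inventory.cfg cluster.yml -e weave_password='MySecretPassword'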
# Weave uses consensus mode by default
# Enabling seed mode allows hosts to be added or removed dynamically
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are changed automatically by the weave role; do not change them manually
# To reset values:
# weave_seed: uninitialized
# weave_peers: uninitialized
weave_seed: uninitialized
weave_peers: uninitialized
# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
weave_mtu: 1376
# Enable kubernetes network policies
enable_network_policy: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# Kubernetes internal network for pods. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# Internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults (a /18 pods subnet split into
# /24 blocks per node) you have room for 64 nodes with 254 pods per node.
kube_network_node_prefix: 24
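## Worked example with the defaults above (illustrative arithmetic only):
##   node blocks   = 2^(24 - 18)     = 64
##   pods per node = 2^(32 - 24) - 2 = 254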
# The IP and port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
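# (With the default kube_service_addresses above this evaluates to 10.233.0.1.)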
kube_apiserver_port: 6443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0 # (disabled)
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: iptables
# DNS configuration.
# Kubernetes cluster name, also used as the DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the Kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
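# (With the default kube_service_addresses above these evaluate to 10.233.0.3 and 10.233.0.2.)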
dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self-hosted registries (see the commented example below the default):
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
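## Illustrative example of additionally allowing a self-hosted insecure registry
## (the registry host is an assumption):
# docker_options: "--insecure-registry=registry.example.local:5000 --insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"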
docker_bin_dir: "/usr/bin"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. See docs/getting-started.md for access details.
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false
# Helm deployment
helm_enabled: false
# Istio deployment
istio_enabled: false
# Registry deployment
registry_enabled: false
# Local volume provisioner deployment
# Deprecated, will be removed
local_volumes_enabled: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled }}"
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "{{ system_namespace }}"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors:
# - 172.24.0.1:6789
# - 172.24.0.2:6789
# - 172.24.0.3:6789
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# Add Persistent Volumes Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
# kubectl_localhost: false
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8
# Enable creation of QoS cgroup hierarchy; if true, top-level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true
# A comma-separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Supplementary addresses that can be added in kubernetes ssl keys.
## This can be useful, for example, to set up a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
## Running on top of OpenStack VMs with Cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to work around this issue
volume_cross_zone_attachment: false