You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

191 lines
6.8 KiB

contiv network support (#1914) * Add Contiv support Contiv is a network plugin for Kubernetes and Docker. It supports vlan/vxlan/BGP/Cisco ACI technologies. It supports firewall policies, multiple networks and bridging pods onto physical networks. * Update contiv version to 1.1.4 Update contiv version to 1.1.4 and added SVC_SUBNET in contiv-config. * Load openvswitch module to workaround on CentOS7.4 * Set contiv cni version to 0.1.0 Correct contiv CNI version to 0.1.0. * Use kube_apiserver_endpoint for K8S_API_SERVER Use kube_apiserver_endpoint as K8S_API_SERVER to make contiv talk to an available endpoint no matter if there's a loadbalancer or not. * Make contiv use its own etcd Before this commit, contiv was using an etcd proxy mode to the k8s etcd; this works fine when the etcd hosts are co-located with the contiv etcd proxy. However, the k8s peering certs are only in the etcd group, so the etcd-proxy is not able to peer with the k8s etcd on the etcd group. In addition, the netplugin always tries to find the etcd endpoint on localhost, which causes problems for all netplugins not running on etcd group nodes. This commit makes contiv use its own etcd, separate from the k8s one: on kube-master nodes (where net-master runs) it runs in leader mode, and on all remaining nodes it runs in proxy mode. * Use cp instead of rsync to copy cni binaries Since rsync has been removed from hyperkube, this commit changes it to use cp instead. * Make contiv-etcd able to run on master nodes * Add rbac_enabled flag for contiv pods * Add contiv into CNI network plugin lists * migrate contiv test to tests/files Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com> * Add required rules for contiv netplugin * Better handling of JSON return of fwdMode * Make contiv etcd port configurable * Use default var instead of templating * roles/download/defaults/main.yml: use contiv 1.1.7 Signed-off-by: Cristian Staretu <cristian.staretu@gmail.com>
7 years ago
  1. # Kubernetes configuration dirs and system namespace.
  2. # Those are where all the additional config stuff goes
  3. # the kubernetes normally puts in /srv/kubernets.
  4. # This puts them in a sane location and namespace.
  5. # Editting those values will almost surely break something.
  6. kube_config_dir: /etc/kubernetes
  7. kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
  8. kube_manifest_dir: "{{ kube_config_dir }}/manifests"
  9. system_namespace: kube-system
  10. # This is where all the cert scripts and certs will be located
  11. kube_cert_dir: "{{ kube_config_dir }}/ssl"
  12. # This is where all of the bearer tokens will be stored
  13. kube_token_dir: "{{ kube_config_dir }}/tokens"
  14. # This is where to save basic auth file
  15. kube_users_dir: "{{ kube_config_dir }}/users"
  16. kube_api_anonymous_auth: true
  17. ## Change this to use another Kubernetes version, e.g. a current beta release
  18. kube_version: v1.9.1
  19. # Where the binaries will be downloaded.
  20. # Note: ensure that you've enough disk space (about 1G)
  21. local_release_dir: "/tmp/releases"
  22. # Random shifts for retrying failed ops like pushing/downloading
  23. retry_stagger: 5
  24. # This is the group that the cert creation scripts chgrp the
  25. # cert files to. Not really changable...
  26. kube_cert_group: kube-cert
  27. # Cluster Loglevel configuration
  28. kube_log_level: 2
  29. # Users to create for basic auth in Kubernetes API via HTTP
  30. # Optionally add groups for user
  31. kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
  32. kube_users:
  33. kube:
  34. pass: "{{kube_api_pwd}}"
  35. role: admin
  36. groups:
  37. - system:masters
  38. ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
  39. #kube_oidc_auth: false
  40. #kube_basic_auth: false
  41. #kube_token_auth: false
  42. ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
  43. ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
  44. # kube_oidc_url: https:// ...
  45. # kube_oidc_client_id: kubernetes
  46. ## Optional settings for OIDC
  47. # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
  48. # kube_oidc_username_claim: sub
  49. # kube_oidc_groups_claim: groups
  50. # Choose network plugin (calico, contiv, weave or flannel)
  51. # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
  52. kube_network_plugin: calico
  53. # weave's network password for encryption
  54. # if null then no network encryption
  55. # you can use --extra-vars to pass the password in command line
  56. weave_password: EnterPasswordHere
  57. # Weave uses consensus mode by default
  58. # Enabling seed mode allow to dynamically add or remove hosts
  59. # https://www.weave.works/docs/net/latest/ipam/
  60. weave_mode_seed: false
  61. # This two variable are automatically changed by the weave's role, do not manually change these values
  62. # To reset values :
  63. # weave_seed: uninitialized
  64. # weave_peers: uninitialized
  65. weave_seed: uninitialized
  66. weave_peers: uninitialized
  67. # Enable kubernetes network policies
  68. enable_network_policy: false
  69. # Kubernetes internal network for services, unused block of space.
  70. kube_service_addresses: 10.233.0.0/18
  71. # internal network. When used, it will assign IP
  72. # addresses from this range to individual pods.
  73. # This network must be unused in your network infrastructure!
  74. kube_pods_subnet: 10.233.64.0/18
  75. # internal network node size allocation (optional). This is the size allocated
  76. # to each node on your network. With these defaults you should have
  77. # room for 4096 nodes with 254 pods per node.
  78. kube_network_node_prefix: 24
  79. # The port the API Server will be listening on.
  80. kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
  81. kube_apiserver_port: 6443 # (https)
  82. kube_apiserver_insecure_port: 8080 # (http)
  83. # Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
  84. #kube_apiserver_insecure_port: 0 # (disabled)
  85. # DNS configuration.
  86. # Kubernetes cluster name, also will be used as DNS domain
  87. cluster_name: cluster.local
  88. # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
  89. ndots: 2
  90. # Can be dnsmasq_kubedns, kubedns, manual or none
  91. dns_mode: kubedns
  92. # Set manual server if using a custom cluster DNS server
  93. #manual_dns_server: 10.x.x.x
  94. # Can be docker_dns, host_resolvconf or none
  95. resolvconf_mode: docker_dns
  96. # Deploy netchecker app to verify DNS resolve as an HTTP service
  97. deploy_netchecker: false
  98. # Ip address of the kubernetes skydns service
  99. skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
  100. dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
  101. dns_domain: "{{ cluster_name }}"
  102. # Path used to store Docker data
  103. docker_daemon_graph: "/var/lib/docker"
  104. ## A string of extra options to pass to the docker daemon.
  105. ## This string should be exactly as you wish it to appear.
  106. ## An obvious use case is allowing insecure-registry access
  107. ## to self hosted registries like so:
  108. docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
  109. docker_bin_dir: "/usr/bin"
  110. # Settings for containerized control plane (etcd/kubelet/secrets)
  111. etcd_deployment_type: docker
  112. kubelet_deployment_type: host
  113. vault_deployment_type: docker
  114. helm_deployment_type: host
  115. # K8s image pull policy (imagePullPolicy)
  116. k8s_image_pull_policy: IfNotPresent
  117. # Kubernetes dashboard
  118. # RBAC required. see docs/getting-started.md for access details.
  119. dashboard_enabled: true
  120. # Monitoring apps for k8s
  121. efk_enabled: false
  122. # Helm deployment
  123. helm_enabled: false
  124. # Istio deployment
  125. istio_enabled: false
  126. # Local volume provisioner deployment
  127. local_volumes_enabled: false
  128. # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
  129. persistent_volumes_enabled: false
  130. # Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
  131. # kubeconfig_localhost: false
  132. # Download kubectl onto the host that runs Ansible in GITDIR/artifacts
  133. # kubectl_localhost: false
  134. # dnsmasq
  135. # dnsmasq_upstream_dns_servers:
  136. # - /resolvethiszone.with/10.0.4.250
  137. # - 8.8.8.8
  138. # Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
  139. # kubelet_cgroups_per_qos: true
  140. # A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
  141. # Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
  142. # kubelet_enforce_node_allocatable: pods
  143. ## Supplementary addresses that can be added in kubernetes ssl keys.
  144. ## That can be useful for example to setup a keepalived virtual IP
  145. # supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
  146. ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
  147. ## See https://github.com/kubernetes-incubator/kubespray/issues/2141
  148. ## Set this variable to true to get rid of this issue
  149. volume_cross_zone_attachment: false