
create separate options files for network plugins

remove plugin options from common files
pull/3337/head
Sergey Bondarev, 6 years ago · commit 93429bc661
8 changed files with 121 additions and 23 deletions:
  1. inventory/sample/group_vars/all/all.yml (7)
  2. inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml (16)
  3. inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml (15)
  4. inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml (11)
  5. inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml (1)
  6. inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml (20)
  7. inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml (16)
  8. inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml (58)

inventory/sample/group_vars/all/all.yml (7)

@@ -43,13 +43,6 @@ bin_dir: /usr/local/bin
## The subnets of each node will be distributed by the datacenter router
#peer_with_router: false
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as) and "neighbor"
#contiv_peer_with_uplink_leaf: false
#contiv_global_as: "65002"
#contiv_global_neighbor_as: "500"
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8

inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml (16)

@@ -70,22 +70,6 @@ kube_users:
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: calico
# Weave deployment
# weave_password: ~
# weave_checkpoint_disable: false
# weave_conn_limit: 100
# weave_hairpin_mode: true
# weave_ipalloc_range: {{ kube_pods_subnet }}
# weave_expect_npc: {{ enable_network_policy }}
# weave_kube_peers: ~
# weave_ipalloc_init: ~
# weave_expose_ip: ~
# weave_metrics_addr: ~
# weave_status_addr: ~
# weave_mtu: 1376
# weave_no_masq_local: true
# weave_extra_args: ~
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml (15)

@@ -0,0 +1,15 @@
# see roles/network_plugin/calico/defaults/main.yml
# Enables Internet connectivity from containers
# nat_outgoing: true
# add default ippool name
# calico_pool_name: "default-pool"
# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"
# You can set MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500
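
A minimal sketch of overriding the options above in this sample file; the values are illustrative placeholders, not part of this commit:
nat_outgoing: true                # SNAT pod traffic leaving the cluster
calico_pool_name: "default-pool"
calico_mtu: 1440                  # plain number, not a quoted string; 1440 is only a placeholder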

inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml (11)

@@ -0,0 +1,11 @@
# see roles/network_plugin/canal/defaults/main.yml
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
# canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"
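
For illustration only, the same two variables uncommented with placeholder values:
canal_iface: "eth1"         # pin host <-> host traffic to a specific NIC (placeholder name)
canal_masquerade: "true"    # masquerade traffic leaving the pod network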

inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml (1)

@@ -0,0 +1 @@
# see roles/network_plugin/cilium/defaults/main.yml

inventory/sample/group_vars/k8s-cluster/k8s-net-contiv.yml (20)

@@ -0,0 +1,20 @@
# see roles/network_plugin/contiv/defaults/main.yml
# Forwarding mode: bridge or routing
# contiv_fwd_mode: routing
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as) and "neighbor" (see the sketch after this file)
#contiv_peer_with_uplink_leaf: false
#contiv_global_as: "65002"
#contiv_global_neighbor_as: "500"
# Fabric mode: aci, aci-opflex or default
# contiv_fabric_mode: default
# Default netmode: vxlan or vlan
# contiv_net_mode: vxlan
# Dataplane interface
# contiv_vlan_interface: ""
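
The BGP peering note above refers to a per-host "contiv" dict; a hypothetical host_vars entry, with placeholder values not taken from this commit, could look like:
contiv:
  router_ip: 10.1.0.11      # IP this node advertises to its BGP neighbor (placeholder)
  as: "65002"               # optional, falls back to contiv_global_as
  neighbor_as: "500"        # optional, falls back to contiv_global_neighbor_as
  neighbor: 10.1.0.1        # address of the uplink leaf to peer with (placeholder)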

inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml (16)

@@ -0,0 +1,16 @@
# see roles/network_plugin/flannel/defaults/main.yml
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:
## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## single quote and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'.
# For experimental backends, please refer to flannel's docs:
# https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
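
A usage sketch reusing the regexp example from the comments above; the backend choice is illustrative only:
flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'   # single-quoted, backslashes escaped
flannel_backend_type: "host-gw"                          # or the default "vxlan"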

inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml (58)

@@ -0,0 +1,58 @@
# see roles/network_plugin/weave/defaults/main.yml
# Weave's network password for encryption, if null then no network encryption.
# weave_password: ~
# If set to 1, disables checking for new Weave Net versions (default is blank,
# i.e. the check is enabled)
# weave_checkpoint_disable: false
# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100
# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for containers attached. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true
# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"
# Set to 0 to disable Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"
# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~
# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~
# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~
# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~
# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~
# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376
# Set to 1 to preserve the client source IP address when accessing Services
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (default).
# weave_no_masq_local: true
# Extra variables that are passed to launch.sh, useful for enabling seed mode; see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~
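
As a usage sketch, turning on encryption and lowering the MTU only requires uncommenting and setting the corresponding variables; both values below are placeholders, not part of this commit:
weave_password: "ChangeMe"   # any non-null value enables network encryption
weave_mtu: 1336              # below the 1376 default, for underlays with a tighter limit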