---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium-envoy"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:

  # Identity allocation mode selects how identities are shared between cilium
  # nodes by setting how they are stored. The options are "crd" or "kvstore".
  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
  #   These can be queried with:
  #     kubectl get ciliumid
  # - "kvstore" stores identities in an etcd kvstore, that is
  #   configured below. Cilium versions before 1.6 supported only the kvstore
  #   backend. Upgrades from these older cilium versions should continue using
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd
  identity-heartbeat-timeout: "30m0s"
  identity-gc-interval: "15m0s"
  cilium-endpoint-gc-interval: "5m0s"
  nodes-gc-interval: "5m0s"

  # If you want to run cilium in debug mode change this value to true
  debug: "false"
  debug-verbose: ""
  # The agent can be put into one of the following three policy enforcement modes:
  # default, always and never.
  # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
  enable-policy: "default"
  policy-cidr-match-mode: ""
  # If you want metrics enabled in cilium-operator, set the port on
  # which the Cilium operator will expose its metrics.
  # NOTE that this will open the port on the nodes where the Cilium operator pod
  # is scheduled.
  operator-prometheus-serve-addr: ":9963"
  enable-metrics: "true"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "false"
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  enable-bpf-clock-probe: "false"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: medium

  # The monitor aggregation interval governs the typical time between monitor
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: "5s"

  # The monitor aggregation flags determine which TCP flags, upon their
  # first observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"
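  # Illustrative sizing: with the default ratio of 0.0025, a node with 16 GiB
  # of RAM dedicates about 0.0025 * 16 GiB ~= 41 MiB in total to these maps.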
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "16384"
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "65536"
  bpf-lb-external-clusterip: "false"

  bpf-events-drop-enabled: "true"
  bpf-events-policy-verdict-enabled: "true"
  bpf-events-trace-enabled: "true"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # As a result, reply packets may be dropped and the load-balancing decisions
  # for established connections may change.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default
  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
  cluster-id: "0"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  # Default case
  routing-mode: "tunnel"
  tunnel-protocol: "vxlan"
  service-no-backend-response: "reject"


  # Enables L7 proxy for L7 policy enforcement and visibility
  enable-l7-proxy: "true"

  enable-ipv4-masquerade: "true"
  enable-ipv4-big-tcp: "false"
  enable-ipv6-big-tcp: "false"
  enable-ipv6-masquerade: "true"
  enable-tcx: "true"
  datapath-mode: "veth"
  enable-masquerade-to-route-source: "false"

  enable-xt-socket-fallback: "true"
  install-no-conntrack-iptables-rules: "false"

  auto-direct-node-routes: "false"
  direct-routing-skip-unreachable: "false"
  enable-local-redirect-policy: "false"
  enable-runtime-device-detection: "true"
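  # Note: kube-proxy-replacement is "false" here, so Cilium runs alongside
  # kube-proxy and each Service-handling feature below is toggled individually.
  # Setting it to "true" typically lets Cilium's eBPF datapath take over
  # Service load-balancing (socket LB, NodePort, HostPort, external IPs).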
  kube-proxy-replacement: "false"
  kube-proxy-replacement-healthz-bind-address: ""
  bpf-lb-sock: "false"
  bpf-lb-sock-terminate-pod-connections: "false"
  enable-host-port: "false"
  enable-external-ips: "false"
  enable-node-port: "false"
  nodeport-addresses: ""
  enable-health-check-nodeport: "true"
  enable-health-check-loadbalancer-ip: "false"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  bpf-lb-acceleration: "disabled"
  enable-svc-source-range-check: "true"
  enable-l2-neigh-discovery: "true"
  arping-refresh-period: "30s"
  k8s-require-ipv4-pod-cidr: "false"
  k8s-require-ipv6-pod-cidr: "false"
  enable-k8s-networkpolicy: "true"
  # Tell the agent to generate and write a CNI configuration file
  write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
  cni-exclusive: "true"
  cni-log-file: "/var/run/cilium/cilium-cni.log"
  enable-endpoint-health-checking: "true"
  enable-health-checking: "true"
  enable-well-known-identities: "false"
  enable-node-selector-labels: "false"
  synchronize-k8s-nodes: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "cluster-pool"
  ipam-cilium-node-update-rate: "15s"
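  # "{{ kube_pods_subnet }}" is a Kubespray (Jinja2) variable, substituted with
  # the cluster's pod CIDR when this template is rendered by Ansible.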
  cluster-pool-ipv4-cidr: "{{ kube_pods_subnet }}"
  cluster-pool-ipv4-mask-size: "24"
  egress-gateway-reconciliation-trigger-interval: "1s"
  enable-vtep: "false"
  vtep-endpoint: ""
  vtep-cidr: ""
  vtep-mask: ""
  vtep-mac: ""
  procfs: "/host/proc"
  bpf-root: "/sys/fs/bpf"
  cgroup-root: "/run/cilium/cgroupv2"
  enable-k8s-terminating-endpoint: "true"
  enable-sctp: "false"

  k8s-client-qps: "10"
  k8s-client-burst: "20"
  remove-cilium-node-taints: "true"
  set-cilium-node-taints: "true"
  set-cilium-is-up-condition: "true"
  unmanaged-pod-watcher-interval: "15"
  # default DNS proxy to transparent mode in non-chaining modes
  dnsproxy-enable-transparent-mode: "true"
  dnsproxy-socket-linger-timeout: "10"
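  # The tofqdns-* settings tune Cilium's DNS proxy, which inspects DNS replies
  # so that toFQDNs policy rules can map allowed hostnames to the IPs they
  # resolve to.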
  tofqdns-dns-reject-response-code: "refused"
  tofqdns-enable-dns-compression: "true"
  tofqdns-endpoint-max-ip-per-hostname: "50"
  tofqdns-idle-connection-grace-period: "0s"
  tofqdns-max-deferred-connection-deletes: "10000"
  tofqdns-proxy-response-max-delay: "100ms"
  agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"

  mesh-auth-enabled: "true"
  mesh-auth-queue-size: "1024"
  mesh-auth-rotated-identities-queue-size: "1024"
  mesh-auth-gc-interval: "5m0s"

  proxy-xff-num-trusted-hops-ingress: "0"
  proxy-xff-num-trusted-hops-egress: "0"
  proxy-connect-timeout: "2"
  proxy-max-requests-per-connection: "0"
  proxy-max-connection-duration-seconds: "0"
  proxy-idle-timeout-seconds: "60"

  external-envoy-proxy: "true"
  envoy-base-id: "0"

  envoy-keep-cap-netbindservice: "false"
  max-connected-clusters: "255"
  clustermesh-enable-endpoint-sync: "false"
  clustermesh-enable-mcs-api: "false"

  nat-map-stats-entries: "32"
  nat-map-stats-interval: "30s"

  # Extra config allows adding arbitrary properties to the cilium config.
  # By putting it at the end of the ConfigMap, it's also possible to override existing properties.
---
# Source: cilium/templates/cilium-envoy/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-envoy-config
  namespace: kube-system
data:
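  # The JSON below is Envoy's static bootstrap configuration: two static
  # listeners (Prometheus metrics on 0.0.0.0:9964, health on 127.0.0.1:9878),
  # ORIGINAL_DST clusters for proxied ingress/egress traffic, and an xDS
  # connection to the Cilium agent over a Unix domain socket.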
  bootstrap-config.json: |
    {
      "node": {
        "id": "host~127.0.0.1~no-id~localdomain",
        "cluster": "ingress-cluster"
      },
      "staticResources": {
        "listeners": [
          {
            "name": "envoy-prometheus-metrics-listener",
            "address": {
              "socket_address": {
                "address": "0.0.0.0",
                "port_value": 9964
              }
            },
            "filter_chains": [
              {
                "filters": [
                  {
                    "name": "envoy.filters.network.http_connection_manager",
                    "typed_config": {
                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
                      "stat_prefix": "envoy-prometheus-metrics-listener",
                      "route_config": {
                        "virtual_hosts": [
                          {
                            "name": "prometheus_metrics_route",
                            "domains": [
                              "*"
                            ],
                            "routes": [
                              {
                                "name": "prometheus_metrics_route",
                                "match": {
                                  "prefix": "/metrics"
                                },
                                "route": {
                                  "cluster": "/envoy-admin",
                                  "prefix_rewrite": "/stats/prometheus"
                                }
                              }
                            ]
                          }
                        ]
                      },
                      "http_filters": [
                        {
                          "name": "envoy.filters.http.router",
                          "typed_config": {
                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
                          }
                        }
                      ],
                      "stream_idle_timeout": "0s"
                    }
                  }
                ]
              }
            ]
          },
          {
            "name": "envoy-health-listener",
            "address": {
              "socket_address": {
                "address": "127.0.0.1",
                "port_value": 9878
              }
            },
            "filter_chains": [
              {
                "filters": [
                  {
                    "name": "envoy.filters.network.http_connection_manager",
                    "typed_config": {
                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
                      "stat_prefix": "envoy-health-listener",
                      "route_config": {
                        "virtual_hosts": [
                          {
                            "name": "health",
                            "domains": [
                              "*"
                            ],
                            "routes": [
                              {
                                "name": "health",
                                "match": {
                                  "prefix": "/healthz"
                                },
                                "route": {
                                  "cluster": "/envoy-admin",
                                  "prefix_rewrite": "/ready"
                                }
                              }
                            ]
                          }
                        ]
                      },
                      "http_filters": [
                        {
                          "name": "envoy.filters.http.router",
                          "typed_config": {
                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
                          }
                        }
                      ],
                      "stream_idle_timeout": "0s"
                    }
                  }
                ]
              }
            ]
          }
        ],
        "clusters": [
          {
            "name": "ingress-cluster",
            "type": "ORIGINAL_DST",
            "connectTimeout": "2s",
            "lbPolicy": "CLUSTER_PROVIDED",
            "typedExtensionProtocolOptions": {
              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
                "commonHttpProtocolOptions": {
                  "idleTimeout": "60s",
                  "maxConnectionDuration": "0s",
                  "maxRequestsPerConnection": 0
                },
                "useDownstreamProtocolConfig": {}
              }
            },
            "cleanupInterval": "2.500s"
          },
          {
            "name": "egress-cluster-tls",
            "type": "ORIGINAL_DST",
            "connectTimeout": "2s",
            "lbPolicy": "CLUSTER_PROVIDED",
            "typedExtensionProtocolOptions": {
              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
                "commonHttpProtocolOptions": {
                  "idleTimeout": "60s",
                  "maxConnectionDuration": "0s",
                  "maxRequestsPerConnection": 0
                },
                "upstreamHttpProtocolOptions": {},
                "useDownstreamProtocolConfig": {}
              }
            },
            "cleanupInterval": "2.500s",
            "transportSocket": {
              "name": "cilium.tls_wrapper",
              "typedConfig": {
                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
              }
            }
          },
          {
            "name": "egress-cluster",
            "type": "ORIGINAL_DST",
            "connectTimeout": "2s",
            "lbPolicy": "CLUSTER_PROVIDED",
            "typedExtensionProtocolOptions": {
              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
                "commonHttpProtocolOptions": {
                  "idleTimeout": "60s",
                  "maxConnectionDuration": "0s",
                  "maxRequestsPerConnection": 0
                },
                "useDownstreamProtocolConfig": {}
              }
            },
            "cleanupInterval": "2.500s"
          },
          {
            "name": "ingress-cluster-tls",
            "type": "ORIGINAL_DST",
            "connectTimeout": "2s",
            "lbPolicy": "CLUSTER_PROVIDED",
            "typedExtensionProtocolOptions": {
              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
                "commonHttpProtocolOptions": {
                  "idleTimeout": "60s",
                  "maxConnectionDuration": "0s",
                  "maxRequestsPerConnection": 0
                },
                "upstreamHttpProtocolOptions": {},
                "useDownstreamProtocolConfig": {}
              }
            },
            "cleanupInterval": "2.500s",
            "transportSocket": {
              "name": "cilium.tls_wrapper",
              "typedConfig": {
                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
              }
            }
          },
          {
            "name": "xds-grpc-cilium",
            "type": "STATIC",
            "connectTimeout": "2s",
            "loadAssignment": {
              "clusterName": "xds-grpc-cilium",
              "endpoints": [
                {
                  "lbEndpoints": [
                    {
                      "endpoint": {
                        "address": {
                          "pipe": {
                            "path": "/var/run/cilium/envoy/sockets/xds.sock"
                          }
                        }
                      }
                    }
                  ]
                }
              ]
            },
            "typedExtensionProtocolOptions": {
              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
                "explicitHttpConfig": {
                  "http2ProtocolOptions": {}
                }
              }
            }
          },
          {
            "name": "/envoy-admin",
            "type": "STATIC",
            "connectTimeout": "2s",
            "loadAssignment": {
              "clusterName": "/envoy-admin",
              "endpoints": [
                {
                  "lbEndpoints": [
                    {
                      "endpoint": {
                        "address": {
                          "pipe": {
                            "path": "/var/run/cilium/envoy/sockets/admin.sock"
                          }
                        }
                      }
                    }
                  ]
                }
              ]
            }
          }
        ]
      },
      "dynamicResources": {
        "ldsConfig": {
          "apiConfigSource": {
            "apiType": "GRPC",
            "transportApiVersion": "V3",
            "grpcServices": [
              {
                "envoyGrpc": {
                  "clusterName": "xds-grpc-cilium"
                }
              }
            ],
            "setNodeOnFirstMessageOnly": true
          },
          "resourceApiVersion": "V3"
        },
        "cdsConfig": {
          "apiConfigSource": {
            "apiType": "GRPC",
            "transportApiVersion": "V3",
            "grpcServices": [
              {
                "envoyGrpc": {
                  "clusterName": "xds-grpc-cilium"
                }
              }
            ],
            "setNodeOnFirstMessageOnly": true
          },
          "resourceApiVersion": "V3"
        }
      },
      "bootstrapExtensions": [
        {
          "name": "envoy.bootstrap.internal_listener",
          "typed_config": {
            "@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"
          }
        }
      ],
      "layeredRuntime": {
        "layers": [
          {
            "name": "static_layer_0",
            "staticLayer": {
              "overload": {
                "global_downstream_max_connections": 50000
              }
            }
          }
        ]
      },
      "admin": {
        "address": {
          "pipe": {
            "path": "/var/run/cilium/envoy/sockets/admin.sock"
          }
        }
      }
    }
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - pods
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - list
  - watch
  # This is used when validating policies in preflight. This will need to stay
  # until we figure out how to avoid "get" inside the preflight; ideally it
  # should then be removed.
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumloadbalancerippools
  - ciliumbgppeeringpolicies
  - ciliumbgpnodeconfigs
  - ciliumbgpadvertisements
  - ciliumbgppeerconfigs
  - ciliumclusterwideenvoyconfigs
  - ciliumclusterwidenetworkpolicies
  - ciliumegressgatewaypolicies
  - ciliumendpoints
  - ciliumendpointslices
  - ciliumenvoyconfigs
  - ciliumidentities
  - ciliumlocalredirectpolicies
  - ciliumnetworkpolicies
  - ciliumnodes
  - ciliumnodeconfigs
  - ciliumcidrgroups
  - ciliuml2announcementpolicies
  - ciliumpodippools
  verbs:
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  - ciliumendpoints
  - ciliumnodes
  verbs:
  - create
- apiGroups:
  - cilium.io
  # To synchronize garbage collection of such resources
  resources:
  - ciliumidentities
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  verbs:
  - delete
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  - ciliumnodes/status
  verbs:
  - get
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints/status
  - ciliumendpoints
  - ciliuml2announcementpolicies/status
  - ciliumbgpnodeconfigs/status
  verbs:
  - patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
  - delete
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  - cilium-config
  verbs:
  # allow patching of the configmap to set annotations
  - patch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # To remove node taints
  - nodes
  # To set NetworkUnavailable false on startup
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform LB IP allocation for BGP
  - services/status
  verbs:
  - update
  - patch
- apiGroups:
  - ""
  resources:
  # to check apiserver connectivity
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
  - services
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumclusterwidenetworkpolicies
  verbs:
  # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
  - create
  - update
  - deletecollection
  # To update the status of the CNPs and CCNPs
  - patch
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/status
  verbs:
  # Update the auto-generated CNPs and CCNPs status.
  - patch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  - ciliumidentities
  verbs:
  # To perform garbage collection of such resources
  - delete
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  verbs:
  # To synchronize garbage collection of such resources
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  verbs:
  - create
  - update
  - get
  - list
  - watch
  # To perform CiliumNode garbage collection
  - delete
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes/status
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpointslices
  - ciliumenvoyconfigs
  - ciliumbgppeerconfigs
  - ciliumbgpadvertisements
  - ciliumbgpnodeconfigs
  verbs:
  - create
  - update
  - get
  - list
  - watch
  - delete
  - patch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - update
  resourceNames:
  - ciliumloadbalancerippools.cilium.io
  - ciliumbgppeeringpolicies.cilium.io
  - ciliumbgpclusterconfigs.cilium.io
  - ciliumbgppeerconfigs.cilium.io
  - ciliumbgpadvertisements.cilium.io
  - ciliumbgpnodeconfigs.cilium.io
  - ciliumbgpnodeconfigoverrides.cilium.io
  - ciliumclusterwideenvoyconfigs.cilium.io
  - ciliumclusterwidenetworkpolicies.cilium.io
  - ciliumegressgatewaypolicies.cilium.io
  - ciliumendpoints.cilium.io
  - ciliumendpointslices.cilium.io
  - ciliumenvoyconfigs.cilium.io
  - ciliumexternalworkloads.cilium.io
  - ciliumidentities.cilium.io
  - ciliumlocalredirectpolicies.cilium.io
  - ciliumnetworkpolicies.cilium.io
  - ciliumnodes.cilium.io
  - ciliumnodeconfigs.cilium.io
  - ciliumcidrgroups.cilium.io
  - ciliuml2announcementpolicies.cilium.io
  - ciliumpodippools.cilium.io
- apiGroups:
  - cilium.io
  resources:
  - ciliumloadbalancerippools
  - ciliumpodippools
  - ciliumbgppeeringpolicies
  - ciliumbgpclusterconfigs
  - ciliumbgpnodeconfigoverrides
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumpodippools
  verbs:
  - create
- apiGroups:
  - cilium.io
  resources:
  - ciliumloadbalancerippools/status
  verbs:
  - patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - update
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cilium-config-agent
  namespace: kube-system
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cilium-config-agent
  namespace: kube-system
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cilium-config-agent
subjects:
- kind: ServiceAccount
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: cilium-envoy
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9964"
  labels:
    k8s-app: cilium-envoy
    app.kubernetes.io/name: cilium-envoy
    app.kubernetes.io/part-of: cilium
    io.cilium/app: proxy
spec:
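  # clusterIP: None makes this a headless Service: no virtual IP is allocated,
  # and scrapers discover the node-local Envoy endpoints behind it directly.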
  clusterIP: None
  type: ClusterIP
  selector:
    k8s-app: cilium-envoy
  ports:
  - name: envoy-metrics
    port: 9964
    protocol: TCP
    targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
  labels:
    k8s-app: cilium
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-agent
spec:
  selector:
    matchLabels:
      k8s-app: cilium
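  # Roll out agent updates on at most two nodes at a time.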
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        k8s-app: cilium
        app.kubernetes.io/name: cilium-agent
        app.kubernetes.io/part-of: cilium
    spec:
      securityContext:
        appArmorProfile:
          type: Unconfined
      containers:
      - name: cilium-agent
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-agent
        args:
        - --config-dir=/tmp/cilium/config-map
        startupProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
          initialDelaySeconds: 5
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 10
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
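        # GOMEMLIMIT sets the Go runtime's soft memory limit; resourceFieldRef
        # with divisor '1' resolves to the container's memory limit in bytes.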
        - name: GOMEMLIMIT
          valueFrom:
            resourceFieldRef:
              resource: limits.memory
              divisor: '1'
        lifecycle:
          postStart:
            exec:
              command:
              - "bash"
              - "-c"
              - |
                set -o errexit
                set -o pipefail
                set -o nounset

                # When running in AWS ENI mode, it's likely that 'aws-node' has
                # had a chance to install SNAT iptables rules. These can result
                # in dropped traffic, so we should attempt to remove them.
                # We do it using a 'postStart' hook since this may need to run
                # for nodes which might have already been init'ed but may still
                # have dangling rules. This is safe because there are no
                # dependencies on anything that is part of the startup script
                # itself, and can be safely run multiple times per node (e.g. in
                # case of a restart).
                if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
                then
                    echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                    iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
                fi
                echo 'Done!'
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            add:
            - CHOWN
            - KILL
            - NET_ADMIN
            - NET_RAW
            - IPC_LOCK
            - SYS_MODULE
            - SYS_ADMIN
            - SYS_RESOURCE
            - DAC_OVERRIDE
            - FOWNER
            - SETGID
            - SETUID
            drop:
            - ALL
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: envoy-sockets
          mountPath: /var/run/cilium/envoy/sockets
          readOnly: false
        # Unprivileged containers need to mount /proc/sys/net from the host
        # to have write access
        - mountPath: /host/proc/sys/net
          name: host-proc-sys-net
        # Unprivileged containers need to mount /proc/sys/kernel from the host
        # to have write access
        - mountPath: /host/proc/sys/kernel
          name: host-proc-sys-kernel
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          # Unprivileged containers can't set mount propagation to bidirectional
          # in this case we will mount the bpf fs from an init container that
          # is privileged and set the mount propagation from host to container
          # in Cilium.
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        # Needed to be able to load kernel modules
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
        - name: tmp
          mountPath: /tmp
      initContainers:
      - name: config
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-dbg
        - build-config
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        terminationMessagePolicy: FallbackToLogsOnError
      # Required to mount cgroup2 filesystem on the underlying Kubernetes node.
      # We use nsenter command with host's cgroup and mount namespaces enabled.
      - name: mount-cgroup
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        env:
        - name: CGROUP_ROOT
          value: /run/cilium/cgroupv2
        - name: BIN_PATH
          value: /opt/cni/bin
        command:
        - sh
        - -ec
        # The statically linked Go program binary is invoked to avoid any
        # dependency on utilities like sh and mount that can be missing on certain
        # distros installed on the underlying host. Copy the binary to the
        # same directory where we install cilium cni plugin so that exec permissions
        # are available.
        - |
          cp /usr/bin/cilium-mount /hostbin/cilium-mount;
          nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
          rm /hostbin/cilium-mount
        volumeMounts:
        - name: hostproc
          mountPath: /hostproc
        - name: cni-path
          mountPath: /hostbin
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            add:
            - SYS_ADMIN
            - SYS_CHROOT
            - SYS_PTRACE
            drop:
            - ALL
      - name: apply-sysctl-overwrites
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        env:
        - name: BIN_PATH
          value: /opt/cni/bin
        command:
        - sh
        - -ec
        # The statically linked Go program binary is invoked to avoid any
        # dependency on utilities like sh that can be missing on certain
        # distros installed on the underlying host. Copy the binary to the
        # same directory where we install cilium cni plugin so that exec permissions
        # are available.
        - |
          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
          rm /hostbin/cilium-sysctlfix
        volumeMounts:
        - name: hostproc
          mountPath: /hostproc
        - name: cni-path
          mountPath: /hostbin
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            add:
            - SYS_ADMIN
            - SYS_CHROOT
            - SYS_PTRACE
            drop:
            - ALL
      # Mount the bpf fs if it is not mounted. We will perform this task
      # from a privileged container because the mount propagation bidirectional
      # only works from privileged containers.
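      # Note: Kubernetes concatenates `command` and `args`, so the container
      # below effectively runs
      #   /bin/bash -c -- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
      # i.e. it mounts the BPF filesystem only if it is not already mounted.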
      - name: mount-bpf-fs
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        args:
        - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
        command:
        - /bin/bash
        - -c
        - --
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          privileged: true
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          mountPropagation: Bidirectional
      - name: clean-cilium-state
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-state
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-bpf-state
              optional: true
        - name: WRITE_CNI_CONF_WHEN_READY
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: write-cni-conf-when-ready
              optional: true
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            add:
            - NET_ADMIN
            - SYS_MODULE
            - SYS_ADMIN
            - SYS_RESOURCE
            drop:
            - ALL
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        # Required to mount cgroup filesystem from the host to cilium agent pod
        - name: cilium-cgroup
          mountPath: /run/cilium/cgroupv2
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium # wait-for-kube-proxy
      # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
      - name: install-cni-binaries
        image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
        imagePullPolicy: IfNotPresent
        command:
        - "/install-plugin.sh"
        resources:
          requests:
            cpu: 100m
            memory: 10Mi
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            drop:
            - ALL
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: cni-path
          mountPath: /host/opt/cni/bin # .Values.cni.install
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccountName: "cilium"
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: cilium
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
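      # A bare "Exists" toleration matches every taint, so the agent runs on
      # all nodes, including control-plane nodes.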
      - operator: Exists
      volumes:
      # For sharing configuration between the "config" initContainer and the agent
      - name: tmp
        emptyDir: {}
      # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for bpf maps
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
      # To mount cgroup2 filesystem on the host or apply sysctlfix
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
      # To keep state between restarts / upgrades for cgroup2 filesystem
      - name: cilium-cgroup
        hostPath:
          path: /run/cilium/cgroupv2
          type: DirectoryOrCreate
      # To install cilium cni plugin in the host
      - name: cni-path
        hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
      # To install cilium cni configuration in the host
      - name: etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
      # To be able to load kernel modules
      - name: lib-modules
        hostPath:
          path: /lib/modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      # Sharing socket with Cilium Envoy on the same node by using a host path
      - name: envoy-sockets
        hostPath:
          path: "/var/run/cilium/envoy/sockets"
          type: DirectoryOrCreate
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        projected:
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          sources:
          - secret:
              name: cilium-clustermesh
              optional: true
              # note: items are not explicitly listed here, since the entries of this secret
              # depend on the peers configured, and that would cause a restart of all agents
              # at every addition/removal. Leaving the field empty causes each secret entry
              # to be automatically projected into the volume as a file whose name is the key.
          - secret:
              name: clustermesh-apiserver-remote-cert
              optional: true
              items:
              - key: tls.key
                path: common-etcd-client.key
              - key: tls.crt
                path: common-etcd-client.crt
              - key: ca.crt
                path: common-etcd-client-ca.crt
          # note: we configure the volume for the kvstoremesh-specific certificate
          # regardless of whether KVStoreMesh is enabled or not, so that it can be
          # automatically mounted in case KVStoreMesh gets subsequently enabled,
          # without requiring an agent restart.
          - secret:
              name: clustermesh-apiserver-local-cert
              optional: true
              items:
              - key: tls.key
                path: local-etcd-client.key
              - key: tls.crt
                path: local-etcd-client.crt
              - key: ca.crt
                path: local-etcd-client-ca.crt
      - name: host-proc-sys-net
        hostPath:
          path: /proc/sys/net
          type: Directory
      - name: host-proc-sys-kernel
        hostPath:
          path: /proc/sys/kernel
          type: Directory
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium-envoy
  namespace: kube-system
  labels:
    k8s-app: cilium-envoy
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-envoy
    name: cilium-envoy
spec:
  selector:
    matchLabels:
      k8s-app: cilium-envoy
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        k8s-app: cilium-envoy
        name: cilium-envoy
        app.kubernetes.io/name: cilium-envoy
        app.kubernetes.io/part-of: cilium
    spec:
      securityContext:
        appArmorProfile:
          type: Unconfined
      containers:
      - name: cilium-envoy
        image: "quay.io/cilium/cilium-envoy:v1.29.9-1728346947-0d05e48bfbb8c4737ec40d5781d970a550ed2bbd@sha256:42614a44e508f70d03a04470df5f61e3cffd22462471a0be0544cf116f2c50ba"
        imagePullPolicy: IfNotPresent
        command:
        - /usr/bin/cilium-envoy-starter
        args:
        - '--'
        - '-c /var/run/cilium/envoy/bootstrap-config.json'
        - '--base-id 0'
        - '--log-level info'
        - '--log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v'
        startupProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9878
            scheme: HTTP
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
          initialDelaySeconds: 5
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9878
            scheme: HTTP
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 10
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9878
            scheme: HTTP
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        ports:
        - name: envoy-metrics
          containerPort: 9964
          hostPort: 9964
          protocol: TCP
        securityContext:
          seLinuxOptions:
            level: s0
            type: spc_t
          capabilities:
            add:
            - NET_ADMIN
            - SYS_ADMIN
            drop:
            - ALL
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: envoy-sockets
          mountPath: /var/run/cilium/envoy/sockets
          readOnly: false
        - name: envoy-artifacts
          mountPath: /var/run/cilium/envoy/artifacts
          readOnly: true
        - name: envoy-config
          mountPath: /var/run/cilium/envoy/
          readOnly: true
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          mountPropagation: HostToContainer
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccountName: "cilium-envoy"
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: cilium.io/no-schedule
                operator: NotIn
                values:
                - "true"
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: cilium
            topologyKey: kubernetes.io/hostname
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: cilium-envoy
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      - operator: Exists
      volumes:
      - name: envoy-sockets
        hostPath:
          path: "/var/run/cilium/envoy/sockets"
          type: DirectoryOrCreate
      - name: envoy-artifacts
        hostPath:
          path: "/var/run/cilium/envoy/artifacts"
          type: DirectoryOrCreate
      - name: envoy-config
        configMap:
          name: cilium-envoy-config
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          items:
          - key: bootstrap-config.json
            path: bootstrap-config.json
      # To keep state between restarts / upgrades for bpf maps
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
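  # With two replicas, only one operator instance is active at a time; the
  # standby takes over via the Lease-based leader election permitted by the
  # ClusterRole above.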
  replicas: 2
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  # Ensure the operator can update on single-node k8s clusters by using a rolling
  # update with maxUnavailable=100% when there is one replica and no user-configured
  # Recreate strategy. Otherwise an update might get stuck due to the default
  # maxUnavailable=50% combined with the podAntiAffinity, which prevents
  # scheduling multiple operator replicas on the same node.
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "9963"
        prometheus.io/scrape: "true"
      labels:
        io.cilium/app: operator
        name: cilium-operator
        app.kubernetes.io/part-of: cilium
        app.kubernetes.io/name: cilium-operator
    spec:
      containers:
      - name: cilium-operator
        image: "quay.io/cilium/operator-generic:v1.16.3@sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        ports:
        - name: prometheus
          containerPort: 9963
          hostPort: 9963
          protocol: TCP
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 5
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        terminationMessagePolicy: FallbackToLogsOnError
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccountName: "cilium-operator"
      automountServiceAccountToken: true
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                io.cilium/app: operator
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      - operator: Exists
      volumes:
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config