nodelocaldns: allow a secondary pod for nodelocaldns for local-HA (#8100)
* nodelocaldns: allow a secondary pod for nodelocaldns for local-HA
* CI: add job to test nodelocaldns secondary
Cristian Calin authored 3 years ago, committed by GitHub
12 changed files with 281 additions and 17 deletions
  5  .gitlab-ci/packet.yml
 16  docs/dns-stack.md
  3  inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml
  2  roles/download/defaults/main.yml
  2  roles/kubernetes-apps/ansible/defaults/main.yml
  1  roles/kubernetes-apps/ansible/tasks/main.yml
 28  roles/kubernetes-apps/ansible/tasks/nodelocaldns.yml
 88  roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2
 32  roles/kubernetes-apps/ansible/templates/nodelocaldns-daemonset.yml.j2
103  roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2
  3  roles/kubespray-defaults/defaults/main.yaml
 15  tests/files/packet_centos8-calico-nodelocaldns-secondary.yml
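The feature is driven by inventory variables: the toggle lands in the sample group_vars and the defaults in roles/kubespray-defaults/defaults/main.yaml (those hunks are not expanded in this view). A minimal sketch of how a cluster might opt in, using the variable names referenced by the templates below; the concrete values are illustrative, not copied from this commit:

# group_vars/k8s_cluster/k8s-cluster.yml (sketch, not the committed defaults)
enable_nodelocaldns: true
# New with this change: run a second node-local DNS DaemonSet for local HA
enable_nodelocaldns_secondary: true
# Link-local address both instances answer on (usual kubespray default; verify in your inventory)
nodelocaldns_ip: 169.254.25.10
# Seconds the secondary lingers on shutdown so the two pods never stop at the same time (illustrative value)
nodelocaldns_secondary_skew_seconds: 5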
roles/kubernetes-apps/ansible/templates/nodelocaldns-second-daemonset.yml.j2
@@ -0,0 +1,103 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns-second
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns-second
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns-second
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '{{ nodelocaldns_secondary_prometheus_port }}'
    spec:
      nodeSelector:
        {{ nodelocaldns_ds_nodeselector }}
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default # Don't use cluster DNS.
      tolerations:
      - effect: NoSchedule
        operator: "Exists"
      - effect: NoExecute
        operator: "Exists"
      containers:
      - name: node-cache
        image: "{{ nodelocaldns_image_repo }}:{{ nodelocaldns_image_tag }}"
        resources:
          limits:
            memory: {{ nodelocaldns_memory_limit }}
          requests:
            cpu: {{ nodelocaldns_cpu_requests }}
            memory: {{ nodelocaldns_memory_requests }}
        args: [ "-localip", "{{ nodelocaldns_ip }}", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns", "-skipteardown" ]
        securityContext:
          privileged: true
{% if nodelocaldns_bind_metrics_host_ip %}
        env:
        - name: MY_HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
{% endif %}
        livenessProbe:
          httpGet:
            host: {{ nodelocaldns_ip }}
            path: /health
            port: {{ nodelocaldns_health_port }}
            scheme: HTTP
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 10
        readinessProbe:
          httpGet:
            host: {{ nodelocaldns_ip }}
            path: /health
            port: {{ nodelocaldns_health_port }}
            scheme: HTTP
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 10
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        - name: xtables-lock
          mountPath: /run/xtables.lock
        lifecycle:
          preStop:
            exec:
              command:
              - sh
              - -c
              - sleep {{ nodelocaldns_secondary_skew_seconds }} && kill -9 1
      volumes:
      - name: config-volume
        configMap:
          name: nodelocaldns
          items:
          - key: Corefile-second
            path: Corefile
{% if dns_etchosts | default(None) %}
          - key: hosts
            path: hosts
{% endif %}
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      # Implement a time skew between the main nodelocaldns and this secondary.
      # Since the two nodelocaldns instances share the :53 port, we want to keep
      # at least one running at any time even if the manifests are replaced simultaneously
      terminationGracePeriodSeconds: {{ nodelocaldns_secondary_skew_seconds }}
  updateStrategy:
    rollingUpdate:
      maxUnavailable: {{ serial | default('20%') }}
    type: RollingUpdate
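For context on the pairing above: both the preStop sleep and terminationGracePeriodSeconds render from the same nodelocaldns_secondary_skew_seconds variable, so the secondary keeps answering on :53 for that many seconds while the primary DaemonSet is being replaced. With a hypothetical skew of 5 seconds, the rendered excerpt would look roughly like this (illustrative rendering, not part of the commit):

# Rendered excerpt, assuming nodelocaldns_secondary_skew_seconds: 5
        lifecycle:
          preStop:
            exec:
              command:
              - sh
              - -c
              # wait out the skew before killing PID 1, so the primary has time to rebind :53
              - sleep 5 && kill -9 1
      terminationGracePeriodSeconds: 5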
tests/files/packet_centos8-calico-nodelocaldns-secondary.yml
@@ -0,0 +1,15 @@
---
# Instance settings
cloud_image: centos-8
mode: default
vm_memory: 3072Mi

# Kubespray settings
kube_network_plugin: calico
deploy_netchecker: true
dns_min_replicas: 1
enable_nodelocaldns_secondary: true
loadbalancer_apiserver_type: haproxy

# required
calico_iptables_backend: "Auto"
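The 5-line addition to .gitlab-ci/packet.yml (not expanded in this view) wires the scenario above into CI. Following the naming pattern of the other packet jobs, it would look roughly like the sketch below; only the job name is taken from the test file, while the stage, extends anchor, and trigger condition are assumptions, not copied from the commit:

# .gitlab-ci/packet.yml (sketch)
packet_centos8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success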