pre-commit: apply autofix hooks and fix the rest manually
- markdownlint (manual fix)
- end-of-file-fixer
- requirements-txt-fixer
- trailing-whitespace
Max Gautier committed 6 months ago (pull/11244/head)
44 changed files with 42 additions and 47 deletions
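The hooks named in the message are standard pre-commit hooks: three of them rewrite files automatically, while markdownlint only reports problems (hence the manual fixes). A minimal sketch of a `.pre-commit-config.yaml` that wires them up (the `rev` pins below are hypothetical; Kubespray's actual configuration may differ):

```yaml
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0  # hypothetical pin
    hooks:
      - id: end-of-file-fixer       # files end with exactly one newline
      - id: requirements-txt-fixer  # sorts requirements*.txt entries
      - id: trailing-whitespace     # strips whitespace at end of line
  - repo: https://github.com/igorshubovych/markdownlint-cli
    rev: v0.41.0  # hypothetical pin
    hooks:
      - id: markdownlint            # lints only; findings fixed by hand here
```

With a config like this, `pre-commit run --all-files` applies the three autofixers in one pass; the markdownlint findings were then fixed by hand, per the commit message.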
Changed files:

- contrib/terraform/nifcloud/README.md
- contrib/terraform/upcloud/cluster-settings.tfvars
- contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
- contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf
- contrib/terraform/upcloud/sample-inventory/cluster.tfvars
- docs/operations/recover-control-plane.md
- inventory/sample/group_vars/etcd.yml
- requirements.txt
- roles/container-engine/containerd/defaults/main.yml
- roles/container-engine/containerd/templates/config.toml.j2
- roles/etcd/defaults/main.yml
- roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2
- roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2
- roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-regional.yml.j2
- roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-zonal.yml.j2
- roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
- roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-role-bindings.yml.j2
- roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-roles.yml.j2
- roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2
- roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2
- roles/kubernetes-apps/metallb/defaults/main.yml
- roles/kubernetes-apps/node_feature_discovery/templates/nfd-rolebinding.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/appgroup.diktyo.x-k8s.io_appgroups.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/cm-scheduler-plugins.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/deploy-scheduler-plugins.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/namespace.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/networktopology.diktyo.x-k8s.io_networktopologies.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/rbac-scheduler-plugins.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/sa-scheduler-plugins.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_elasticquotas.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_podgroups.yaml.j2
- roles/kubernetes-apps/scheduler_plugins/templates/topology.node.k8s.io_noderesourcetopologies.yaml.j2
- roles/kubernetes/control-plane/templates/apiserver-tracing.yaml.j2
- roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
- roles/network_plugin/calico/templates/calico-config.yml.j2
- roles/network_plugin/cilium/templates/cilium/config.yml.j2
- roles/network_plugin/cilium/templates/cilium/cr.yml.j2
- roles/network_plugin/cilium/templates/hubble/config.yml.j2
- roles/network_plugin/cilium/templates/hubble/service.yml.j2
- roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2
- scale.yml
- scripts/openstack-cleanup/main.py
- tests/files/vagrant_ubuntu20-flannel-collection.rb
- tests/requirements.txt
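Reading the diffs below: most hunks pair a removed line with an added line that render identically, because the removed copy differs only by trailing whitespace or a missing final newline. Hunks that shrink by one line drop a trailing blank line at end of file, and the two `requirements.txt` hunks are `requirements-txt-fixer` re-sorting entries alphabetically (case-insensitively, which is why `jsonschema` lands before `MarkupSafe`). A minimal sketch of what the two whitespace fixers do (an assumption in the details; the real hooks live in pre-commit/pre-commit-hooks and run independently):

```python
from pathlib import Path

def normalize(path: Path) -> bool:
    """Strip trailing whitespace and end the file with exactly one newline."""
    original = path.read_text()
    if not original:
        return False  # the real end-of-file-fixer leaves empty files alone
    # trailing-whitespace: drop spaces/tabs at the end of every line
    lines = [line.rstrip() for line in original.splitlines()]
    # end-of-file-fixer: no trailing blank lines, exactly one final newline
    fixed = "\n".join(lines).rstrip("\n") + "\n"
    if fixed == original:
        return False
    path.write_text(fixed)
    return True
```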
````diff
--- a/contrib/terraform/nifcloud/README.md
+++ b/contrib/terraform/nifcloud/README.md
@@ -72,6 +72,7 @@ The setup looks like following
 
 ```bash
 ./generate-inventory.sh > sample-inventory/inventory.ini
 ```
+
 * Export Variables:
 
````
```diff
--- a/contrib/terraform/upcloud/cluster-settings.tfvars
+++ b/contrib/terraform/upcloud/cluster-settings.tfvars
@@ -146,4 +146,4 @@ server_groups = {
 # ]
 # anti_affinity_policy = "yes"
 # }
-}
+}
```
```diff
--- a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
+++ b/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf
@@ -558,4 +558,4 @@ resource "upcloud_server_group" "server_groups" {
 anti_affinity_policy = each.value.anti_affinity_policy
 labels = {}
 members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
-}
+}
```
```diff
--- a/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf
+++ b/contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf
@@ -106,4 +106,4 @@ variable "server_groups" {
 anti_affinity_policy = string
 servers = list(string)
 }))
-}
+}
```
```diff
--- a/contrib/terraform/upcloud/sample-inventory/cluster.tfvars
+++ b/contrib/terraform/upcloud/sample-inventory/cluster.tfvars
@@ -146,4 +146,4 @@ server_groups = {
 # ]
 # anti_affinity_policy = "yes"
 # }
-}
+}
```
```diff
--- a/docs/operations/recover-control-plane.md
+++ b/docs/operations/recover-control-plane.md
@@ -8,7 +8,6 @@ Examples of what broken means in this context:
 * One or more bare metal node(s) suffer from unrecoverable hardware failure
 * One or more node(s) fail during patching or upgrading
 * Etcd database corruption
-
 * Other node related failures leaving your control plane degraded or nonfunctional
 
 __Note that you need at least one functional node to be able to recover using this method.__
```
```diff
--- a/inventory/sample/group_vars/etcd.yml
+++ b/inventory/sample/group_vars/etcd.yml
@@ -32,4 +32,4 @@
 # etcd_experimental_enable_distributed_tracing: false
 # etcd_experimental_distributed_tracing_sample_rate: 100
 # etcd_experimental_distributed_tracing_address: "localhost:4317"
-# etcd_experimental_distributed_tracing_service_name: etcd
+# etcd_experimental_distributed_tracing_service_name: etcd
```
```diff
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,9 +2,9 @@ ansible==9.5.1
 cryptography==42.0.7
 jinja2==3.1.4
 jmespath==1.0.1
+jsonschema==4.22.0
 MarkupSafe==2.1.5
 netaddr==1.2.1
 pbr==6.0.0
 ruamel.yaml==0.18.6
 ruamel.yaml.clib==0.2.8
-jsonschema==4.22.0
```
```diff
--- a/roles/container-engine/containerd/defaults/main.yml
+++ b/roles/container-engine/containerd/defaults/main.yml
@@ -116,4 +116,4 @@ containerd_tracing_enabled: false
 containerd_tracing_endpoint: "0.0.0.0:4317"
 containerd_tracing_protocol: "grpc"
 containerd_tracing_sampling_ratio: 1.0
-containerd_tracing_service_name: "containerd"
+containerd_tracing_service_name: "containerd"
```
```diff
--- a/roles/container-engine/containerd/templates/config.toml.j2
+++ b/roles/container-engine/containerd/templates/config.toml.j2
@@ -107,4 +107,3 @@ oom_score = {{ containerd_oom_score }}
 sampling_ratio = {{ containerd_tracing_sampling_ratio }}
 service_name = "{{ containerd_tracing_service_name }}"
 {% endif %}
-
```
```diff
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -124,4 +124,4 @@ unsafe_show_logs: false
 etcd_experimental_enable_distributed_tracing: false
 etcd_experimental_distributed_tracing_sample_rate: 100
 etcd_experimental_distributed_tracing_address: "localhost:4317"
-etcd_experimental_distributed_tracing_service_name: etcd
+etcd_experimental_distributed_tracing_service_name: etcd
```
```diff
--- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2
@@ -162,4 +162,4 @@ metadata:
 name: pd.csi.storage.gke.io
 spec:
 attachRequired: true
-podInfoOnMount: false
+podInfoOnMount: false
```
```diff
--- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2
@@ -109,4 +109,4 @@ spec:
 # See "special case". This will tolerate everything. Node component should
 # be scheduled on all nodes.
 tolerations:
-- operator: Exists
+- operator: Exists
```
```diff
--- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-regional.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-regional.yml.j2
@@ -6,4 +6,4 @@ provisioner: pd.csi.storage.gke.io
 parameters:
 type: pd-balanced
 replication-type: regional-pd
-volumeBindingMode: WaitForFirstConsumer
+volumeBindingMode: WaitForFirstConsumer
```
```diff
--- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-zonal.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-zonal.yml.j2
@@ -5,4 +5,4 @@ metadata:
 provisioner: pd.csi.storage.gke.io
 parameters:
 type: pd-balanced
-volumeBindingMode: WaitForFirstConsumer
+volumeBindingMode: WaitForFirstConsumer
```
```diff
--- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
+++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2
@@ -18,7 +18,7 @@ data:
 "max-pvscsi-targets-per-vm": "true"
 "multi-vcenter-csi-topology": "true"
 "csi-internal-generated-cluster-id": "true"
-"listview-tasks": "true"
+"listview-tasks": "true"
 {% if vsphere_csi_controller is version('v2.7.0', '>=') %}
 "improved-csi-idempotency": "true"
 "improved-volume-topology": "true"
```
```diff
--- a/roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-role-bindings.yml.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-role-bindings.yml.j2
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
 name: cloud-controller-manager
-namespace: kube-system
+namespace: kube-system
```
```diff
--- a/roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-roles.yml.j2
+++ b/roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-roles.yml.j2
@@ -110,4 +110,4 @@ rules:
 - list
 - watch
 apiGroups:
-- discovery.k8s.io
+- discovery.k8s.io
```
```diff
--- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2
@@ -32,4 +32,3 @@ data:
 - name: helper-pod
 image: "{{ local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}"
 imagePullPolicy: IfNotPresent
-
```
```diff
--- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2
@@ -15,4 +15,4 @@ rules:
 verbs: [ "create", "patch" ]
 - apiGroups: [ "storage.k8s.io" ]
 resources: [ "storageclasses" ]
-verbs: [ "get", "list", "watch" ]
+verbs: [ "get", "list", "watch" ]
```
```diff
--- a/roles/kubernetes-apps/metallb/defaults/main.yml
+++ b/roles/kubernetes-apps/metallb/defaults/main.yml
@@ -13,4 +13,4 @@ metallb_speaker_tolerations:
 key: node-role.kubernetes.io/control-plane
 operator: Exists
 metallb_controller_tolerations: []
-metallb_loadbalancer_class: ""
+metallb_loadbalancer_class: ""
```
```diff
--- a/roles/kubernetes-apps/node_feature_discovery/templates/nfd-rolebinding.yaml.j2
+++ b/roles/kubernetes-apps/node_feature_discovery/templates/nfd-rolebinding.yaml.j2
@@ -11,4 +11,3 @@ subjects:
 - kind: ServiceAccount
 name: {{ node_feature_discovery_worker_sa_name }}
 namespace: {{ node_feature_discovery_namespace }}
-
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/appgroup.diktyo.x-k8s.io_appgroups.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/appgroup.diktyo.x-k8s.io_appgroups.yaml.j2
@@ -194,4 +194,4 @@ spec:
 type: object
 type: object
 served: true
-storage: true
+storage: true
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/cm-scheduler-plugins.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/cm-scheduler-plugins.yaml.j2
@@ -25,4 +25,4 @@ data:
 {% if scheduler_plugins_plugin_config is defined and scheduler_plugins_plugin_config | length != 0 %}
 pluginConfig:
 {{ scheduler_plugins_plugin_config | to_nice_yaml(indent=2, width=256) | indent(6, true) }}
-{% endif %}
+{% endif %}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/deploy-scheduler-plugins.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/deploy-scheduler-plugins.yaml.j2
@@ -71,4 +71,4 @@ spec:
 volumes:
 - name: scheduler-config
 configMap:
-name: scheduler-config
+name: scheduler-config
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/namespace.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/namespace.yaml.j2
@@ -4,4 +4,4 @@ kind: Namespace
 metadata:
 name: {{ scheduler_plugins_namespace }}
 labels:
-name: {{ scheduler_plugins_namespace }}
+name: {{ scheduler_plugins_namespace }}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/networktopology.diktyo.x-k8s.io_networktopologies.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/networktopology.diktyo.x-k8s.io_networktopologies.yaml.j2
@@ -145,4 +145,4 @@ spec:
 type: object
 type: object
 served: true
-storage: true
+storage: true
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/rbac-scheduler-plugins.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/rbac-scheduler-plugins.yaml.j2
@@ -137,4 +137,4 @@ subjects:
 namespace: {{ scheduler_plugins_namespace }}
 - kind: ServiceAccount
 name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
+namespace: {{ scheduler_plugins_namespace }}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/sa-scheduler-plugins.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/sa-scheduler-plugins.yaml.j2
@@ -8,4 +8,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
 name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
+namespace: {{ scheduler_plugins_namespace }}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_elasticquotas.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_elasticquotas.yaml.j2
@@ -79,4 +79,4 @@ spec:
 served: true
 storage: true
 subresources:
-status: {}
+status: {}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_podgroups.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_podgroups.yaml.j2
@@ -94,4 +94,4 @@ spec:
 served: true
 storage: true
 subresources:
-status: {}
+status: {}
```
```diff
--- a/roles/kubernetes-apps/scheduler_plugins/templates/topology.node.k8s.io_noderesourcetopologies.yaml.j2
+++ b/roles/kubernetes-apps/scheduler_plugins/templates/topology.node.k8s.io_noderesourcetopologies.yaml.j2
@@ -150,4 +150,4 @@ spec:
 - zones
 type: object
 served: true
-storage: true
+storage: true
```
```diff
--- a/roles/kubernetes/control-plane/templates/apiserver-tracing.yaml.j2
+++ b/roles/kubernetes/control-plane/templates/apiserver-tracing.yaml.j2
@@ -1,4 +1,4 @@
 apiVersion: apiserver.config.k8s.io/v1beta1
 kind: TracingConfiguration
 endpoint: {{ kube_apiserver_tracing_endpoint }}
-samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}
+samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}
```
```diff
--- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
+++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2
@@ -174,4 +174,4 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
 tracing:
 endpoint: {{ kubelet_tracing_endpoint }}
 samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
-{% endif %}
+{% endif %}
```
```diff
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -102,4 +102,3 @@ data:
 }
 ]
 }
-
```
```diff
--- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2
@@ -134,7 +134,7 @@ data:
 ## DSR setting
 bpf-lb-mode: "{{ cilium_loadbalancer_mode }}"
 
-# l2
+# l2
 enable-l2-announcements: "{{ cilium_l2announcements }}"
 
 # Enable Bandwidth Manager
```
```diff
--- a/roles/network_plugin/cilium/templates/cilium/cr.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium/cr.yml.j2
@@ -140,7 +140,7 @@ rules:
 verbs:
 - list
 - watch
-{% if cilium_version %}
+{% if cilium_version %}
 - apiGroups:
 - coordination.k8s.io
 resources:
```
```diff
--- a/roles/network_plugin/cilium/templates/hubble/config.yml.j2
+++ b/roles/network_plugin/cilium/templates/hubble/config.yml.j2
@@ -12,10 +12,10 @@ data:
 peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443"
 listen-address: :4245
 metrics-listen-address: ":9966"
-dial-timeout:
-retry-timeout:
-sort-buffer-len-max:
-sort-buffer-drain-timeout:
+dial-timeout:
+retry-timeout:
+sort-buffer-len-max:
+sort-buffer-drain-timeout:
 tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
 tls-client-key-file: /var/lib/hubble-relay/tls/client.key
 tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt
```
```diff
--- a/roles/network_plugin/cilium/templates/hubble/service.yml.j2
+++ b/roles/network_plugin/cilium/templates/hubble/service.yml.j2
@@ -102,4 +102,3 @@ spec:
 protocol: TCP
 targetPort: 4244
 internalTrafficPolicy: Local
-
```
```diff
--- a/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2
+++ b/roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2
@@ -1530,4 +1530,4 @@ spec:
 subresources:
 status: {}
 conversion:
-strategy: None
+strategy: None
```
```diff
--- a/scale.yml
+++ b/scale.yml
@@ -1,3 +1,3 @@
 ---
 - name: Scale the cluster
-  ansible.builtin.import_playbook: playbooks/scale.yml
+  ansible.builtin.import_playbook: playbooks/scale.yml
```
```diff
--- a/scripts/openstack-cleanup/main.py
+++ b/scripts/openstack-cleanup/main.py
@@ -61,7 +61,7 @@ def main():
 
     for ip in conn.network.ips():
         fn_if_old(conn.network.delete_ip, ip)
-
+
    # After removing unnecessary subnet from router, retry to delete ports
    map_if_old(conn.network.delete_port,
               conn.network.ports())
```
```diff
--- a/tests/files/vagrant_ubuntu20-flannel-collection.rb
+++ b/tests/files/vagrant_ubuntu20-flannel-collection.rb
@@ -6,4 +6,4 @@ $libvirt_volume_cache = "unsafe"
 # Checking for box update can trigger API rate limiting
 # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
 $box_check_update = false
-$vm_cpus = 2
+$vm_cpus = 2
```
```diff
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -5,8 +5,8 @@ ara[server]==1.7.1
 dopy==0.3.7
 molecule==24.2.1
 molecule-plugins[vagrant]==23.5.3
-python-vagrant==1.0.0
 pytest-testinfra==10.1.0
+python-vagrant==1.0.0
 tox==4.15.0
-yamllint==1.35.1
 tzdata==2024.1
+yamllint==1.35.1
```