
pre-commit: apply autofixes hooks and fix the rest manually

- markdownlint (manual fix)
- end-of-file-fixer
- requirements-txt-fixer
- trailing-whitespace
Max Gautier, 6 months ago · pull/11244/head · commit d50f61eae5
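
The three autofix hooks named in the commit message ship with the upstream pre-commit-hooks repository, while markdownlint comes from a separate hook source. A minimal sketch of a `.pre-commit-config.yaml` enabling them is shown below; the markdownlint source repository and the `rev` pins are illustrative assumptions, not taken from this repository's actual configuration:

```yaml
# Sketch only: the rev pins and the markdownlint source are illustrative assumptions.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0  # illustrative pin
    hooks:
      - id: trailing-whitespace      # strip trailing spaces and tabs
      - id: end-of-file-fixer        # make every file end with exactly one newline
      - id: requirements-txt-fixer   # sort requirements*.txt entries
  - repo: https://github.com/igorshubovych/markdownlint-cli
    rev: v0.41.0  # illustrative pin
    hooks:
      - id: markdownlint             # reports findings; does not rewrite files
```

Running `pre-commit run --all-files` applies the autofixers across the whole tree, which accounts for the whitespace-only and end-of-file changes below; the markdownlint findings were fixed by hand, as the commit message notes.
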
44 changed files with 42 additions and 47 deletions
  1. contrib/terraform/nifcloud/README.md (1 change)
  2. contrib/terraform/upcloud/cluster-settings.tfvars (2 changes)
  3. contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf (2 changes)
  4. contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf (2 changes)
  5. contrib/terraform/upcloud/sample-inventory/cluster.tfvars (2 changes)
  6. docs/operations/recover-control-plane.md (1 change)
  7. inventory/sample/group_vars/etcd.yml (2 changes)
  8. requirements.txt (2 changes)
  9. roles/container-engine/containerd/defaults/main.yml (2 changes)
  10. roles/container-engine/containerd/templates/config.toml.j2 (1 change)
  11. roles/etcd/defaults/main.yml (2 changes)
  12. roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 (2 changes)
  13. roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 (2 changes)
  14. roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-regional.yml.j2 (2 changes)
  15. roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-zonal.yml.j2 (2 changes)
  16. roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 (2 changes)
  17. roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-role-bindings.yml.j2 (2 changes)
  18. roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-roles.yml.j2 (2 changes)
  19. roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 (1 change)
  20. roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 (2 changes)
  21. roles/kubernetes-apps/metallb/defaults/main.yml (2 changes)
  22. roles/kubernetes-apps/node_feature_discovery/templates/nfd-rolebinding.yaml.j2 (1 change)
  23. roles/kubernetes-apps/scheduler_plugins/templates/appgroup.diktyo.x-k8s.io_appgroups.yaml.j2 (2 changes)
  24. roles/kubernetes-apps/scheduler_plugins/templates/cm-scheduler-plugins.yaml.j2 (2 changes)
  25. roles/kubernetes-apps/scheduler_plugins/templates/deploy-scheduler-plugins.yaml.j2 (2 changes)
  26. roles/kubernetes-apps/scheduler_plugins/templates/namespace.yaml.j2 (2 changes)
  27. roles/kubernetes-apps/scheduler_plugins/templates/networktopology.diktyo.x-k8s.io_networktopologies.yaml.j2 (2 changes)
  28. roles/kubernetes-apps/scheduler_plugins/templates/rbac-scheduler-plugins.yaml.j2 (2 changes)
  29. roles/kubernetes-apps/scheduler_plugins/templates/sa-scheduler-plugins.yaml.j2 (2 changes)
  30. roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_elasticquotas.yaml.j2 (2 changes)
  31. roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_podgroups.yaml.j2 (2 changes)
  32. roles/kubernetes-apps/scheduler_plugins/templates/topology.node.k8s.io_noderesourcetopologies.yaml.j2 (2 changes)
  33. roles/kubernetes/control-plane/templates/apiserver-tracing.yaml.j2 (2 changes)
  34. roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 (2 changes)
  35. roles/network_plugin/calico/templates/calico-config.yml.j2 (1 change)
  36. roles/network_plugin/cilium/templates/cilium/config.yml.j2 (2 changes)
  37. roles/network_plugin/cilium/templates/cilium/cr.yml.j2 (2 changes)
  38. roles/network_plugin/cilium/templates/hubble/config.yml.j2 (8 changes)
  39. roles/network_plugin/cilium/templates/hubble/service.yml.j2 (1 change)
  40. roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 (2 changes)
  41. scale.yml (2 changes)
  42. scripts/openstack-cleanup/main.py (2 changes)
  43. tests/files/vagrant_ubuntu20-flannel-collection.rb (2 changes)
  44. tests/requirements.txt (4 changes)

contrib/terraform/nifcloud/README.md (1 change)

@@ -72,6 +72,7 @@ The setup looks like following
```bash
./generate-inventory.sh > sample-inventory/inventory.ini
```
* Export Variables:

contrib/terraform/upcloud/cluster-settings.tfvars (2 changes)

@@ -146,4 +146,4 @@ server_groups = {
# ]
# anti_affinity_policy = "yes"
# }
-}
+}

contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf (2 changes)

@@ -558,4 +558,4 @@ resource "upcloud_server_group" "server_groups" {
anti_affinity_policy = each.value.anti_affinity_policy
labels = {}
members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
-}
+}

contrib/terraform/upcloud/modules/kubernetes-cluster/variables.tf (2 changes)

@@ -106,4 +106,4 @@ variable "server_groups" {
anti_affinity_policy = string
servers = list(string)
}))
-}
+}

contrib/terraform/upcloud/sample-inventory/cluster.tfvars (2 changes)

@@ -146,4 +146,4 @@ server_groups = {
# ]
# anti_affinity_policy = "yes"
# }
-}
+}

docs/operations/recover-control-plane.md (1 change)

@@ -8,7 +8,6 @@ Examples of what broken means in this context:
* One or more bare metal node(s) suffer from unrecoverable hardware failure
* One or more node(s) fail during patching or upgrading
* Etcd database corruption
* Other node related failures leaving your control plane degraded or nonfunctional
__Note that you need at least one functional node to be able to recover using this method.__

inventory/sample/group_vars/etcd.yml (2 changes)

@@ -32,4 +32,4 @@
# etcd_experimental_enable_distributed_tracing: false
# etcd_experimental_distributed_tracing_sample_rate: 100
# etcd_experimental_distributed_tracing_address: "localhost:4317"
-# etcd_experimental_distributed_tracing_service_name: etcd
+# etcd_experimental_distributed_tracing_service_name: etcd

requirements.txt (2 changes)

@@ -2,9 +2,9 @@ ansible==9.5.1
cryptography==42.0.7
jinja2==3.1.4
jmespath==1.0.1
+jsonschema==4.22.0
MarkupSafe==2.1.5
netaddr==1.2.1
pbr==6.0.0
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8
-jsonschema==4.22.0

roles/container-engine/containerd/defaults/main.yml (2 changes)

@@ -116,4 +116,4 @@ containerd_tracing_enabled: false
containerd_tracing_endpoint: "0.0.0.0:4317"
containerd_tracing_protocol: "grpc"
containerd_tracing_sampling_ratio: 1.0
-containerd_tracing_service_name: "containerd"
+containerd_tracing_service_name: "containerd"

roles/container-engine/containerd/templates/config.toml.j2 (1 change)

@@ -107,4 +107,3 @@ oom_score = {{ containerd_oom_score }}
sampling_ratio = {{ containerd_tracing_sampling_ratio }}
service_name = "{{ containerd_tracing_service_name }}"
{% endif %}

roles/etcd/defaults/main.yml (2 changes)

@@ -124,4 +124,4 @@ unsafe_show_logs: false
etcd_experimental_enable_distributed_tracing: false
etcd_experimental_distributed_tracing_sample_rate: 100
etcd_experimental_distributed_tracing_address: "localhost:4317"
-etcd_experimental_distributed_tracing_service_name: etcd
+etcd_experimental_distributed_tracing_service_name: etcd

roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 (2 changes)

@@ -162,4 +162,4 @@ metadata:
name: pd.csi.storage.gke.io
spec:
attachRequired: true
-podInfoOnMount: false
+podInfoOnMount: false

roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 (2 changes)

@@ -109,4 +109,4 @@ spec:
# See "special case". This will tolerate everything. Node component should
# be scheduled on all nodes.
tolerations:
-- operator: Exists
+- operator: Exists

roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-regional.yml.j2 (2 changes)

@@ -6,4 +6,4 @@ provisioner: pd.csi.storage.gke.io
parameters:
type: pd-balanced
replication-type: regional-pd
-volumeBindingMode: WaitForFirstConsumer
+volumeBindingMode: WaitForFirstConsumer

roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-sc-zonal.yml.j2 (2 changes)

@@ -5,4 +5,4 @@ metadata:
provisioner: pd.csi.storage.gke.io
parameters:
type: pd-balanced
-volumeBindingMode: WaitForFirstConsumer
+volumeBindingMode: WaitForFirstConsumer

roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 (2 changes)

@@ -18,7 +18,7 @@ data:
"max-pvscsi-targets-per-vm": "true"
"multi-vcenter-csi-topology": "true"
"csi-internal-generated-cluster-id": "true"
"listview-tasks": "true"
"listview-tasks": "true"
{% if vsphere_csi_controller is version('v2.7.0', '>=') %}
"improved-csi-idempotency": "true"
"improved-volume-topology": "true"

roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-role-bindings.yml.j2 (2 changes)

@@ -9,4 +9,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
-namespace: kube-system
+namespace: kube-system

roles/kubernetes-apps/external_cloud_controller/huaweicloud/templates/external-huawei-cloud-controller-manager-roles.yml.j2 (2 changes)

@@ -110,4 +110,4 @@ rules:
- list
- watch
apiGroups:
-- discovery.k8s.io
+- discovery.k8s.io

roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cm.yml.j2 (1 change)

@@ -32,4 +32,3 @@ data:
- name: helper-pod
image: "{{ local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}"
imagePullPolicy: IfNotPresent

roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-cr.yml.j2 (2 changes)

@@ -15,4 +15,4 @@ rules:
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
-verbs: [ "get", "list", "watch" ]
+verbs: [ "get", "list", "watch" ]

roles/kubernetes-apps/metallb/defaults/main.yml (2 changes)

@@ -13,4 +13,4 @@ metallb_speaker_tolerations:
key: node-role.kubernetes.io/control-plane
operator: Exists
metallb_controller_tolerations: []
-metallb_loadbalancer_class: ""
+metallb_loadbalancer_class: ""

roles/kubernetes-apps/node_feature_discovery/templates/nfd-rolebinding.yaml.j2 (1 change)

@@ -11,4 +11,3 @@ subjects:
- kind: ServiceAccount
name: {{ node_feature_discovery_worker_sa_name }}
namespace: {{ node_feature_discovery_namespace }}

roles/kubernetes-apps/scheduler_plugins/templates/appgroup.diktyo.x-k8s.io_appgroups.yaml.j2 (2 changes)

@@ -194,4 +194,4 @@ spec:
type: object
type: object
served: true
-storage: true
+storage: true

roles/kubernetes-apps/scheduler_plugins/templates/cm-scheduler-plugins.yaml.j2 (2 changes)

@@ -25,4 +25,4 @@ data:
{% if scheduler_plugins_plugin_config is defined and scheduler_plugins_plugin_config | length != 0 %}
pluginConfig:
{{ scheduler_plugins_plugin_config | to_nice_yaml(indent=2, width=256) | indent(6, true) }}
-{% endif %}
+{% endif %}

roles/kubernetes-apps/scheduler_plugins/templates/deploy-scheduler-plugins.yaml.j2 (2 changes)

@@ -71,4 +71,4 @@ spec:
volumes:
- name: scheduler-config
configMap:
-name: scheduler-config
+name: scheduler-config

roles/kubernetes-apps/scheduler_plugins/templates/namespace.yaml.j2 (2 changes)

@@ -4,4 +4,4 @@ kind: Namespace
metadata:
name: {{ scheduler_plugins_namespace }}
labels:
-name: {{ scheduler_plugins_namespace }}
+name: {{ scheduler_plugins_namespace }}

roles/kubernetes-apps/scheduler_plugins/templates/networktopology.diktyo.x-k8s.io_networktopologies.yaml.j2 (2 changes)

@@ -145,4 +145,4 @@ spec:
type: object
type: object
served: true
-storage: true
+storage: true

roles/kubernetes-apps/scheduler_plugins/templates/rbac-scheduler-plugins.yaml.j2 (2 changes)

@@ -137,4 +137,4 @@ subjects:
namespace: {{ scheduler_plugins_namespace }}
- kind: ServiceAccount
name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
+namespace: {{ scheduler_plugins_namespace }}

roles/kubernetes-apps/scheduler_plugins/templates/sa-scheduler-plugins.yaml.j2 (2 changes)

@@ -8,4 +8,4 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
+namespace: {{ scheduler_plugins_namespace }}

roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_elasticquotas.yaml.j2 (2 changes)

@@ -79,4 +79,4 @@ spec:
served: true
storage: true
subresources:
-status: {}
+status: {}

roles/kubernetes-apps/scheduler_plugins/templates/scheduling.x-k8s.io_podgroups.yaml.j2 (2 changes)

@@ -94,4 +94,4 @@ spec:
served: true
storage: true
subresources:
-status: {}
+status: {}

roles/kubernetes-apps/scheduler_plugins/templates/topology.node.k8s.io_noderesourcetopologies.yaml.j2 (2 changes)

@@ -150,4 +150,4 @@ spec:
- zones
type: object
served: true
-storage: true
+storage: true

roles/kubernetes/control-plane/templates/apiserver-tracing.yaml.j2 (2 changes)

@@ -1,4 +1,4 @@
apiVersion: apiserver.config.k8s.io/v1beta1
kind: TracingConfiguration
endpoint: {{ kube_apiserver_tracing_endpoint }}
-samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}
+samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}

roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 (2 changes)

@@ -174,4 +174,4 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
tracing:
endpoint: {{ kubelet_tracing_endpoint }}
samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
-{% endif %}
+{% endif %}

roles/network_plugin/calico/templates/calico-config.yml.j2 (1 change)

@@ -102,4 +102,3 @@ data:
}
]
}

roles/network_plugin/cilium/templates/cilium/config.yml.j2 (2 changes)

@@ -134,7 +134,7 @@ data:
## DSR setting
bpf-lb-mode: "{{ cilium_loadbalancer_mode }}"
-# l2
+# l2
enable-l2-announcements: "{{ cilium_l2announcements }}"
# Enable Bandwidth Manager

roles/network_plugin/cilium/templates/cilium/cr.yml.j2 (2 changes)

@@ -140,7 +140,7 @@ rules:
verbs:
- list
- watch
-{% if cilium_version %}
+{% if cilium_version %}
- apiGroups:
- coordination.k8s.io
resources:

roles/network_plugin/cilium/templates/hubble/config.yml.j2 (8 changes)

@@ -12,10 +12,10 @@ data:
peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443"
listen-address: :4245
metrics-listen-address: ":9966"
-dial-timeout:
-retry-timeout:
-sort-buffer-len-max:
-sort-buffer-drain-timeout:
+dial-timeout:
+retry-timeout:
+sort-buffer-len-max:
+sort-buffer-drain-timeout:
tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt

roles/network_plugin/cilium/templates/hubble/service.yml.j2 (1 change)

@@ -102,4 +102,3 @@ spec:
protocol: TCP
targetPort: 4244
internalTrafficPolicy: Local

roles/network_plugin/kube-ovn/templates/cni-kube-ovn-crd.yml.j2 (2 changes)

@@ -1530,4 +1530,4 @@ spec:
subresources:
status: {}
conversion:
-strategy: None
+strategy: None

scale.yml (2 changes)

@@ -1,3 +1,3 @@
---
- name: Scale the cluster
-ansible.builtin.import_playbook: playbooks/scale.yml
+ansible.builtin.import_playbook: playbooks/scale.yml

scripts/openstack-cleanup/main.py (2 changes)

@@ -61,7 +61,7 @@ def main():
for ip in conn.network.ips():
fn_if_old(conn.network.delete_ip, ip)
# After removing unnecessary subnet from router, retry to delete ports
map_if_old(conn.network.delete_port,
conn.network.ports())

tests/files/vagrant_ubuntu20-flannel-collection.rb (2 changes)

@@ -6,4 +6,4 @@ $libvirt_volume_cache = "unsafe"
# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false
-$vm_cpus = 2
+$vm_cpus = 2

tests/requirements.txt (4 changes)

@@ -5,8 +5,8 @@ ara[server]==1.7.1
dopy==0.3.7
molecule==24.2.1
molecule-plugins[vagrant]==23.5.3
-python-vagrant==1.0.0
pytest-testinfra==10.1.0
+python-vagrant==1.0.0
tox==4.15.0
-yamllint==1.35.1
tzdata==2024.1
+yamllint==1.35.1