data "exoscale_compute_template" "os_image" {
  for_each = var.machines

  zone = var.zone
  name = each.value.boot_disk.image_name
}
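# A minimal sketch of the shape `var.machines` is assumed to take, inferred
# from the attributes referenced in this file (the variable itself is declared
# elsewhere; all values below are illustrative only):
#
#   machines = {
#     "master-0" = {
#       node_type = "master"
#       size      = "Medium"
#       boot_disk = {
#         image_name                = "Linux Ubuntu 20.04 LTS 64-bit"
#         root_partition_size       = 50
#         node_local_partition_size = 0
#         ceph_partition_size       = 0
#       }
#     }
#     "worker-0" = {
#       node_type = "worker"
#       size      = "Large"
#       boot_disk = {
#         image_name                = "Linux Ubuntu 20.04 LTS 64-bit"
#         root_partition_size       = 50
#         node_local_partition_size = 0
#         ceph_partition_size       = 0
#       }
#     }
#   }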
data "exoscale_compute" "master_nodes" {
  for_each = exoscale_compute.master

  id = each.value.id

  # The private IP address is not assigned until the NIC is created,
  # so we need this explicit dependency
  depends_on = [exoscale_nic.master_private_network_nic]
}
data "exoscale_compute" "worker_nodes" {
  for_each = exoscale_compute.worker

  id = each.value.id

  # The private IP address is not assigned until the NIC is created,
  # so we need this explicit dependency
  depends_on = [exoscale_nic.worker_private_network_nic]
}
resource "exoscale_network" "private_network" {
  zone = var.zone
  name = "${var.prefix}-network"

  start_ip = cidrhost(var.private_network_cidr, 1)
  # cidr -1 = Broadcast address
  # cidr -2 = DHCP server address (exoscale specific)
  end_ip   = cidrhost(var.private_network_cidr, -3)
  netmask  = cidrnetmask(var.private_network_cidr)
}
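# Worked example for the address layout above, assuming an illustrative
# private_network_cidr of "10.0.1.0/24":
#   cidrhost("10.0.1.0/24",  1) = 10.0.1.1     (start_ip)
#   cidrhost("10.0.1.0/24", -1) = 10.0.1.255   (broadcast, excluded)
#   cidrhost("10.0.1.0/24", -2) = 10.0.1.254   (Exoscale DHCP server, excluded)
#   cidrhost("10.0.1.0/24", -3) = 10.0.1.253   (end_ip)
#   cidrnetmask("10.0.1.0/24")  = 255.255.255.0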
resource "exoscale_compute" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  display_name = "${var.prefix}-${each.key}"
  template_id  = data.exoscale_compute_template.os_image[each.key].id
  size         = each.value.size
  disk_size    = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state        = "Running"
  zone         = var.zone

  security_groups = [exoscale_security_group.master_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "master"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}
resource "exoscale_compute" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  display_name = "${var.prefix}-${each.key}"
  template_id  = data.exoscale_compute_template.os_image[each.key].id
  size         = each.value.size
  disk_size    = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state        = "Running"
  zone         = var.zone

  security_groups = [exoscale_security_group.worker_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "worker"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}
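# A rough sketch of how templates/cloud-init.tmpl might consume the variables
# passed above (illustrative only; the actual template ships with this module
# and is not reproduced here):
#
#   #cloud-config
#   users:
#     - name: ubuntu
#       ssh_authorized_keys:
#   %{ for key in ssh_public_keys ~}
#       - ${key}
#   %{ endfor ~}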
resource "exoscale_nic" "master_private_network_nic" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}
resource "exoscale_nic" "worker_private_network_nic" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}
resource "exoscale_security_group" "master_sg" {
  name        = "${var.prefix}-master-sg"
  description = "Security group for Kubernetes masters"
}
resource "exoscale_security_group_rules" "master_sg_rules" {
  security_group_id = exoscale_security_group.master_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # Kubernetes API
  ingress {
    protocol  = "TCP"
    cidr_list = var.api_server_whitelist
    ports     = ["6443"]
  }
}
resource "exoscale_security_group" "worker_sg" {
  name        = "${var.prefix}-worker-sg"
  description = "Security group for Kubernetes worker nodes"
}
resource "exoscale_security_group_rules" "worker_sg_rules" {
  security_group_id = exoscale_security_group.worker_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # HTTP(S)
  ingress {
    protocol  = "TCP"
    cidr_list = ["0.0.0.0/0"]
    ports     = ["80", "443"]
  }

  # Kubernetes NodePort range
  ingress {
    protocol  = "TCP"
    cidr_list = var.nodeport_whitelist
    ports     = ["30000-32767"]
  }
}
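# Illustrative values for the whitelist variables used in the rules above
# (the real defaults live in the module's variable definitions):
#
#   ssh_whitelist        = ["1.2.3.4/32"]
#   api_server_whitelist = ["1.2.3.4/32"]
#   nodeport_whitelist   = ["1.2.3.4/32"]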
resource "exoscale_ipaddress" "ingress_controller_lb" {
  zone                     = var.zone
  healthcheck_mode         = "http"
  healthcheck_port         = 80
  healthcheck_path         = "/healthz"
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}
resource "exoscale_ipaddress" "control_plane_lb" {
  zone                     = var.zone
  healthcheck_mode         = "tcp"
  healthcheck_port         = 6443
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}
resource "exoscale_secondary_ipaddress" "control_plane_lb" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}