@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines
 
   zone = var.zone
   name = each.value.boot_disk.image_name
 }
 
-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master
 
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
+  id   = each.value.id
+  zone = var.zone
 }
 
-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker
 
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
+  id   = each.value.id
+  zone = var.zone
 }
 
-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"
 
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
   netmask = cidrnetmask(var.private_network_cidr)
 }
 
-resource "exoscale_compute" "master" {
+resource "exoscale_compute_instance" "master" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "master"
   }
 
-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.master_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.master_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]
 
   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
   )
 }
 
-resource "exoscale_compute" "worker" {
+resource "exoscale_compute_instance" "worker" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "worker"
   }
 
-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.worker_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.worker_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]
 
   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
       root_partition_size       = each.value.boot_disk.root_partition_size
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
   )
 }
 
-resource "exoscale_nic" "master_private_network_nic" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
-resource "exoscale_nic" "worker_private_network_nic" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
 resource "exoscale_security_group" "master_sg" {
   name        = "${var.prefix}-master-sg"
   description = "Security group for Kubernetes masters"
 }
 
-resource "exoscale_security_group_rules" "master_sg_rules" {
+resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
   security_group_id = exoscale_security_group.master_sg.id
 
+  for_each   = toset(var.ssh_whitelist)
   # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}
 
+resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
+  security_group_id = exoscale_security_group.master_sg.id
+
+  for_each   = toset(var.api_server_whitelist)
   # Kubernetes API
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.api_server_whitelist
-    ports     = ["6443"]
-  }
+  type       = "INGRESS"
+  start_port = 6443
+  end_port   = 6443
+  protocol   = "TCP"
+  cidr       = each.value
 }
 
 resource "exoscale_security_group" "worker_sg" {
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
   description = "security group for kubernetes worker nodes"
 }
 
-resource "exoscale_security_group_rules" "worker_sg_rules" {
+resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
   security_group_id = exoscale_security_group.worker_sg.id
 
   # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  for_each   = toset(var.ssh_whitelist)
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}
 
-  # HTTP(S)
-  ingress {
-    protocol  = "TCP"
-    cidr_list = ["0.0.0.0/0"]
-    ports     = ["80", "443"]
-  }
+resource "exoscale_security_group_rule" "worker_sg_rule_http" {
+  security_group_id = exoscale_security_group.worker_sg.id
 
-  # Kubernetes Nodeport
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.nodeport_whitelist
-    ports     = ["30000-32767"]
-  }
+  # HTTP(S)
+  for_each   = toset(["80", "443"])
+  type       = "INGRESS"
+  start_port = each.value
+  end_port   = each.value
+  protocol   = "TCP"
+  cidr       = "0.0.0.0/0"
 }
 
-resource "exoscale_ipaddress" "ingress_controller_lb" {
-  zone                     = var.zone
-  healthcheck_mode         = "http"
-  healthcheck_port         = 80
-  healthcheck_path         = "/healthz"
-  healthcheck_interval     = 10
-  healthcheck_timeout      = 2
-  healthcheck_strikes_ok   = 2
-  healthcheck_strikes_fail = 3
-}
-
resource "exoscale_secondary_ipaddress" "ingress_controller_lb" { |
|
|
|
for_each = exoscale_compute.worker |
|
|
|
resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" { |
|
|
|
security_group_id = exoscale_security_group.worker_sg.id |
|
|
|
|
|
|
|
compute_id = each.value.id |
|
|
|
ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address |
|
|
|
# HTTP(S) |
|
|
|
for_each = toset(var.nodeport_whitelist) |
|
|
|
type = "INGRESS" |
|
|
|
start_port = 30000 |
|
|
|
end_port = 32767 |
|
|
|
protocol = "TCP" |
|
|
|
cidr = each.value |
|
|
|
} |
|
|
|
|
|
|
|
resource "exoscale_ipaddress" "control_plane_lb" { |
|
|
|
zone = var.zone |
|
|
|
healthcheck_mode = "tcp" |
|
|
|
healthcheck_port = 6443 |
|
|
|
healthcheck_interval = 10 |
|
|
|
healthcheck_timeout = 2 |
|
|
|
healthcheck_strikes_ok = 2 |
|
|
|
healthcheck_strikes_fail = 3 |
|
|
|
resource "exoscale_elastic_ip" "ingress_controller_lb" { |
|
|
|
zone = var.zone |
|
|
|
healthcheck { |
|
|
|
mode = "http" |
|
|
|
port = 80 |
|
|
|
uri = "/healthz" |
|
|
|
interval = 10 |
|
|
|
timeout = 2 |
|
|
|
strikes_ok = 2 |
|
|
|
strikes_fail = 3 |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
resource "exoscale_secondary_ipaddress" "control_plane_lb" { |
|
|
|
for_each = exoscale_compute.master |
|
|
|
|
|
|
|
compute_id = each.value.id |
|
|
|
ip_address = exoscale_ipaddress.control_plane_lb.ip_address |
|
|
|
resource "exoscale_elastic_ip" "control_plane_lb" { |
|
|
|
zone = var.zone |
|
|
|
healthcheck { |
|
|
|
mode = "tcp" |
|
|
|
port = 6443 |
|
|
|
interval = 10 |
|
|
|
timeout = 2 |
|
|
|
strikes_ok = 2 |
|
|
|
strikes_fail = 3 |
|
|
|
} |
|
|
|
} |
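
Note on adopting this change: the resource types themselves are renamed (exoscale_compute becomes exoscale_compute_instance, exoscale_network becomes exoscale_private_network, exoscale_ipaddress becomes exoscale_elastic_ip), and Terraform treats a type change as destroy-and-recreate. If an existing cluster needs to survive the upgrade, one possible path is to drop each old-type object from state and re-import it under the new type. This is a minimal sketch, assuming the provider's documented instance-ID@zone import format; the resource address, UUID, and zone are illustrative placeholders, and real addresses come from terraform state list:

    # Illustrative only, not part of the patch: repeat per node.
    terraform state rm 'exoscale_compute.master["master-0"]'
    terraform import 'exoscale_compute_instance.master["master-0"]' 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6@ch-gva-2'

For a disposable test cluster, destroying and re-applying from scratch is simpler.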