Fredrik Liv committed 3 years ago (via GitHub)
12 changed files with 498 additions and 0 deletions
+107  contrib/terraform/hetzner/README.md
+44   contrib/terraform/hetzner/default.tfvars
+51   contrib/terraform/hetzner/main.tf
+122  contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf
+23   contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf
+17   contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl
+41   contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf
+9    contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf
+7    contrib/terraform/hetzner/output.tf
+16   contrib/terraform/hetzner/templates/inventory.tpl
+46   contrib/terraform/hetzner/variables.tf
+15   contrib/terraform/hetzner/versions.tf
@@ -0,0 +1,107 @@ contrib/terraform/hetzner/README.md

# Kubernetes on Hetzner with Terraform

Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
      Kubernetes cluster
+--------------------------+
|    +--------------+      |
|    | +--------------+    |
| -->| |              |    |
|    | | Master/etcd  |    |
|    | | node(s)      |    |
|    +-+              |    |
|      +--------------+    |
|            ^             |
|            |             |
|            v             |
|    +--------------+      |
|    | +--------------+    |
| -->| |              |    |
|    | | Worker       |    |
|    | | node(s)      |    |
|    +-+              |    |
|      +--------------+    |
+--------------------------+
```

The nodes use a private network for node-to-node communication and a public interface for all external communication.

## Requirements

* Terraform 0.14.0 or newer

## Quickstart

NOTE: The commands below assume you are at the root of the Kubespray repository.

For authentication against Hetzner Cloud, the hcloud provider reads an API token from the environment:

```bash
export HCLOUD_TOKEN=api-token
```

Copy the cluster configuration files:

```bash
CLUSTER=my-hetzner-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your requirements.

Run Terraform to create the infrastructure:

```bash
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```

You should now have an inventory file named `inventory.ini` that you can use with Kubespray to set up a cluster.

It is a good idea to check that you have basic SSH connectivity to the nodes first:

```bash
ansible -i inventory.ini -m ping all
```

You can then set up Kubernetes with Kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Cloud controller

For better integration with the cloud, you can install the [hcloud cloud controller manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and the [CSI driver](https://github.com/hetznercloud/csi-driver).

Please read the instructions in both repositories for how to install them.
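As a rough sketch (the manifest URLs, file names, and the networks-variant choice below are assumptions — consult both upstream READMEs for the authoritative steps), the installation boils down to creating an `hcloud` secret with your API token and applying the upstream manifests:

```bash
# Sketch only, under the assumptions above; verify against the upstream docs.
# Both components read the Hetzner API token from a secret named "hcloud" in
# kube-system; the networks variant of the CCM also expects the network name,
# which this module creates as "${prefix}-network" ("default-network" with
# the sample tfvars).
kubectl -n kube-system create secret generic hcloud \
  --from-literal=token=$HCLOUD_TOKEN \
  --from-literal=network=default-network

# Cloud controller manager (networks variant, since this setup uses a
# private hcloud network).
kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm-networks.yaml

# CSI driver for Hetzner volumes.
kubectl apply -f https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml
```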

## Teardown

You can tear down the infrastructure using the following Terraform command:

```bash
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
```

## Variables

* `prefix`: Prefix added to all resource names; set it to `""` to disable the prefix
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The Hetzner location (e.g. `hel1`) where the cluster runs
* `machines`: Machines to provision. The key of each entry is used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: The Hetzner server type of the VM (e.g. `cx21`)
  * `image`: The image to use for the VM
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to SSH to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the Kubernetes nodes on ports 30000-32767 (Kubernetes NodePorts)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the Kubernetes workers on ports 80 and 443
@@ -0,0 +1,44 @@ contrib/terraform/hetzner/default.tfvars

prefix = "default"
zone   = "hel1"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ingress_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
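Note that the sample whitelists are wide open (`0.0.0.0/0`). For anything beyond a throwaway test cluster, consider restricting at least SSH and the API server to your own ranges — for example (the CIDR below is a documentation range, TEST-NET-3; substitute your own):

```hcl
# Hypothetical hardening of default.tfvars — replace the documentation
# range with your actual source network.
ssh_whitelist = [
  "203.0.113.0/24",
]

api_server_whitelist = [
  "203.0.113.0/24",
]
```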
@@ -0,0 +1,51 @@ contrib/terraform/hetzner/main.tf

provider "hcloud" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  zone = var.zone

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys

  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
  ingress_whitelist    = var.ingress_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
      values(module.kubernetes.master_ip_addresses).*.private_ip,
      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
      keys(module.kubernetes.worker_ip_addresses),
      values(module.kubernetes.worker_ip_addresses).*.public_ip,
      values(module.kubernetes.worker_ip_addresses).*.private_ip))

    list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
    list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}
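Because `null_resource.inventories` is keyed on the rendered template via `triggers`, re-running apply rewrites `inventory.ini` whenever node names or IPs change. A quick check, run from `inventory/$CLUSTER` where the state lives:

```bash
# Re-render the inventory after infrastructure changes and inspect it.
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
cat inventory.ini
```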
@@ -0,0 +1,122 @@ contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf

resource "hcloud_network" "kubernetes" {
  name     = "${var.prefix}-network"
  ip_range = var.private_network_cidr
}

resource "hcloud_network_subnet" "kubernetes" {
  type         = "cloud"
  network_id   = hcloud_network.kubernetes.id
  network_zone = "eu-central"
  ip_range     = var.private_subnet_cidr
}

resource "hcloud_server" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  name        = "${var.prefix}-${each.key}"
  image       = each.value.image
  server_type = each.value.size
  location    = var.zone

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      ssh_public_keys = var.ssh_public_keys
    }
  )

  firewall_ids = [hcloud_firewall.master.id]
}

resource "hcloud_server_network" "master" {
  for_each = hcloud_server.master

  server_id = each.value.id

  subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_server" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  name        = "${var.prefix}-${each.key}"
  image       = each.value.image
  server_type = each.value.size
  location    = var.zone

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      ssh_public_keys = var.ssh_public_keys
    }
  )

  firewall_ids = [hcloud_firewall.worker.id]

}

resource "hcloud_server_network" "worker" {
  for_each = hcloud_server.worker

  server_id = each.value.id

  subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_firewall" "master" {
  name = "${var.prefix}-master-firewall"

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "6443"
    source_ips = var.api_server_whitelist
  }
}

resource "hcloud_firewall" "worker" {
  name = "${var.prefix}-worker-firewall"

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "80"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "443"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "30000-32767"
    source_ips = var.nodeport_whitelist
  }
}
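Hetzner Cloud firewalls filter only public-interface traffic, so the private network between nodes is unaffected by these rules. If you need additional inbound rules — say, ICMP echo for monitoring — extending a firewall is just another `rule` block. A hypothetical addition to `hcloud_firewall.worker` (not part of this module; note that ICMP rules in the hcloud provider take no `port`):

```hcl
# Hypothetical extra rule — allow ICMP (e.g. ping) from the SSH whitelist.
rule {
  direction  = "in"
  protocol   = "icmp"
  source_ips = var.ssh_whitelist
}
```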
@@ -0,0 +1,23 @@ contrib/terraform/hetzner/modules/kubernetes-cluster/output.tf

output "master_ip_addresses" {
  value = {
    for key, instance in hcloud_server.master :
    instance.name => {
      "private_ip" = hcloud_server_network.master[key].ip
      "public_ip"  = hcloud_server.master[key].ipv4_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in hcloud_server.worker :
    instance.name => {
      "private_ip" = hcloud_server_network.worker[key].ip
      "public_ip"  = hcloud_server.worker[key].ipv4_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_subnet_cidr
}
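Each map is keyed by the full server name (prefix included), which is what the inventory template later uses as the Ansible host name. With the sample `default.tfvars`, `master_ip_addresses` would look roughly like this (the IP addresses are illustrative):

```text
master_ip_addresses = {
  "default-master-0" = {
    "private_ip" = "10.0.10.2"
    "public_ip"  = "192.0.2.10"
  }
}
```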
@@ -0,0 +1,17 @@ contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl

#cloud-config

users:
  - default
  - name: ubuntu
    shell: /bin/bash
    sudo: "ALL=(ALL) NOPASSWD:ALL"
    ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
      - ${ssh_public_key}
%{ endfor ~}

# Top-level ssh_authorized_keys apply to the distribution's default user.
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}
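For reference, given a single key the Terraform `%{ for ~}` directives render to roughly the following cloud-config (the key itself is a placeholder):

```yaml
#cloud-config

users:
  - default
  - name: ubuntu
    shell: /bin/bash
    sudo: "ALL=(ALL) NOPASSWD:ALL"
    ssh_authorized_keys:
      - ssh-ed25519 AAAA...placeholder user@example

ssh_authorized_keys:
  - ssh-ed25519 AAAA...placeholder user@example
```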
@@ -0,0 +1,41 @@ contrib/terraform/hetzner/modules/kubernetes-cluster/variables.tf

variable "zone" {
  type = string
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    image     = string
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "ingress_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
  default = "10.0.10.0/24"
}
@@ -0,0 +1,9 @@ contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf

terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
    }
  }
  required_version = ">= 0.14"
}
@@ -0,0 +1,7 @@ contrib/terraform/hetzner/output.tf

output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}
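These root outputs re-export the module's address maps, so you can inspect the provisioned IPs after an apply without opening the inventory file:

```bash
# Run from inventory/$CLUSTER, where the Terraform state lives.
terraform output master_ips
terraform output worker_ips
```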
@@ -0,0 +1,16 @@ contrib/terraform/hetzner/templates/inventory.tpl

[all]
${connection_strings_master}
${connection_strings_worker}

[kube-master]
${list_master}

[etcd]
${list_master}

[kube-node]
${list_worker}

[k8s-cluster:children]
kube-master
kube-node
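Combined with the format strings in `main.tf`, the rendered `inventory.ini` looks roughly like the following for the sample `default.tfvars` (the IP addresses are illustrative):

```ini
[all]
default-master-0 ansible_user=ubuntu ansible_host=192.0.2.10 ip=10.0.10.2 etcd_member_name=etcd1
default-worker-0 ansible_user=ubuntu ansible_host=192.0.2.11 ip=10.0.10.3
default-worker-1 ansible_user=ubuntu ansible_host=192.0.2.12 ip=10.0.10.4

[kube-master]
default-master-0

[etcd]
default-master-0

[kube-node]
default-worker-0
default-worker-1

[k8s-cluster:children]
kube-master
kube-node
```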
@@ -0,0 +1,46 @@ contrib/terraform/hetzner/variables.tf

variable "zone" {
  description = "The zone where to run the cluster"
}

variable "prefix" {
  description = "Prefix for resource names"
  default     = "default"
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    size      = string
    image     = string
  }))
}

variable "ssh_public_keys" {
  description = "Public SSH keys which are injected into the VMs"
  type        = list(string)
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for SSH"
  type        = list(string)
}

variable "api_server_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for the Kubernetes API server"
  type        = list(string)
}

variable "nodeport_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for Kubernetes NodePorts"
  type        = list(string)
}

variable "ingress_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for HTTP/HTTPS ingress"
  type        = list(string)
}

variable "inventory_file" {
  description = "Where to store the generated inventory file"
}
@@ -0,0 +1,15 @@ contrib/terraform/hetzner/versions.tf

terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.14"
}