Browse Source
Added terraform support for NIFCLOUD (#10227)
Added terraform support for NIFCLOUD (#10227)
* Add NIFCLOUD * Add tf-validate-nifcloud in gitlab-cipull/10226/head
committed by
GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 844 additions and 0 deletions
Split View
Diff Options
-
6.gitlab-ci/terraform.yml
-
5contrib/terraform/nifcloud/.gitignore
-
137contrib/terraform/nifcloud/README.md
-
64contrib/terraform/nifcloud/generate-inventory.sh
-
36contrib/terraform/nifcloud/main.tf
-
301contrib/terraform/nifcloud/modules/kubernetes-cluster/main.tf
-
48contrib/terraform/nifcloud/modules/kubernetes-cluster/outputs.tf
-
45contrib/terraform/nifcloud/modules/kubernetes-cluster/templates/userdata.tftpl
-
9contrib/terraform/nifcloud/modules/kubernetes-cluster/terraform.tf
-
81contrib/terraform/nifcloud/modules/kubernetes-cluster/variables.tf
-
3contrib/terraform/nifcloud/output.tf
-
22contrib/terraform/nifcloud/sample-inventory/cluster.tfvars
-
1contrib/terraform/nifcloud/sample-inventory/group_vars
-
9contrib/terraform/nifcloud/terraform.tf
-
77contrib/terraform/nifcloud/variables.tf
@ -0,0 +1,5 @@ |
|||
*.tfstate* |
|||
.terraform.lock.hcl |
|||
.terraform |
|||
|
|||
sample-inventory/inventory.ini |
@ -0,0 +1,137 @@ |
|||
# Kubernetes on NIFCLOUD with Terraform |
|||
|
|||
Provision a Kubernetes cluster on [NIFCLOUD](https://pfs.nifcloud.com/) using Terraform and Kubespray |
|||
|
|||
## Overview |
|||
|
|||
The setup looks like the following |
|||
|
|||
```text |
|||
Kubernetes cluster |
|||
+----------------------------+ |
|||
+---------------+ | +--------------------+ | |
|||
| | | | +--------------------+ | |
|||
| API server LB +---------> | | | | |
|||
| | | | | Control Plane/etcd | | |
|||
+---------------+ | | | node(s) | | |
|||
| +-+ | | |
|||
| +--------------------+ | |
|||
| ^ | |
|||
| | | |
|||
| v | |
|||
| +--------------------+ | |
|||
| | +--------------------+ | |
|||
| | | | | |
|||
| | | Worker | | |
|||
| | | node(s) | | |
|||
| +-+ | | |
|||
| +--------------------+ | |
|||
+----------------------------+ |
|||
``` |
|||
|
|||
## Requirements |
|||
|
|||
* Terraform 1.3.7 |
|||
|
|||
## Quickstart |
|||
|
|||
### Export Variables |
|||
|
|||
* Your NIFCLOUD credentials: |
|||
|
|||
```bash |
|||
export NIFCLOUD_ACCESS_KEY_ID=<YOUR ACCESS KEY> |
|||
export NIFCLOUD_SECRET_ACCESS_KEY=<YOUR SECRET ACCESS KEY> |
|||
``` |
|||
|
|||
* The SSH KEY used to connect to the instance: |
|||
* FYI: [Cloud Help(SSH Key)](https://pfs.nifcloud.com/help/ssh.htm) |
|||
|
|||
```bash |
|||
export TF_VAR_SSHKEY_NAME=<YOUR SSHKEY NAME> |
|||
``` |
|||
|
|||
* The IP address to connect to bastion server: |
|||
|
|||
```bash |
|||
export TF_VAR_working_instance_ip=$(curl ifconfig.me) |
|||
``` |
|||
|
|||
### Create The Infrastructure |
|||
|
|||
* Run terraform: |
|||
|
|||
```bash |
|||
terraform init |
|||
terraform apply -var-file ./sample-inventory/cluster.tfvars |
|||
``` |
|||
|
|||
### Setup The Kubernetes |
|||
|
|||
* Generate cluster configuration file: |
|||
|
|||
```bash |
|||
./generate-inventory.sh > sample-inventory/inventory.ini |
|||
``` |
|||
 |
|||
* Export Variables: |
|||
|
|||
```bash |
|||
BASTION_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.bastion_info | to_entries[].value.public_ip') |
|||
API_LB_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_lb') |
|||
CP01_IP=$(terraform output -json | jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[0].value.private_ip') |
|||
export ANSIBLE_SSH_ARGS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand=\"ssh root@${BASTION_IP} -W %h:%p\"" |
|||
``` |
|||
|
|||
* Set ssh-agent: |
|||
|
|||
```bash |
|||
eval `ssh-agent` |
|||
ssh-add <THE PATH TO YOUR SSH KEY> |
|||
``` |
|||
|
|||
* Run cluster.yml playbook: |
|||
|
|||
```bash |
|||
cd ./../../../ |
|||
ansible-playbook -i contrib/terraform/nifcloud/sample-inventory/inventory.ini cluster.yml |
|||
``` |
|||
|
|||
### Connecting to Kubernetes |
|||
|
|||
* [Install kubectl](https://kubernetes.io/docs/tasks/tools/) on the localhost |
|||
* Fetching kubeconfig file: |
|||
|
|||
```bash |
|||
mkdir -p ~/.kube |
|||
scp -o ProxyCommand="ssh root@${BASTION_IP} -W %h:%p" root@${CP01_IP}:/etc/kubernetes/admin.conf ~/.kube/config |
|||
``` |
|||
|
|||
* Rewrite /etc/hosts |
|||
|
|||
```bash |
|||
echo "${API_LB_IP} lb-apiserver.kubernetes.local" | sudo tee -a /etc/hosts |
|||
``` |
|||
|
|||
* Run kubectl |
|||
|
|||
```bash |
|||
kubectl get node |
|||
``` |
|||
|
|||
## Variables |
|||
|
|||
* `region`: Region where to run the cluster |
|||
* `az`: Availability zone where to run the cluster |
|||
* `private_ip_bn`: Private ip address of bastion server |
|||
* `private_network_cidr`: Subnet of private network |
|||
* `instances_cp`: Machine to provision as Control Plane. Key of this object will be used as part of the machine's name |
|||
* `private_ip`: private ip address of machine |
|||
* `instances_wk`: Machine to provision as Worker Node. Key of this object will be used as part of the machine's name |
|||
* `private_ip`: private ip address of machine |
|||
* `instance_key_name`: The key name of the Key Pair to use for the instance |
|||
* `instance_type_bn`: The instance type of bastion server |
|||
* `instance_type_wk`: The instance type of worker node |
|||
* `instance_type_cp`: The instance type of control plane |
|||
* `image_name`: OS image used for the instance |
|||
* `working_instance_ip`: The IP address to connect to bastion server |
|||
* `accounting_type`: Accounting type. (1: monthly, 2: pay per use) |
@ -0,0 +1,64 @@ |
|||
#!/bin/bash |
|||
|
|||
# |
|||
# Generates an inventory file based on the terraform output. |
|||
# After provisioning a cluster, simply run this command and supply the terraform state file |
|||
# Default state file is terraform.tfstate |
|||
# |
|||
|
|||
set -e |
|||
|
|||
TF_OUT=$(terraform output -json) |
|||
|
|||
CONTROL_PLANES=$(jq -r '.kubernetes_cluster.value.control_plane_info | to_entries[]' <(echo "${TF_OUT}")) |
|||
WORKERS=$(jq -r '.kubernetes_cluster.value.worker_info | to_entries[]' <(echo "${TF_OUT}")) |
|||
mapfile -t CONTROL_PLANE_NAMES < <(jq -r '.key' <(echo "${CONTROL_PLANES}")) |
|||
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) |
|||
|
|||
API_LB=$(jq -r '.kubernetes_cluster.value.control_plane_lb' <(echo "${TF_OUT}")) |
|||
|
|||
echo "[all]" |
|||
# Generate control plane hosts |
|||
i=1 |
|||
for name in "${CONTROL_PLANE_NAMES[@]}"; do |
|||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${CONTROL_PLANES}")) |
|||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip} etcd_member_name=etcd${i}" |
|||
i=$(( i + 1 )) |
|||
done |
|||
|
|||
# Generate worker hosts |
|||
for name in "${WORKER_NAMES[@]}"; do |
|||
private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}")) |
|||
echo "${name} ansible_user=root ansible_host=${private_ip} access_ip=${private_ip} ip=${private_ip}" |
|||
done |
|||
|
|||
# NOTE(review): duplicate assignment removed — API_LB is already captured above from the same terraform output |
|||
|
|||
echo "" |
|||
echo "[all:vars]" |
|||
echo "upstream_dns_servers=['8.8.8.8','8.8.4.4']" |
|||
echo "loadbalancer_apiserver={'address':'${API_LB}','port':'6443'}" |
|||
|
|||
|
|||
echo "" |
|||
echo "[kube_control_plane]" |
|||
for name in "${CONTROL_PLANE_NAMES[@]}"; do |
|||
echo "${name}" |
|||
done |
|||
|
|||
echo "" |
|||
echo "[etcd]" |
|||
for name in "${CONTROL_PLANE_NAMES[@]}"; do |
|||
echo "${name}" |
|||
done |
|||
|
|||
echo "" |
|||
echo "[kube_node]" |
|||
for name in "${WORKER_NAMES[@]}"; do |
|||
echo "${name}" |
|||
done |
|||
|
|||
echo "" |
|||
echo "[k8s_cluster:children]" |
|||
echo "kube_control_plane" |
|||
echo "kube_node" |
@ -0,0 +1,36 @@ |
|||
provider "nifcloud" { |
|||
region = var.region |
|||
} |
|||
|
|||
module "kubernetes_cluster" { |
|||
source = "./modules/kubernetes-cluster" |
|||
|
|||
availability_zone = var.az |
|||
prefix = "dev" |
|||
|
|||
private_network_cidr = var.private_network_cidr |
|||
|
|||
instance_key_name = var.instance_key_name |
|||
instances_cp = var.instances_cp |
|||
instances_wk = var.instances_wk |
|||
image_name = var.image_name |
|||
|
|||
instance_type_bn = var.instance_type_bn |
|||
instance_type_cp = var.instance_type_cp |
|||
instance_type_wk = var.instance_type_wk |
|||
|
|||
private_ip_bn = var.private_ip_bn |
|||
|
|||
additional_lb_filter = [var.working_instance_ip] |
|||
} |
|||
|
|||
resource "nifcloud_security_group_rule" "ssh_from_bastion" { |
|||
security_group_names = [ |
|||
module.kubernetes_cluster.security_group_name.bastion |
|||
] |
|||
type = "IN" |
|||
from_port = 22 |
|||
to_port = 22 |
|||
protocol = "TCP" |
|||
cidr_ip = var.working_instance_ip |
|||
} |
@ -0,0 +1,301 @@ |
|||
################################################# |
|||
## |
|||
## Local variables |
|||
## |
|||
locals { |
|||
# e.g. east-11 is 11 |
|||
az_num = reverse(split("-", var.availability_zone))[0] |
|||
# e.g. east-11 is e11 |
|||
az_short_name = "${substr(reverse(split("-", var.availability_zone))[1], 0, 1)}${local.az_num}" |
|||
|
|||
# Port used by the protocol |
|||
port_ssh = 22 |
|||
port_kubectl = 6443 |
|||
port_kubelet = 10250 |
|||
|
|||
# calico: https://docs.tigera.io/calico/latest/getting-started/kubernetes/requirements#network-requirements |
|||
port_bgp = 179 |
|||
port_vxlan = 4789 |
|||
port_etcd = 2379 |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## General |
|||
## |
|||
|
|||
# data |
|||
data "nifcloud_image" "this" { |
|||
image_name = var.image_name |
|||
} |
|||
|
|||
# private lan |
|||
resource "nifcloud_private_lan" "this" { |
|||
private_lan_name = "${var.prefix}lan" |
|||
availability_zone = var.availability_zone |
|||
cidr_block = var.private_network_cidr |
|||
accounting_type = var.accounting_type |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## Bastion |
|||
## |
|||
resource "nifcloud_security_group" "bn" { |
|||
group_name = "${var.prefix}bn" |
|||
description = "${var.prefix} bastion" |
|||
availability_zone = var.availability_zone |
|||
} |
|||
|
|||
resource "nifcloud_instance" "bn" { |
|||
|
|||
instance_id = "${local.az_short_name}${var.prefix}bn01" |
|||
security_group = nifcloud_security_group.bn.group_name |
|||
instance_type = var.instance_type_bn |
|||
|
|||
user_data = templatefile("${path.module}/templates/userdata.tftpl", { |
|||
private_ip_address = var.private_ip_bn |
|||
ssh_port = local.port_ssh |
|||
hostname = "${local.az_short_name}${var.prefix}bn01" |
|||
}) |
|||
|
|||
availability_zone = var.availability_zone |
|||
accounting_type = var.accounting_type |
|||
image_id = data.nifcloud_image.this.image_id |
|||
key_name = var.instance_key_name |
|||
|
|||
network_interface { |
|||
network_id = "net-COMMON_GLOBAL" |
|||
} |
|||
network_interface { |
|||
network_id = nifcloud_private_lan.this.network_id |
|||
ip_address = "static" |
|||
} |
|||
|
|||
# The image_id changes when the OS image type is demoted from standard to public. |
|||
lifecycle { |
|||
ignore_changes = [ |
|||
image_id, |
|||
user_data, |
|||
] |
|||
} |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## Control Plane |
|||
## |
|||
resource "nifcloud_security_group" "cp" { |
|||
group_name = "${var.prefix}cp" |
|||
description = "${var.prefix} control plane" |
|||
availability_zone = var.availability_zone |
|||
} |
|||
|
|||
resource "nifcloud_instance" "cp" { |
|||
for_each = var.instances_cp |
|||
|
|||
instance_id = "${local.az_short_name}${var.prefix}${each.key}" |
|||
security_group = nifcloud_security_group.cp.group_name |
|||
instance_type = var.instance_type_cp |
|||
user_data = templatefile("${path.module}/templates/userdata.tftpl", { |
|||
private_ip_address = each.value.private_ip |
|||
ssh_port = local.port_ssh |
|||
hostname = "${local.az_short_name}${var.prefix}${each.key}" |
|||
}) |
|||
|
|||
availability_zone = var.availability_zone |
|||
accounting_type = var.accounting_type |
|||
image_id = data.nifcloud_image.this.image_id |
|||
key_name = var.instance_key_name |
|||
|
|||
network_interface { |
|||
network_id = "net-COMMON_GLOBAL" |
|||
} |
|||
network_interface { |
|||
network_id = nifcloud_private_lan.this.network_id |
|||
ip_address = "static" |
|||
} |
|||
|
|||
# The image_id changes when the OS image type is demoted from standard to public. |
|||
lifecycle { |
|||
ignore_changes = [ |
|||
image_id, |
|||
user_data, |
|||
] |
|||
} |
|||
} |
|||
|
|||
resource "nifcloud_load_balancer" "this" { |
|||
load_balancer_name = "${local.az_short_name}${var.prefix}cp" |
|||
accounting_type = var.accounting_type |
|||
balancing_type = 1 // Round-Robin |
|||
load_balancer_port = local.port_kubectl |
|||
instance_port = local.port_kubectl |
|||
instances = [for v in nifcloud_instance.cp : v.instance_id] |
|||
filter = concat( |
|||
[for k, v in nifcloud_instance.cp : v.public_ip], |
|||
[for k, v in nifcloud_instance.wk : v.public_ip], |
|||
var.additional_lb_filter, |
|||
) |
|||
filter_type = 1 // Allow |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## Worker |
|||
## |
|||
resource "nifcloud_security_group" "wk" { |
|||
group_name = "${var.prefix}wk" |
|||
description = "${var.prefix} worker" |
|||
availability_zone = var.availability_zone |
|||
} |
|||
|
|||
resource "nifcloud_instance" "wk" { |
|||
for_each = var.instances_wk |
|||
|
|||
instance_id = "${local.az_short_name}${var.prefix}${each.key}" |
|||
security_group = nifcloud_security_group.wk.group_name |
|||
instance_type = var.instance_type_wk |
|||
user_data = templatefile("${path.module}/templates/userdata.tftpl", { |
|||
private_ip_address = each.value.private_ip |
|||
ssh_port = local.port_ssh |
|||
hostname = "${local.az_short_name}${var.prefix}${each.key}" |
|||
}) |
|||
|
|||
availability_zone = var.availability_zone |
|||
accounting_type = var.accounting_type |
|||
image_id = data.nifcloud_image.this.image_id |
|||
key_name = var.instance_key_name |
|||
|
|||
network_interface { |
|||
network_id = "net-COMMON_GLOBAL" |
|||
} |
|||
network_interface { |
|||
network_id = nifcloud_private_lan.this.network_id |
|||
ip_address = "static" |
|||
} |
|||
|
|||
# The image_id changes when the OS image type is demoted from standard to public. |
|||
lifecycle { |
|||
ignore_changes = [ |
|||
image_id, |
|||
user_data, |
|||
] |
|||
} |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## Security Group Rule: Kubernetes |
|||
## |
|||
|
|||
# ssh |
|||
resource "nifcloud_security_group_rule" "ssh_from_bastion" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.wk.group_name, |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_ssh |
|||
to_port = local.port_ssh |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.bn.group_name |
|||
} |
|||
|
|||
# kubectl |
|||
resource "nifcloud_security_group_rule" "kubectl_from_worker" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_kubectl |
|||
to_port = local.port_kubectl |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.wk.group_name |
|||
} |
|||
|
|||
# kubelet |
|||
resource "nifcloud_security_group_rule" "kubelet_from_worker" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_kubelet |
|||
to_port = local.port_kubelet |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.wk.group_name |
|||
} |
|||
|
|||
resource "nifcloud_security_group_rule" "kubelet_from_control_plane" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.wk.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_kubelet |
|||
to_port = local.port_kubelet |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.cp.group_name |
|||
} |
|||
|
|||
################################################# |
|||
## |
|||
## Security Group Rule: calico |
|||
## |
|||
|
|||
# vxlan |
|||
resource "nifcloud_security_group_rule" "vxlan_from_control_plane" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.wk.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_vxlan |
|||
to_port = local.port_vxlan |
|||
protocol = "UDP" |
|||
source_security_group_name = nifcloud_security_group.cp.group_name |
|||
} |
|||
|
|||
resource "nifcloud_security_group_rule" "vxlan_from_worker" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_vxlan |
|||
to_port = local.port_vxlan |
|||
protocol = "UDP" |
|||
source_security_group_name = nifcloud_security_group.wk.group_name |
|||
} |
|||
|
|||
# bgp |
|||
resource "nifcloud_security_group_rule" "bgp_from_control_plane" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.wk.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_bgp |
|||
to_port = local.port_bgp |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.cp.group_name |
|||
} |
|||
|
|||
resource "nifcloud_security_group_rule" "bgp_from_worker" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_bgp |
|||
to_port = local.port_bgp |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.wk.group_name |
|||
} |
|||
|
|||
# etcd |
|||
resource "nifcloud_security_group_rule" "etcd_from_worker" { |
|||
security_group_names = [ |
|||
nifcloud_security_group.cp.group_name, |
|||
] |
|||
type = "IN" |
|||
from_port = local.port_etcd |
|||
to_port = local.port_etcd |
|||
protocol = "TCP" |
|||
source_security_group_name = nifcloud_security_group.wk.group_name |
|||
} |
@ -0,0 +1,48 @@ |
|||
output "control_plane_lb" { |
|||
description = "The DNS name of LB for control plane" |
|||
value = nifcloud_load_balancer.this.dns_name |
|||
} |
|||
|
|||
output "security_group_name" { |
|||
description = "The security group used in the cluster" |
|||
value = { |
|||
bastion = nifcloud_security_group.bn.group_name, |
|||
control_plane = nifcloud_security_group.cp.group_name, |
|||
worker = nifcloud_security_group.wk.group_name, |
|||
} |
|||
} |
|||
|
|||
output "private_network_id" { |
|||
description = "The private network used in the cluster" |
|||
value = nifcloud_private_lan.this.id |
|||
} |
|||
|
|||
output "bastion_info" { |
|||
description = "The bastion information in cluster" |
|||
value = { (nifcloud_instance.bn.instance_id) : { |
|||
instance_id = nifcloud_instance.bn.instance_id, |
|||
unique_id = nifcloud_instance.bn.unique_id, |
|||
private_ip = nifcloud_instance.bn.private_ip, |
|||
public_ip = nifcloud_instance.bn.public_ip, |
|||
} } |
|||
} |
|||
|
|||
output "worker_info" { |
|||
description = "The worker information in cluster" |
|||
value = { for v in nifcloud_instance.wk : v.instance_id => { |
|||
instance_id = v.instance_id, |
|||
unique_id = v.unique_id, |
|||
private_ip = v.private_ip, |
|||
public_ip = v.public_ip, |
|||
} } |
|||
} |
|||
|
|||
output "control_plane_info" { |
|||
description = "The control plane information in cluster" |
|||
value = { for v in nifcloud_instance.cp : v.instance_id => { |
|||
instance_id = v.instance_id, |
|||
unique_id = v.unique_id, |
|||
private_ip = v.private_ip, |
|||
public_ip = v.public_ip, |
|||
} } |
|||
} |
@ -0,0 +1,45 @@ |
|||
#!/bin/bash |
|||
|
|||
################################################# |
|||
## |
|||
## IP Address |
|||
## |
|||
configure_private_ip_address () { |
|||
cat << EOS > /etc/netplan/01-netcfg.yaml |
|||
network: |
|||
version: 2 |
|||
renderer: networkd |
|||
ethernets: |
|||
ens192: |
|||
dhcp4: yes |
|||
dhcp6: yes |
|||
dhcp-identifier: mac |
|||
ens224: |
|||
dhcp4: no |
|||
dhcp6: no |
|||
addresses: [${private_ip_address}] |
|||
EOS |
|||
netplan apply |
|||
} |
|||
configure_private_ip_address |
|||
|
|||
################################################# |
|||
## |
|||
## SSH |
|||
## |
|||
configure_ssh_port () { |
|||
sed -i 's/^#*Port [0-9]*/Port ${ssh_port}/' /etc/ssh/sshd_config |
|||
} |
|||
configure_ssh_port |
|||
|
|||
################################################# |
|||
## |
|||
## Hostname |
|||
## |
|||
hostnamectl set-hostname ${hostname} |
|||
|
|||
################################################# |
|||
## |
|||
## Disable swap files generated by systemd-gpt-auto-generator |
|||
## |
|||
systemctl mask "dev-sda3.swap" |
@ -0,0 +1,9 @@ |
|||
terraform { |
|||
required_version = ">=1.3.7" |
|||
required_providers { |
|||
nifcloud = { |
|||
source = "nifcloud/nifcloud" |
|||
version = ">= 1.8.0, < 2.0.0" |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,81 @@ |
|||
variable "availability_zone" { |
|||
description = "The availability zone" |
|||
type = string |
|||
} |
|||
|
|||
variable "prefix" { |
|||
description = "The prefix for the entire cluster" |
|||
type = string |
|||
validation { |
|||
condition = length(var.prefix) <= 5 |
|||
error_message = "Must be at most 5 characters long." |
|||
} |
|||
} |
|||
|
|||
variable "private_network_cidr" { |
|||
description = "The subnet of private network" |
|||
type = string |
|||
validation { |
|||
condition = can(cidrnetmask(var.private_network_cidr)) |
|||
error_message = "Must be a valid IPv4 CIDR block address." |
|||
} |
|||
} |
|||
|
|||
variable "private_ip_bn" { |
|||
description = "Private IP of bastion server" |
|||
type = string |
|||
} |
|||
|
|||
variable "instances_cp" { |
|||
type = map(object({ |
|||
private_ip = string |
|||
})) |
|||
} |
|||
|
|||
variable "instances_wk" { |
|||
type = map(object({ |
|||
private_ip = string |
|||
})) |
|||
} |
|||
|
|||
variable "instance_key_name" { |
|||
description = "The key name of the Key Pair to use for the instance" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_bn" { |
|||
description = "The instance type of bastion server" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_wk" { |
|||
description = "The instance type of worker" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_cp" { |
|||
description = "The instance type of control plane" |
|||
type = string |
|||
} |
|||
|
|||
variable "image_name" { |
|||
description = "The name of image" |
|||
type = string |
|||
} |
|||
|
|||
variable "additional_lb_filter" { |
|||
description = "Additional LB filter" |
|||
type = list(string) |
|||
} |
|||
|
|||
variable "accounting_type" { |
|||
type = string |
|||
default = "1" |
|||
validation { |
|||
condition = anytrue([ |
|||
var.accounting_type == "1", // Monthly |
|||
var.accounting_type == "2", // Pay per use |
|||
]) |
|||
error_message = "Must be a 1 or 2." |
|||
} |
|||
} |
@ -0,0 +1,3 @@ |
|||
output "kubernetes_cluster" { |
|||
value = module.kubernetes_cluster |
|||
} |
@ -0,0 +1,22 @@ |
|||
region = "jp-west-1" |
|||
az = "west-11" |
|||
|
|||
instance_key_name = "deployerkey" |
|||
|
|||
instance_type_bn = "e-medium" |
|||
instance_type_cp = "e-medium" |
|||
instance_type_wk = "e-medium" |
|||
|
|||
private_network_cidr = "192.168.30.0/24" |
|||
instances_cp = { |
|||
"cp01" : { private_ip : "192.168.30.11/24" } |
|||
"cp02" : { private_ip : "192.168.30.12/24" } |
|||
"cp03" : { private_ip : "192.168.30.13/24" } |
|||
} |
|||
instances_wk = { |
|||
"wk01" : { private_ip : "192.168.30.21/24" } |
|||
"wk02" : { private_ip : "192.168.30.22/24" } |
|||
} |
|||
private_ip_bn = "192.168.30.10/24" |
|||
|
|||
image_name = "Ubuntu Server 22.04 LTS" |
@ -0,0 +1 @@ |
|||
../../../../inventory/sample/group_vars |
@ -0,0 +1,9 @@ |
|||
terraform { |
|||
required_version = ">=1.3.7" |
|||
required_providers { |
|||
nifcloud = { |
|||
source = "nifcloud/nifcloud" |
|||
version = "1.8.0" |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,77 @@ |
|||
variable "region" { |
|||
description = "The region" |
|||
type = string |
|||
} |
|||
|
|||
variable "az" { |
|||
description = "The availability zone" |
|||
type = string |
|||
} |
|||
|
|||
variable "private_ip_bn" { |
|||
description = "Private IP of bastion server" |
|||
type = string |
|||
} |
|||
|
|||
variable "private_network_cidr" { |
|||
description = "The subnet of private network" |
|||
type = string |
|||
validation { |
|||
condition = can(cidrnetmask(var.private_network_cidr)) |
|||
error_message = "Must be a valid IPv4 CIDR block address." |
|||
} |
|||
} |
|||
|
|||
variable "instances_cp" { |
|||
type = map(object({ |
|||
private_ip = string |
|||
})) |
|||
} |
|||
|
|||
variable "instances_wk" { |
|||
type = map(object({ |
|||
private_ip = string |
|||
})) |
|||
} |
|||
|
|||
variable "instance_key_name" { |
|||
description = "The key name of the Key Pair to use for the instance" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_bn" { |
|||
description = "The instance type of bastion server" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_wk" { |
|||
description = "The instance type of worker" |
|||
type = string |
|||
} |
|||
|
|||
variable "instance_type_cp" { |
|||
description = "The instance type of control plane" |
|||
type = string |
|||
} |
|||
|
|||
variable "image_name" { |
|||
description = "The name of image" |
|||
type = string |
|||
} |
|||
|
|||
variable "working_instance_ip" { |
|||
description = "The IP address to connect to bastion server." |
|||
type = string |
|||
} |
|||
|
|||
variable "accounting_type" { |
|||
type = string |
|||
default = "2" |
|||
validation { |
|||
condition = anytrue([ |
|||
var.accounting_type == "1", // Monthly |
|||
var.accounting_type == "2", // Pay per use |
|||
]) |
|||
error_message = "Must be a 1 or 2." |
|||
} |
|||
} |
Write
Preview
Loading…
Cancel
Save