homelab/tf/modules/k8s/main.tf

locals {
  control_plane = [
    for i in range(var.control_plane_count) : {
      name = "${var.cluster_name}-cp-${format("%02d", i + 1)}"
    }
  ]
  workers = [
    for i in range(var.worker_count) : {
      name = "${var.cluster_name}-worker-${format("%02d", i + 1)}"
    }
  ]
  storage_workers = [
    for i in range(var.storage_worker_count) : {
      name = "${var.cluster_name}-storage-worker-${format("%02d", i + 1)}"
    }
  ]
  storage_dummies = [
    for i in range(var.storage_worker_count) : {
      name = "${var.cluster_name}-storage-worker-${format("%02d", i + 1)}-dummy"
    }
  ]
}
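
# For illustration (values hypothetical): with cluster_name = "k8s" and
# control_plane_count = 3, local.control_plane evaluates to
#   [{ name = "k8s-cp-01" }, { name = "k8s-cp-02" }, { name = "k8s-cp-03" }]
# since range(n) yields [0, ..., n-1] and format("%02d", i + 1) zero-pads
# the one-based index to two digits.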
resource "proxmox_virtual_environment_pool" "k8s_pool" {
comment = "Managed by Terraform"
pool_id = var.cluster_name
}
resource "proxmox_virtual_environment_vm" "k8s_control_plane" {
count = var.control_plane_count
name = local.control_plane[count.index].name
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "k8s-cp", var.cluster_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 101}"
pool_id = proxmox_virtual_environment_pool.k8s_pool.id
started = var.started
cpu {
cores = 2
type = "host"
}
memory {
dedicated = 4096
floating = 4096
}
  agent {
    # The guest image must be running qemu-guest-agent before this is set to
    # true, or Terraform will hang waiting for the VM to report an IP address.
    enabled = true
  }
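  # Boot the control plane (order 3) before the workers (order 4).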
  startup {
    order      = "3"
    up_delay   = "60"
    down_delay = "60"
  }
  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 16
    file_format  = "raw"
  }
  initialization {
    dns {
      servers = [var.dns_server_ip]
    }
    ip_config {
      ipv4 {
        # x.x.x.32 - x.x.x.47
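        # e.g. cidrhost("10.0.0.0/24", 32) = "10.0.0.32" for the first node
        # (subnet value hypothetical)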
        address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24"
        gateway = var.gateway
      }
    }
    datastore_id      = var.proxmox_image_storage
    user_data_file_id = var.cloud_init_file_id
  }
  network_device {
    bridge = "vmbr0"
  }
  operating_system {
    type = "l26"
  }
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = file(var.ssh_private_key_file)
    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
  }
  provisioner "remote-exec" {
    inline = [
      "sudo hostnamectl set-hostname ${self.name}",
      "sudo systemctl restart avahi-daemon",
    ]
  }
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}
resource "proxmox_virtual_environment_vm" "k8s_worker" {
count = var.worker_count
name = local.workers[count.index].name
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "k8s-node", var.cluster_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 201}"
pool_id = proxmox_virtual_environment_pool.k8s_pool.id
started = var.started
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 8192
floating = 8192
}
  agent {
    # See the note on the control-plane resource: qemu-guest-agent must be
    # running in the guest before this is enabled.
    enabled = true
  }
  startup {
    order      = "4"
    up_delay   = "60"
    down_delay = "60"
  }
  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "virtio0"
    iothread     = true
    discard      = "on"
    size         = 32
    file_format  = "raw"
  }
  initialization {
    dns {
      servers = [var.dns_server_ip]
    }
    ip_config {
      ipv4 {
        # x.x.x.48 - x.x.x.79
        address = "${cidrhost(var.subnet_cidr, count.index + 48)}/24"
        gateway = var.gateway
      }
    }
    datastore_id      = var.proxmox_image_storage
    user_data_file_id = var.cloud_init_file_id
  }
  network_device {
    bridge = "vmbr0"
  }
  operating_system {
    type = "l26"
  }
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = file(var.ssh_private_key_file)
    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
  }
  provisioner "remote-exec" {
    inline = [
      "sudo hostnamectl set-hostname ${self.name}",
      "sudo systemctl restart avahi-daemon",
    ]
  }
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

# This is currently how we create "disks" that are independent of a VM: by
# creating a dummy VM with a disk and then attaching the disk. This way, we
# can destroy the real VM without destroying the disk.
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_vm#example-attached-disks
resource "proxmox_virtual_environment_vm" "k8s_storage_dummy" {
count = var.storage_worker_count
name = local.storage_dummies[count.index].name
description = "Managed by Terraform"
tags = ["terraform", "disk-dummy", var.cluster_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 401}"
pool_id = proxmox_virtual_environment_pool.k8s_pool.id
started = false
on_boot = false
disk {
datastore_id = var.proxmox_vm_storage
file_format = "qcow2"
interface = "scsi0"
size = var.storage_size
}
}
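
# Because the data disks belong to the dummy VMs above, a storage worker can
# be destroyed and rebuilt while its data disk survives, e.g. (illustrative
# only, adjust the index and quoting for your shell):
#
#   terraform destroy -target='proxmox_virtual_environment_vm.k8s_storage_worker[0]'
#   terraform apply
#
# leaves the dummy's qcow2 disk intact for the replacement VM to re-attach.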
resource "proxmox_virtual_environment_vm" "k8s_storage_worker" {
count = var.storage_worker_count
name = local.storage_workers[count.index].name
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "k8s-node", var.cluster_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 301}"
pool_id = proxmox_virtual_environment_pool.k8s_pool.id
started = var.started
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 8192
floating = 8192
}
  agent {
    # See the note on the control-plane resource: qemu-guest-agent must be
    # running in the guest before this is enabled.
    enabled = true
  }
  startup {
    order      = "4"
    up_delay   = "60"
    down_delay = "60"
  }
  disk {
    datastore_id = var.proxmox_vm_storage
    file_id      = var.cloud_image_id
    interface    = "scsi0"
    discard      = "on"
    size         = 32
    file_format  = "qcow2"
  }
  # scsi on these guys for hotplugging and resizing
  dynamic "disk" {
    for_each = { for idx, val in proxmox_virtual_environment_vm.k8s_storage_dummy[count.index].disk : idx => val }
    iterator = data_disk
    content {
      datastore_id      = data_disk.value["datastore_id"]
      path_in_datastore = data_disk.value["path_in_datastore"]
      file_format       = data_disk.value["file_format"]
      size              = data_disk.value["size"]
      # assign from scsi1 and up
      interface = "scsi${data_disk.key + 1}"
    }
  }
  initialization {
    dns {
      servers = [var.dns_server_ip]
    }
    ip_config {
      ipv4 {
        # x.x.x.80 - x.x.x.87
        address = "${cidrhost(var.subnet_cidr, count.index + 80)}/24"
        gateway = var.gateway
      }
    }
    datastore_id      = var.proxmox_image_storage
    user_data_file_id = var.cloud_init_file_id
  }
  network_device {
    bridge = "vmbr0"
  }
  operating_system {
    type = "l26"
  }
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = file(var.ssh_private_key_file)
    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
  }
  provisioner "remote-exec" {
    inline = [
      "sudo hostnamectl set-hostname ${self.name}",
      "sudo systemctl restart avahi-daemon",
    ]
  }
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}
resource "ansible_host" "k8s_control_plane" {
count = var.control_plane_count
# Use mDNS rather than IP
name = "${local.control_plane[count.index].name}.local"
groups = ["${var.cluster_name}_master", var.cluster_name]
}
resource "ansible_host" "k8s_worker" {
count = var.worker_count
# Use mDNS rather than IP
name = "${local.workers[count.index].name}.local"
groups = ["${var.cluster_name}_node", var.cluster_name]
}
resource "ansible_host" "k8s_storage_worker" {
count = var.storage_worker_count
# Use mDNS rather than IP
name = "${local.storage_workers[count.index].name}.local"
groups = ["${var.cluster_name}_node", "${var.cluster_name}_storage", var.cluster_name]
}
resource "ansible_group" "cluster" {
name = "${var.cluster_name}_k3s_cluster"
children = ["${var.cluster_name}_master", "${var.cluster_name}_node"]
}
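
# Sketch of consuming this inventory from Ansible, assuming the
# ansible/ansible Terraform provider together with the cloud.terraform
# collection (file names and paths hypothetical):
#
#   # inventory.yml
#   plugin: cloud.terraform.terraform_provider
#   project_path: ../../
#
#   $ ansible-inventory -i inventory.yml --graph
#
# The .local names resolve via mDNS; the remote-exec provisioners above
# restart avahi-daemon after setting each hostname so the names are
# advertised promptly.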