chore: Refactor terraform
commit dc924f3f1b
parent ae22b0aee8
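
Consolidates the per-concern Terraform files (cloud image, DNS server, Docker Swarm, k8s, outpost, pools) into a single tf/main.tf, moves the Ubuntu cloud-image resource in with the cloud-init template, and extracts the Docker Swarm VM definitions into a reusable module at tf/modules/flock (manager and worker VMs, a Proxmox pool, and Ansible inventory hosts).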
@@ -1,11 +0,0 @@
-resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
-  content_type = "iso"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_file {
-    path     = "https://cloud-images.ubuntu.com/noble/20250122/noble-server-cloudimg-amd64.img"
-
-    checksum = "482244b83f49a97ee61fb9b8520d6e8b9c2e3c28648de461ba7e17681ddbd1c9"
-  }
-}
@@ -1 +0,0 @@
-../../data/ssh/id_rsa.pub
@@ -1,20 +0,0 @@
-locals {
-  dns_server_ip     = "10.0.123.123"
-  dns_server_subnet = "16"
-}
-
-module "dns_server" {
-  source = "./modules/dns-server"
-
-  vm_id = "200"
-
-  ipv4_address = "${local.dns_server_ip}/${local.dns_server_subnet}"
-  pool_id      = proxmox_virtual_environment_pool.core.id
-
-  proxmox_vm_storage    = var.proxmox_vm_storage
-  proxmox_image_storage = var.proxmox_image_storage
-
-  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-  ssh_private_key_file = var.ssh_private_key_file
-}
@@ -1,18 +0,0 @@
-module "docker_swarm_stingray" {
-  source = "./modules/docker-swarm"
-
-  swarm_name    = "stingray"
-  vm_id_prefix  = "8"
-  subnet_cidr   = "10.0.42.0/24"
-  gateway       = var.gateway
-  manager_count = 3
-  worker_count  = 3
-  dns_server_ip = local.dns_server_ip
-
-  proxmox_vm_storage    = var.proxmox_vm_storage
-  proxmox_image_storage = var.proxmox_image_storage
-
-  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-  ssh_private_key_file = var.ssh_private_key_file
-}
tf/k8s.tf
@@ -1,43 +0,0 @@
-module "k8s_dolo" {
-  source  = "./modules/k8s"
-  started = true
-
-  cluster_name         = "dolo"
-  vm_id_prefix         = "1"
-  subnet_cidr          = "10.0.185.0/24"
-  gateway              = var.gateway
-  control_plane_count  = 3
-  worker_count         = 3
-  storage_worker_count = 3
-  storage_size         = 32
-  dns_server_ip        = local.dns_server_ip
-
-  proxmox_vm_storage    = var.proxmox_vm_storage
-  proxmox_image_storage = var.proxmox_image_storage
-
-  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-  ssh_private_key_file = var.ssh_private_key_file
-}
-
-module "k8s_folly" {
-  source = "./modules/k8s"
-
-  started = false
-
-  cluster_name         = "folly"
-  vm_id_prefix         = "2"
-  subnet_cidr          = "10.0.186.0/24"
-  control_plane_count  = 0
-  worker_count         = 0
-  storage_worker_count = 0
-  dns_server_ip        = local.dns_server_ip
-
-  proxmox_vm_storage    = var.proxmox_vm_storage
-  proxmox_image_storage = var.proxmox_image_storage
-
-  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
-  ssh_private_key_file = var.ssh_private_key_file
-}
-
tf/main.tf (new file)
@@ -0,0 +1,94 @@
+locals {
+  dns_server_ip     = "10.0.123.123"
+  dns_server_subnet = "16"
+}
+
+resource "proxmox_virtual_environment_pool" "core" {
+  comment = "Managed by Terraform"
+  pool_id = "core"
+}
+
+module "dns_server" {
+  source = "./modules/dns-server"
+
+  vm_id = "200"
+
+  ipv4_address = "${local.dns_server_ip}/${local.dns_server_subnet}"
+  pool_id      = proxmox_virtual_environment_pool.core.id
+
+  proxmox_vm_storage    = var.proxmox_vm_storage
+  proxmox_image_storage = var.proxmox_image_storage
+
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  ssh_private_key_file = var.ssh_private_key_file
+}
+
+module "docker_swarm_stingray" {
+  source = "./modules/docker-swarm"
+
+  swarm_name    = "stingray"
+  vm_id_prefix  = "8"
+  subnet_cidr   = "10.0.42.0/24"
+  gateway       = var.gateway
+  manager_count = 3
+  worker_count  = 3
+  dns_server_ip = local.dns_server_ip
+
+  proxmox_vm_storage    = var.proxmox_vm_storage
+  proxmox_image_storage = var.proxmox_image_storage
+
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  ssh_private_key_file = var.ssh_private_key_file
+}
+
+module "k8s_dolo" {
+  source  = "./modules/k8s"
+  started = true
+
+  cluster_name         = "dolo"
+  vm_id_prefix         = "1"
+  subnet_cidr          = "10.0.185.0/24"
+  gateway              = var.gateway
+  control_plane_count  = 3
+  worker_count         = 3
+  storage_worker_count = 3
+  storage_size         = 32
+  dns_server_ip        = local.dns_server_ip
+
+  proxmox_vm_storage    = var.proxmox_vm_storage
+  proxmox_image_storage = var.proxmox_image_storage
+
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  ssh_private_key_file = var.ssh_private_key_file
+}
+
+module "k8s_folly" {
+  source = "./modules/k8s"
+
+  started = false
+
+  cluster_name         = "folly"
+  vm_id_prefix         = "2"
+  subnet_cidr          = "10.0.186.0/24"
+  control_plane_count  = 0
+  worker_count         = 0
+  storage_worker_count = 0
+  dns_server_ip        = local.dns_server_ip
+
+  proxmox_vm_storage    = var.proxmox_vm_storage
+  proxmox_image_storage = var.proxmox_image_storage
+
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  ssh_private_key_file = var.ssh_private_key_file
+}
+
+# module "outpost" {
+#   source = "./modules/outpost"
+
+#   ip_count            = 1
+#   ssh_public_key_file = var.ssh_public_key_file
+# }
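
Note: module "docker_swarm_stingray" still sources ./modules/docker-swarm, while the module files added below live under tf/modules/flock; presumably the directory was renamed in this refactor and the rename is simply not rendered in this view.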
tf/modules/flock/main.tf (new file)
@@ -0,0 +1,196 @@
+locals {
+  managers = [
+    for i in range(var.manager_count) : {
+      name = "${var.swarm_name}-manager-${format("%02s", i + 1)}"
+    }
+  ]
+  workers = [
+    for i in range(var.worker_count) : {
+      name = "${var.swarm_name}-worker-${format("%02s", i + 1)}"
+    }
+  ]
+}
+
+resource "proxmox_virtual_environment_pool" "swarm_pool" {
+  comment = "Managed by Terraform"
+  pool_id = var.swarm_name
+}
+
+resource "proxmox_virtual_environment_vm" "swarm_manager" {
+  count       = var.manager_count
+  name        = local.managers[count.index].name
+  description = "Managed by Terraform"
+  tags        = ["terraform", "ubuntu", "swarm-manager", var.swarm_name]
+
+  node_name = "pve"
+  vm_id     = "${var.vm_id_prefix}${count.index + 101}"
+  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id
+
+  cpu {
+    cores = 2
+    type  = "host"
+  }
+
+  memory {
+    dedicated = 2048
+    floating  = 2048
+  }
+
+  agent {
+    # read 'Qemu guest agent' section, change to true only when ready
+    enabled = true
+  }
+
+  startup {
+    order      = "1"
+    up_delay   = "60"
+    down_delay = "60"
+  }
+
+  disk {
+    datastore_id = var.proxmox_vm_storage
+    file_id      = var.cloud_image_id
+    interface    = "virtio0"
+    iothread     = true
+    discard      = "on"
+    size         = 32
+    file_format  = "qcow2"
+  }
+
+  initialization {
+    ip_config {
+      ipv4 {
+        # x.x.x.32 - x.x.x.39
+        address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24"
+        gateway = var.gateway
+      }
+    }
+    datastore_id = var.proxmox_image_storage
+
+    user_data_file_id = var.cloud_init_file_id
+  }
+
+  network_device {
+    bridge = "vmbr0"
+  }
+
+  operating_system {
+    type = "l26"
+  }
+
+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
+  lifecycle {
+    ignore_changes = [
+      initialization[0].user_data_file_id,
+    ]
+  }
+}
+
+
+
+resource "proxmox_virtual_environment_vm" "swarm_worker" {
+  count       = var.worker_count
+  name        = local.workers[count.index].name
+  description = "Managed by Terraform"
+  tags        = ["terraform", "ubuntu", "swarm-worker", var.swarm_name]
+
+  node_name = "pve"
+  vm_id     = "${var.vm_id_prefix}${count.index + 301}"
+  pool_id   = proxmox_virtual_environment_pool.swarm_pool.id
+
+  cpu {
+    cores = 4
+    type  = "host"
+  }
+
+  memory {
+    dedicated = 8192
+    floating  = 8192
+  }
+
+  agent {
+    # read 'Qemu guest agent' section, change to true only when ready
+    enabled = true
+  }
+
+  startup {
+    order      = "2"
+    up_delay   = "60"
+    down_delay = "60"
+  }
+
+  disk {
+    datastore_id = var.proxmox_vm_storage
+    file_id      = var.cloud_image_id
+    interface    = "virtio0"
+    discard      = "on"
+    size         = 32
+    file_format  = "qcow2"
+  }
+
+  initialization {
+    ip_config {
+      ipv4 {
+        # x.x.x.40 - x.x.x.55
+        address = "${cidrhost(var.subnet_cidr, count.index + 40)}/24"
+        gateway = var.gateway
+      }
+    }
+    datastore_id = var.proxmox_image_storage
+
+    user_data_file_id = var.cloud_init_file_id
+  }
+
+  network_device {
+    bridge = "vmbr0"
+  }
+
+  operating_system {
+    type = "l26"
+  }
+
+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
+  lifecycle {
+    ignore_changes = [
+      initialization[0].user_data_file_id,
+    ]
+  }
+}
+
+resource "ansible_host" "swarm_manager" {
+  count  = var.manager_count
+  name   = "${local.managers[count.index].name}.local"
+  groups = ["${var.swarm_name}_manager", var.swarm_name]
+}
+
+resource "ansible_host" "swarm_worker" {
+  count  = var.worker_count
+  name   = "${local.workers[count.index].name}.local"
+  groups = ["${var.swarm_name}_worker", var.swarm_name]
+}
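
For reference (not part of the commit): with the stingray inputs from tf/main.tf (vm_id_prefix = "8", subnet_cidr = "10.0.42.0/24", three managers and three workers), the expressions above resolve to names stingray-manager-01..03 and stingray-worker-01..03, VM IDs 8101..8103 and 8301..8303, and addresses 10.0.42.32/24..10.0.42.34/24 and 10.0.42.40/24..10.0.42.42/24, since cidrhost("10.0.42.0/24", 32) returns "10.0.42.32". The comment ranges x.x.x.32-39 and x.x.x.40-55 match the manager_count <= 8 and worker_count <= 16 validations in variables.tf.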
tf/modules/flock/providers.tf (new file)
@@ -0,0 +1,13 @@
+terraform {
+  required_providers {
+    proxmox = {
+      source  = "bpg/proxmox"
+      version = "0.70.0"
+    }
+    ansible = {
+      source  = "ansible/ansible"
+      version = "1.3.0"
+    }
+  }
+}
+
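
The ansible/ansible provider pinned above backs the ansible_host resources in main.tf: applying the plan records each host's name and groups in Terraform state, where an Ansible inventory can read them (commonly via the cloud.terraform collection's terraform_provider inventory plugin; how this repo consumes the inventory is not shown in this commit).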
tf/modules/flock/variables.tf (new file)
@@ -0,0 +1,60 @@
+variable "proxmox_image_storage" {
+  type = string
+}
+
+variable "proxmox_vm_storage" {
+  type = string
+}
+
+variable "vm_id_prefix" {
+  type        = number
+  description = "Prefix for the vm ids in the cluster"
+}
+
+variable "gateway" {
+  type    = string
+  default = "10.0.0.1"
+}
+variable "swarm_name" {
+  type = string
+}
+
+variable "manager_count" {
+  type = number
+  validation {
+    condition     = var.manager_count <= 8
+    error_message = "Too many manager nodes"
+  }
+}
+
+variable "worker_count" {
+  type = number
+  validation {
+    condition     = var.worker_count <= 16
+    error_message = "Too many worker nodes"
+  }
+}
+
+variable "cloud_init_file_id" {
+  type        = string
+  description = "Base cloud-init template"
+}
+
+variable "cloud_image_id" {
+  type        = string
+  description = "Cloud image to use"
+}
+
+variable "subnet_cidr" {
+  type = string
+  # example "10.0.185.0/24"
+}
+
+variable "dns_server_ip" {
+  type = string
+}
+
+variable "ssh_private_key_file" {
+  type        = string
+  description = "Path to private key file. Make sure this matches the public key defined in the cloud init."
+}
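
A minimal instantiation sketch for this module (hypothetical values throughout; the real consumer is module "docker_swarm_stingray" in tf/main.tf):

module "docker_swarm_example" {
  source = "./modules/flock"

  swarm_name    = "example"        # hypothetical swarm
  vm_id_prefix  = "9"              # hypothetical; managers get IDs 9101.., workers 9301..
  subnet_cidr   = "10.0.50.0/24"   # hypothetical subnet
  manager_count = 1                # validated above: at most 8
  worker_count  = 2                # validated above: at most 16
  dns_server_ip = "10.0.123.123"
  # gateway defaults to "10.0.0.1"

  proxmox_vm_storage    = "local-lvm" # hypothetical datastore IDs
  proxmox_image_storage = "local"
  cloud_init_file_id    = proxmox_virtual_environment_file.common_cloud_init.id
  cloud_image_id        = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file  = var.ssh_private_key_file
}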
@@ -1,6 +0,0 @@
-# module "outpost" {
-#   source = "./modules/outpost"
-
-#   ip_count            = 1
-#   ssh_public_key_file = var.ssh_public_key_file
-# }
@@ -1,5 +0,0 @@
-resource "proxmox_virtual_environment_pool" "core" {
-  comment = "Managed by Terraform"
-  pool_id = "core"
-}
-
@@ -41,3 +41,16 @@ EOF
   file_name = "common-cloud-init.cloud-config.yaml"
  }
 }
+
+
+resource "proxmox_virtual_environment_file" "ubuntu_cloud_image" {
+  content_type = "iso"
+  datastore_id = var.proxmox_image_storage
+  node_name    = "pve"
+
+  source_file {
+    path     = "https://cloud-images.ubuntu.com/noble/20250122/noble-server-cloudimg-amd64.img"
+
+    checksum = "482244b83f49a97ee61fb9b8520d6e8b9c2e3c28648de461ba7e17681ddbd1c9"
+  }
+}