chore: Reorganize terraform
parent fe74343202
commit 6bbcd55bb4
@@ -2,6 +2,7 @@
- name: Install
  hosts: jumper
  remote_user: ubuntu
  serial: 1
  vars:
    pv_disks:
      - /dev/sda
@@ -12,6 +13,7 @@
    mount_path: /mnt/docker
    extra_docker_daemon_options: |
      "data-root": "/mnt/docker/docker-root",
      "dns": ["10.0.123.123"],
  tasks:
    - import_role: name=dns-client

@@ -22,8 +22,7 @@
        "base": "172.168.0.0/16",
        "size": 24
      }
    ],
    "dns": ["10.0.123.123"]
  ]
}
dest: /etc/docker/daemon.json
register: docker_daemon_conf

docker/compose/bind9/config/home.mnke.org.zone (new file, 17 lines)
@@ -0,0 +1,17 @@
$ORIGIN home.mnke.org.
@ 900 IN SOA ns.home.mnke.org. info.mnke.org. (
  5      ; serial
  900    ; refresh
  300    ; retry
  604800 ; expire
  900    ; minimum ttl
)
@ 3600 IN NS ns.home.mnke.org.
ns 3600 IN A 10.0.4.4

truenas 600 IN A 10.0.0.160
nas     600 IN CNAME truenas
db      600 IN CNAME truenas

truenas-gpu 600 IN A 10.0.0.250

docker/compose/bind9/config/named.conf (new file, 17 lines)
@@ -0,0 +1,17 @@
acl internal {
  10.0.0.0/16;
  127.0.0.1;
};

options {
  forwarders {
    1.0.0.1;
    1.1.1.1;
  };
  allow-query { internal; };
};

zone "home.mnke.org" IN {
  type master;
  file "/etc/bind/home.mnke.org.zone";
};

docker/compose/bind9/docker-compose.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
---

services:
  bind9:
    container_name: bind9
    image: ubuntu/bind9:9.18-22.04_beta
    environment:
      - BIND9_USER=root
      - TZ=America/Vancouver
    ports:
      - 53:53/tcp
      - 53:53/udp
    volumes:
      - ./config:/etc/bind
      - ./cache:/var/cache/bind
      - ./records:/var/lib/bind
    restart: unless-stopped

@@ -10,6 +10,7 @@ services:
  gitea:
    image: docker.io/gitea/gitea:1.23.1
    container_name: gitea
    restart: unless-stopped
    environment:
      - USER_UID=1002
      - USER_GID=1002

@@ -176,7 +176,7 @@ services:
    resources:
      limits:
        cpus: '0.25'
-        memory: 256M
+        memory: 512M
      reservations:
        cpus: '0.1'
        memory: 64M

@@ -8,6 +8,7 @@ services:
  traefik:
    image: traefik:v3.3
    container_name: traefik
    restart: unless-stopped
    # This seems to be needed to solve the DNS challenge. Otherwise our own
    # DNS server is used, which isn't correctly configured to allow checking
    # that the DNS entries have been propagated
@@ -62,6 +63,8 @@ services:

  whoami:
    image: "traefik/whoami"
    container_name: whoami
    restart: unless-stopped
    networks:
      - traefik
    labels:

@@ -32,7 +32,7 @@ spec:
apiVersion: notification.toolkit.fluxcd.io/v1beta3
kind: Alert
metadata:
-  name: on-call-webapp
+  name: alerts
  namespace: flux-system
spec:
  summary: "cluster addons"
@@ -47,5 +47,9 @@ spec:
      name: '*'
    - kind: Kustomization
      name: '*'

    # - kind: HelmRelease
    #   name: '*'
  exclusionList:
    # These are so noisy
    - "*HelmRepository/flux-system/prometheus-community configured*"
    - "*HelmRepository/flux-system/traefik configured*"

@@ -1,20 +0,0 @@
-terraform {
-  backend "s3" {
-    bucket = "tfstate"
-
-    endpoints = {
-      s3 = var.s3_backend_endpoint
-    }
-
-    key        = "homelab.tfstate"
-    access_key = var.s3_access_key
-    secret_key = var.s3_secret_key
-
-    region                      = "main" # Region validation will be skipped
-    skip_credentials_validation = true   # Skip AWS related checks and validations
-    skip_requesting_account_id  = true
-    skip_metadata_api_check     = true
-    skip_region_validation      = true
-    use_path_style              = true # Enable path-style S3 URLs (https://<HOST>/<BUCKET>): https://developer.hashicorp.com/terraform/language/settings/backends/s3#use_path_style
-  }
-}

@@ -38,3 +38,47 @@ power_state:
  condition: true
EOF
}
+
+resource "proxmox_virtual_environment_file" "common_cloud_init" {
+  content_type = "snippets"
+  datastore_id = var.proxmox_image_storage
+  node_name    = "pve"
+
+  source_raw {
+    data = <<EOF
+#cloud-config
+chpasswd:
+  list: |
+    ubuntu:ubuntu
+    ${var.username}:${var.username}
+  expire: false
+packages:
+  - qemu-guest-agent
+  - nfs-common
+  - avahi-daemon
+timezone: America/Vancouver
+
+users:
+  - default
+  - name: ubuntu
+    groups: sudo
+    shell: /bin/bash
+    ssh-authorized-keys:
+      - ${trimspace(data.local_file.ssh_pub_key.content)}
+    sudo: ALL=(ALL) NOPASSWD:ALL
+  - name: ${var.username}
+    groups: sudo
+    shell: /bin/bash
+    ssh_import_id:
+      - ${var.ssh_import_id}
+    sudo: ALL=(ALL) NOPASSWD:ALL
+
+power_state:
+  delay: now
+  mode: reboot
+  message: Rebooting after cloud-init completion
+  condition: true
+EOF
+    file_name = "common-cloud-init.cloud-config.yaml"
+  }
+}

@@ -14,6 +14,7 @@ module "dns_server" {
  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init    = local.common_cloud_init
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file = var.ssh_private_key_file
}

tf/dns.tf (new file, 6 lines)
@@ -0,0 +1,6 @@
# resource "dns_a_record_set" "test" {
#   zone      = "home.mnke.org."
#   name      = "test"
#   addresses = ["10.0.123.123"]
#   ttl       = 300
# }
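
Once the hashicorp/dns provider added later in this commit is configured, uncommenting this resource would manage the record over RFC 2136 dynamic updates. As a minimal sketch, the record could then be read back through the provider's matching data source (the test name is hypothetical, mirroring the commented resource above):

data "dns_a_record_set" "test_lookup" {
  # Assumes the commented resource above has been applied first.
  host = "test.home.mnke.org"
}

output "test_ips" {
  # addrs lists every A record returned for the host.
  value = data.dns_a_record_set.test_lookup.addrs
}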

@@ -12,6 +12,7 @@ module "docker_swarm_stingray" {
  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init    = local.common_cloud_init
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file = var.ssh_private_key_file
}

tf/huts.tf (82 changed lines)
@@ -47,8 +47,8 @@ resource "proxmox_virtual_environment_vm" "jumper" {
  }

  memory {
-    dedicated = 4096
-    floating  = 4096
+    dedicated = 8192
+    floating  = 8192
  }

  agent {
@@ -113,3 +113,81 @@ resource "ansible_host" "jumper" {
  groups = ["jumper", "portainer_agent"]
}
+
+resource "proxmox_virtual_environment_vm" "bench" {
+  name        = "bench"
+  description = "Managed by Terraform"
+  tags        = ["terraform", "ubuntu", "hut"]
+
+  node_name = "pve"
+  vm_id     = 20001
+
+  cpu {
+    cores = 1
+    type  = "host"
+  }
+
+  memory {
+    dedicated = 1024
+    floating  = 1024
+  }
+
+  agent {
+    enabled = true
+  }
+
+  startup {
+    order      = "1"
+    up_delay   = "60"
+    down_delay = "60"
+  }
+
+  disk {
+    datastore_id = var.proxmox_vm_storage
+    file_id      = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+    interface    = "virtio0"
+    iothread     = true
+    discard      = "on"
+    size         = 16
+    file_format  = "qcow2"
+  }
+
+  initialization {
+    ip_config {
+      ipv4 {
+        address = "10.0.44.3/16"
+        gateway = var.gateway
+      }
+    }
+    datastore_id = var.proxmox_image_storage
+
+    user_data_file_id = proxmox_virtual_environment_file.common_cloud_init.id
+  }
+
+  network_device {
+    bridge = "vmbr0"
+  }
+
+  operating_system {
+    type = "l26"
+  }
+
+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
+  lifecycle {
+    ignore_changes = [
+      initialization[0].user_data_file_id,
+    ]
+  }
+}
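
The bench VM consumes the shared common_cloud_init snippet, so its hostname is no longer baked into per-VM user data: the remote-exec provisioner sets it after first boot, and ignore_changes stops later snippet edits from forcing a VM replacement. The host expression just strips the CIDR suffix from the static address; as a worked example:

# split("/", "10.0.44.3/16") => ["10.0.44.3", "16"]
# so self.initialization[0].ip_config[0].ipv4[0].address ("10.0.44.3/16")
# resolves to the bare SSH host "10.0.44.3"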

tf/k8s.tf (10 changed lines)
@@ -15,8 +15,9 @@ module "k8s_dolo" {
  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init    = local.common_cloud_init
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file = var.ssh_private_key_file
}

module "k8s_folly" {
@@ -35,7 +36,8 @@ module "k8s_folly" {
  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage

  common_cloud_init    = local.common_cloud_init
-  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
+  cloud_init_file_id   = proxmox_virtual_environment_file.common_cloud_init.id
+  cloud_image_id       = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file = var.ssh_private_key_file
}

@@ -19,8 +19,8 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
  }

  memory {
-    dedicated = 512
-    floating  = 512
+    dedicated = 1024
+    floating  = 1024
  }

  agent {
@@ -54,7 +54,7 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.dns_server.id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -65,22 +65,24 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
    type = "l26"
  }

-  lifecycle {
+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
}

-resource "proxmox_virtual_environment_file" "dns_server" {
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }

-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.dns_server.name}
-EOF
-    file_name = "${local.dns_server.name}.cloud-config.yaml"
+  lifecycle {
+    ignore_changes = [
+      initialization[0].user_data_file_id,
+    ]
  }
}

@@ -1,8 +1,3 @@
-variable "common_cloud_init" {
-  type        = string
-  description = "Base cloud-init template"
-}
-
variable "cloud_image_id" {
  type        = string
  description = "Cloud image to use"
@@ -32,3 +27,13 @@ variable "pool_id" {
variable "vm_id" {
  type = string
}
+
+variable "cloud_init_file_id" {
+  type        = string
+  description = "Base cloud-init template"
+}
+
+variable "ssh_private_key_file" {
+  type        = string
+  description = "Path to private key file. Make sure this matches the public key defined in the cloud init."
+}

@@ -67,7 +67,7 @@ resource "proxmox_virtual_environment_vm" "swarm_manager" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.swarm_manager[count.index].id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -78,43 +78,28 @@ resource "proxmox_virtual_environment_vm" "swarm_manager" {
    type = "l26"
  }

+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

-resource "proxmox_virtual_environment_file" "swarm_manager" {
-  count        = var.manager_count
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.managers[count.index].name}
-EOF
-    file_name = "${local.managers[count.index].name}.cloud-config.yaml"
-  }
-}
-
-resource "proxmox_virtual_environment_file" "swarm_worker" {
-  count        = var.worker_count
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.workers[count.index].name}
-EOF
-    file_name = "${local.workers[count.index].name}.cloud-config.yaml"
-  }
-}

resource "proxmox_virtual_environment_vm" "swarm_worker" {
  count = var.worker_count
@@ -166,7 +151,7 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.swarm_worker[count.index].id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -177,7 +162,24 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" {
    type = "l26"
  }

+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

@@ -35,7 +35,7 @@ variable "worker_count" {
  }
}

-variable "common_cloud_init" {
+variable "cloud_init_file_id" {
  type        = string
  description = "Base cloud-init template"
}
@@ -53,3 +53,8 @@ variable "subnet_cidr" {
variable "dns_server_ip" {
  type = string
}
+
+variable "ssh_private_key_file" {
+  type        = string
+  description = "Path to private key file. Make sure this matches the public key defined in the cloud init."
+}

@@ -82,7 +82,7 @@ resource "proxmox_virtual_environment_vm" "k8s_control_plane" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.k8s_control_plane[count.index].id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -93,7 +93,24 @@ resource "proxmox_virtual_environment_vm" "k8s_control_plane" {
    type = "l26"
  }

+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

@@ -154,7 +171,7 @@ resource "proxmox_virtual_environment_vm" "k8s_worker" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.k8s_worker[count.index].id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -165,58 +182,27 @@ resource "proxmox_virtual_environment_vm" "k8s_worker" {
    type = "l26"
  }

+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

-resource "proxmox_virtual_environment_file" "k8s_control_plane" {
-  count        = var.control_plane_count
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.control_plane[count.index].name}
-EOF
-    file_name = "${local.control_plane[count.index].name}.cloud-config.yaml"
-  }
-}
-
-resource "proxmox_virtual_environment_file" "k8s_worker" {
-  count        = var.worker_count
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.workers[count.index].name}
-EOF
-    file_name = "${local.workers[count.index].name}.cloud-config.yaml"
-  }
-}
-
-resource "proxmox_virtual_environment_file" "k8s_storage_worker" {
-  count        = var.storage_worker_count
-  content_type = "snippets"
-  datastore_id = var.proxmox_image_storage
-  node_name    = "pve"
-
-  source_raw {
-    data = <<EOF
-${var.common_cloud_init}
-
-hostname: ${local.storage_workers[count.index].name}
-EOF
-    file_name = "${local.storage_workers[count.index].name}.cloud-config.yaml"
-  }
-}

# This is currently how we create "disks" that are independent of a VM: by
# creating a dummy VM with a disk and then attaching the disk. This way, we
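
A rough sketch of the dummy-VM pattern the comment describes, assuming the bpg provider's disk block accepts path_in_datastore for attaching an existing volume (the resource name and disk path here are illustrative, not from this commit):

# Dummy VM that exists only to own the disk; it is never started, so the
# disk's lifecycle is decoupled from any real worker VM.
resource "proxmox_virtual_environment_vm" "disk_holder" {
  name      = "disk-holder"
  node_name = "pve"
  started   = false
  on_boot   = false

  disk {
    datastore_id = var.proxmox_vm_storage
    interface    = "scsi0"
    size         = var.storage_size
  }
}

# A consuming VM would then reference the holder's volume by datastore path:
#
#   disk {
#     datastore_id      = var.proxmox_vm_storage
#     path_in_datastore = "vm-<holder-vm-id>-disk-0"  # assumed attribute
#     interface         = "scsi1"
#   }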

@@ -312,7 +298,7 @@ resource "proxmox_virtual_environment_vm" "k8s_storage_worker" {
    }
    datastore_id = var.proxmox_image_storage

-    user_data_file_id = proxmox_virtual_environment_file.k8s_storage_worker[count.index].id
+    user_data_file_id = var.cloud_init_file_id
  }

  network_device {
@@ -323,7 +309,24 @@ resource "proxmox_virtual_environment_vm" "k8s_storage_worker" {
    type = "l26"
  }

+  connection {
+    type        = "ssh"
+    user        = "ubuntu"
+    private_key = file(var.ssh_private_key_file)
+    host        = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+      "sudo systemctl restart avahi-daemon",
+    ]
+  }
+
  lifecycle {
    ignore_changes = [
      initialization[0].user_data_file_id,
    ]
  }
}

@@ -58,7 +58,7 @@ variable "storage_size" {
  default = 32
}

-variable "common_cloud_init" {
+variable "cloud_init_file_id" {
  type        = string
  description = "Base cloud-init template"
}
@@ -76,3 +76,8 @@ variable "subnet_cidr" {
variable "dns_server_ip" {
  type = string
}
+
+variable "ssh_private_key_file" {
+  type        = string
+  description = "Path to private key file. Make sure this matches the public key defined in the cloud init."
+}

@@ -2,3 +2,4 @@ resource "proxmox_virtual_environment_pool" "core" {
  comment = "Managed by Terraform"
  pool_id = "core"
}

@@ -12,6 +12,29 @@ terraform {
      source  = "ansible/ansible"
      version = "1.3.0"
    }
+    dns = {
+      source  = "hashicorp/dns"
+      version = "3.4.2"
+    }
  }

+  backend "s3" {
+    bucket = "tfstate"
+
+    endpoints = {
+      s3 = var.s3_backend_endpoint
+    }
+
+    key        = "homelab.tfstate"
+    access_key = var.s3_access_key
+    secret_key = var.s3_secret_key
+
+    region                      = "main" # Region validation will be skipped
+    skip_credentials_validation = true   # Skip AWS related checks and validations
+    skip_requesting_account_id  = true
+    skip_metadata_api_check     = true
+    skip_region_validation      = true
+    use_path_style              = true
+  }
}
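
One caveat worth flagging: Terraform itself does not interpolate var.* references inside backend blocks, so s3 = var.s3_backend_endpoint, access_key, and secret_key above would normally fail terraform init with "Variables may not be used here" (OpenTofu 1.8+ does accept static variable references there). A common workaround is partial backend configuration, supplying those values at init time; a minimal sketch, with an illustrative file name and placeholder values:

# backend.hcl (not part of this commit), used as:
#   terraform init -backend-config=backend.hcl
endpoints = {
  s3 = "https://s3.example.internal:9000" # placeholder endpoint
}
access_key = "EXAMPLE_ACCESS_KEY"
secret_key = "EXAMPLE_SECRET_KEY"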

@@ -24,3 +47,12 @@ provider "proxmox" {
    username = "root"
  }
}
+
+provider "dns" {
+  update {
+    server        = local.dns_server_ip
+    key_name      = "terraform.mnke.org."
+    key_algorithm = "hmac-sha256"
+    key_secret    = var.technitium_tsig_secret
+  }
+}

@@ -6,6 +6,7 @@ variable "proxmox_api_endpoint" {
variable "proxmox_api_token" {
  type        = string
  description = "Proxmox API token for the bpg proxmox provider, with ID and token"
  sensitive   = true
}

variable "gateway" {
@@ -44,3 +45,13 @@ variable "ssh_import_id" {
  type = string
  # example = "gh:tonyd33"
}
+
+variable "technitium_tsig_secret" {
+  type      = string
+  sensitive = true
+}
+
+variable "ssh_private_key_file" {
+  type        = string
+  description = "Path to private key file. Make sure this matches the public key defined in the cloud init."
+}

@@ -1,6 +1,8 @@
proxmox_image_storage = "proxmox-local-directory"
proxmox_vm_storage    = "proxmox-local-directory"
gateway               = "10.0.0.1"

username      = "tony"
ssh_import_id = "gh:tonyd33"
+
+ssh_private_key_file = "~/.ssh/id_rsa"