feat: Expose game servers

Tony Du 2025-02-15 17:08:13 -08:00
parent ce7677f9dc
commit 2c8f9d0ea2
31 changed files with 660 additions and 238 deletions

View File

@@ -1,2 +1,9 @@
plugin: ansible.builtin.constructed
strict: true
groups:
  vpn: >-
    group_names | intersect(['embassy']) | length == 1 or
    group_names | intersect(['jodye']) | length == 1
  vpn_server: group_names | intersect(['embassy']) | length == 1
  vpn_client: group_names | intersect(['jodye']) | length == 1
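Given the groups above, the constructed plugin yields memberships along these lines (a sketch of ansible-inventory --graph output; the actual host names come from the Terraform-managed inventory):

@vpn:
  |--<embassy eip>
  |--jodye-wings-01.local
@vpn_client:
  |--jodye-wings-01.local
@vpn_server:
  |--<embassy eip>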

View File

@@ -1,3 +1,5 @@
ansible_user: ubuntu
dns_server:
  admin_username: "{{ secrets.admin_username }}"
  admin_password: "{{ secrets.admin_password }}"

View File

@@ -0,0 +1,18 @@
ansible_user: ubuntu
# Directory to store WireGuard configuration on the remote hosts
wireguard_remote_directory: /etc/wireguard
wireguard_interface_restart: true
# TODO: Enable this when stable
wireguard_service_enabled: false
wireguard_service_state: started
# We can generate this dynamically (sketched below), but it really doesn't seem
# like it's worth the work.
nat_map:
  jodye-wings-01.local:
    lan_ip: 10.0.29.40
    vpn_ip: 10.4.4.33
    vps_ip: 172.32.1.33
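For reference, the dynamic generation mentioned above could look roughly like this (a sketch; it assumes the wings hosts are numbered sequentially and the three address bases stay fixed):

nat_map: >-
  {%- set m = {} %}
  {%- for h in groups['jodye_wings'] | sort %}
  {%- set _ = m.update({h: {
        'lan_ip': '10.0.29.' ~ (39 + loop.index),
        'vpn_ip': '10.4.4.' ~ (32 + loop.index),
        'vps_ip': '172.32.1.' ~ (32 + loop.index)}}) %}
  {%- endfor %}
  {{ m }}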

View File

@@ -0,0 +1,17 @@
wireguard_addresses:
  - "{{ nat_map[inventory_hostname].vpn_ip }}"
wireguard_endpoint: ""
# Don't set this
# wireguard_dns: 10.0.123.123
# Keep the PAT mapping open. Only needed for the client; the server is always
# reachable. (Rendered client config sketched below.)
wireguard_persistent_keepalive: 30
# Don't route local addresses through the WireGuard tunnel
wireguard_preup:
  - ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
wireguard_postdown:
  - ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
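Put together, the client ends up with a wg0.conf along these lines (a sketch; key material and the server's real endpoint omitted):

[Interface]
Address = 10.4.4.33
PreUp = ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
PostDown = ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink

[Peer]
Endpoint = <embassy>:51820
AllowedIPs = 0.0.0.0/0
PersistentKeepalive = 30

The 0.0.0.0/0 comes from the server-side wireguard_allowed_ips in the next file, which is what makes the client route everything through the tunnel.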

View File

@@ -0,0 +1,55 @@
# This should really be set per host, but I'm abusing the fact that there's only
# one vpn_server host
wireguard_addresses:
  - "10.4.4.1/24"
wireguard_endpoint: "{{ inventory_hostname }}"
wireguard_preup:
  - echo 1 > /proc/sys/net/ipv4/ip_forward
# The rendered rules for the current nat_map are shown after this file.
wireguard_postup: |
  {% filter from_yaml %}
  {%- for value in (nat_map | dict2items | map(attribute='value')) %}
  # Incoming packets to vps_ip, dst port 10000-40000, are DNAT'd to vpn_ip
  # with a matching port
  - iptables -t nat -A PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
  # Incoming packets from vpn_ip are SNAT'd to vps_ip with a matching port to
  # complete the reverse NAT path
  - iptables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
  # Same thing for UDP. We do this selectively because we don't want to mess
  # with stuff like ICMP
  - iptables -t nat -A PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
  - iptables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
  {%- endfor %}
  {% endfilter %}
# Exact reverse of the above to delete all the rules
wireguard_predown: |
  {% filter from_yaml %}
  {%- for value in (nat_map | dict2items | map(attribute='value') | reverse) %}
  - iptables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
  - iptables -t nat -D PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
  - iptables -t nat -D PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
  - iptables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
  {%- endfor %}
  {% endfilter %}
wireguard_postdown:
  - echo 0 > /proc/sys/net/ipv4/ip_forward
# https://www.procustodibus.com/blog/2021/03/wireguard-allowedips-calculator/
# The post above recommends adding specific routing rules rather than computing
# an equivalent list of subnets.
#
# Yes, this is supposed to be defined on vpn_server, not on vpn_client as I
# initially thought. This is likely because the role was meant for a fully
# meshed network rather than a single server with multiple clients: each host
# defines the list of IPs that should be routed _to this host_, not a list of
# IPs that should be routed to the "server" (because everyone is a peer in a
# fully meshed network).
wireguard_allowed_ips: "0.0.0.0/0"
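With the single nat_map entry defined for jodye-wings-01, the wireguard_postup template above renders to this list (a sketch of the expanded output):

- iptables -t nat -A PREROUTING -p tcp -d 172.32.1.33 --dport 10000:40000 -j DNAT --to-destination 10.4.4.33
- iptables -t nat -A POSTROUTING -p tcp -s 10.4.4.33 -j SNAT --to-source 172.32.1.33
- iptables -t nat -A PREROUTING -p udp -d 172.32.1.33 --dport 10000:40000 -j DNAT --to-destination 10.4.4.33
- iptables -t nat -A POSTROUTING -p udp -s 10.4.4.33 -j SNAT --to-source 172.32.1.33

Anything that hits the VPS on ports 10000-40000 is DNAT'd across the tunnel to the wings box, and replies from the wings box are SNAT'd back to the public-facing address.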

View File

@@ -46,6 +46,35 @@
        source: /mnt/docker/docker-root/volumes
        target: /var/lib/docker/volumes
    # This is used _only_ to enable stacks for things like rolling updates
    - name: Init swarm
      become: true
      docker_swarm:
        state: present
        advertise_addr: "{{ ansible_default_ipv4.address }}"
        default_addr_pool:
          - 172.17.0.0/16
          - 172.18.0.0/16
          - 172.19.0.0/16
    - name: Create ingress network
      # The default ingress network conflicts with my subnet and ends up
      # causing problems, so we have to set a different subnet first
      # (sanity check sketched below)
      become: true
      docker_network:
        name: ingress
        driver: overlay
        ingress: true
        scope: swarm
        ipam_config:
          - subnet: 172.31.0.0/16
            gateway: 172.31.0.1
        driver_options:
          # I'm honestly not completely sure what this does, but in the default
          # ingress network that's created during swarm initialization, it
          # exists and things don't seem to work without it.
          com.docker.network.driver.overlay.vxlanid_list: 4096
- name: Mount extra shares
  tags: [extra-shares]
  hosts: jumper
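A follow-up assertion could sanity-check that the recreated ingress network actually picked up the new subnet (a sketch using community.docker.docker_network_info):

    - name: Inspect ingress network
      become: true
      community.docker.docker_network_info:
        name: ingress
      register: ingress_info
    - name: Assert ingress subnet was applied
      assert:
        that:
          - ingress_info.network.IPAM.Config[0].Subnet == '172.31.0.0/16'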

View File

@@ -6,6 +6,7 @@
    content: |
      [Resolve]
      DNS=10.0.123.123
      DNSStubListener=no
      # FallbackDNS=1.1.1.1
    dest: /etc/systemd/resolved.conf
  register: dns_servers_configuration
@@ -16,5 +17,12 @@
    name: systemd-resolved
    state: restarted
  when: dns_servers_configuration.changed
  retries: 2
  # This is gonna break the SSH connection
  async: 10
  poll: 0
  ignore_errors: yes
- name: Wait for SSH to come back
  wait_for_connection:
    delay: 5
    timeout: 500

View File

@@ -0,0 +1,5 @@
---
roles:
  - name: githubixx.ansible_role_wireguard
    src: https://github.com/githubixx/ansible-role-wireguard.git
    version: 17.0.0

View File

@@ -0,0 +1,30 @@
---
version: '3.8'
services:
  wings:
    image: ghcr.io/pterodactyl/wings:v1.11.13
    restart: always
    ports:
      - "8080:8080"
      - "2022:2022"
      - "443:443"
    tty: true
    environment:
      TZ: "America/Vancouver"
      WINGS_UID: 988
      WINGS_GID: 988
      WINGS_USERNAME: pterodactyl
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/"
      - "/etc/pterodactyl/:/etc/pterodactyl/"
      - "/var/lib/pterodactyl/:/var/lib/pterodactyl/"
      - "/var/log/pterodactyl/:/var/log/pterodactyl/"
      - "/tmp/pterodactyl/:/tmp/pterodactyl/"
      - "/etc/ssl/certs:/etc/ssl/certs:ro"
      # You may need /srv/daemon-data if you are upgrading from an old daemon
      #- "/srv/daemon-data/:/srv/daemon-data/"
      # Required for SSL if you use Let's Encrypt. Uncomment to use.
      #- "/etc/letsencrypt/:/etc/letsencrypt/"

View File

@@ -0,0 +1,16 @@
- name: Ensure compose directory exists
  file:
    path: "/home/{{ ansible_user }}/compose/wings"
    state: directory
    mode: 0755
- name: Copy wings configuration
  copy:
    src: docker-compose.yml
    dest: "/home/{{ ansible_user }}/compose/wings/docker-compose.yml"
    mode: 0644
- name: Deploy wings
  become: true
  community.docker.docker_compose_v2:
    project_src: "/home/{{ ansible_user }}/compose/wings"

ansible/wings.yml Normal file
View File

@@ -0,0 +1,33 @@
---
- name: Ensure prerequisites met
  # TODO: Check # of IP addrs on enX0 >= # of wings + 1 (sketched below)
  hosts: vpn
  tasks: []
- name: Install wings
  hosts: jodye_wings
  remote_user: ubuntu
  # Don't forget to create a new disk if creating new wings. This is
  # purposefully manual to give more fine-grained control
  vars:
    pv_disks:
      - /dev/sda
    vg_name: vg1
    lv_name: pvs
    lv_size: +100%FREE
    fs_type: ext4
    mount_path: /var/lib/pterodactyl
    extra_docker_daemon_options: |
      "dns": ["10.0.123.123"],
  roles:
    - dns-client
    - lvm
    - docker
    - wings
- name: Set up VPN
  hosts: vpn
  become: true
  roles:
    - githubixx.ansible_role_wireguard
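The prerequisite check in the TODO above could eventually look something like this (a sketch; it assumes the secondary VPS addresses show up as ipv4_secondaries on enX0):

- name: Ensure prerequisites met
  hosts: vpn
  tasks:
    - name: Check that enX0 has enough IP addresses
      assert:
        that:
          - (ansible_enX0.ipv4_secondaries | default([]) | length) + 1 >= (groups['jodye_wings'] | length) + 1
        fail_msg: Not enough IPs on enX0 to cover every wings host plus the VPN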

View File

@@ -0,0 +1,4 @@
$ORIGIN jodye.mnke.org.
@ 900 IN SOA dns-server. hostadmin 3 900 300 604800 900
@ 3600 IN NS dns-server.
wings-01 600 IN A 10.0.29.40

View File

@@ -5,3 +5,4 @@ authentik 600 IN CNAME authentik.dolo
blog 600 IN CNAME blog.dolo
git 600 IN CNAME git.jumper
vault 600 IN CNAME vault.jumper
panel 600 IN CNAME panel.jumper

View File

@@ -0,0 +1,4 @@
$ORIGIN proxy.dolo.mnke.org.
@ 900 IN SOA dns-server. hostadmin 2 900 300 604800 900
@ 3600 IN NS dns-server.
* 600 IN CNAME dolo.mnke.org.

View File

@@ -0,0 +1,4 @@
$ORIGIN web.jodye.mnke.org.
@ 900 IN SOA dns-server. hostadmin 2 900 300 604800 900
@ 3600 IN NS dns-server.
wings-01 600 IN CNAME dolo.mnke.org.

View File

@@ -0,0 +1,92 @@
---
version: '3.8'
x-common:
  database: &db-environment
    MYSQL_PASSWORD: &db-password "${MYSQL_PASSWORD}"
    MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD}"
  panel: &panel-environment
    APP_URL: "https://${PANEL_HOST:-panel.mnke.org}"
    # A list of valid timezones can be found here: http://php.net/manual/en/timezones.php
    APP_TIMEZONE: "America/Vancouver"
    APP_SERVICE_AUTHOR: "${EMAIL:-tony@mnke.org}"
    TRUSTED_PROXIES: "${TRUSTED_PROXIES:-*}"
    # Uncomment the line below and set to a non-empty value if you want to use Let's Encrypt
    # to generate an SSL certificate for the Panel.
    # LE_EMAIL: ""
  mail: &mail-environment
    MAIL_FROM: "${EMAIL:-tony@mnke.org}"
    MAIL_DRIVER: "smtp"
    MAIL_HOST: "mail"
    MAIL_PORT: "${MAIL_PORT:-1025}"
    MAIL_USERNAME: "${MAIL_USERNAME:-}"
    MAIL_PASSWORD: "${MAIL_PASSWORD:-}"
    MAIL_ENCRYPTION: "true"
services:
  database:
    image: mariadb:10.5
    restart: always
    command: --default-authentication-plugin=mysql_native_password
    networks:
      - pterodactyl-panel
    volumes:
      - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/database:/var/lib/mysql
    environment:
      <<: *db-environment
      MYSQL_DATABASE: "panel"
      MYSQL_USER: "pterodactyl"
  cache:
    image: redis:7.4.2-alpine
    restart: unless-stopped
    networks:
      - pterodactyl-panel
  panel:
    image: ghcr.io/pterodactyl/panel:latest
    restart: unless-stopped
    # ports:
    #   - "80:80"
    #   - "443:443"
    networks:
      - pterodactyl-panel
      - traefik
    volumes:
      - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/var/:/app/var/
      - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/nginx/:/etc/nginx/http.d/
      - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/certs/:/etc/letsencrypt/
      - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/logs/:/app/storage/logs
    environment:
      <<: [*panel-environment, *mail-environment]
      DB_PASSWORD: *db-password
      APP_ENV: "production"
      APP_ENVIRONMENT_ONLY: "false"
      CACHE_DRIVER: "redis"
      SESSION_DRIVER: "redis"
      QUEUE_DRIVER: "redis"
      REDIS_HOST: "cache"
      DB_HOST: "database"
      DB_PORT: "3306"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.pterodactyl-panel.rule=Host(`${PANEL_HOST:-panel.mnke.org}`)"
      - "traefik.http.routers.pterodactyl-panel.entrypoints=websecure"
      - "traefik.http.routers.pterodactyl-panel.tls.certresolver=letsencrypt"
      - "traefik.http.services.pterodactyl-panel.loadbalancer.server.port=80"
      - "traefik.docker.network=traefik"
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 128M
networks:
  pterodactyl-panel:
    name: pterodactyl-panel
  traefik:
    external: true

View File

@@ -7,4 +7,4 @@ resources:
- seerr-mnke.yaml
- seerr-tonydu.yaml
- dns-dolo-mnke.yaml
- jodye-wings-01-dolo-mnke.yaml
- wings-01-web-jodye-mnke.yaml

View File

@@ -0,0 +1,36 @@
---
# This file was automatically generated. Do not modify.
apiVersion: v1
kind: Service
metadata:
  name: web-wings-01-jodye-mnke-external
  namespace: default
spec:
  type: ExternalName
  externalName: wings-01.jodye.mnke.org
  ports:
    - name: web-wings-01-jodye-mnke-external
      port: 443
      targetPort: 443
---
# This file was automatically generated. Do not modify.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: web-wings-01-jodye-mnke-external
  namespace: default
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`web.wings-01.jodye.mnke.org`)
      kind: Rule
      services:
        - kind: Service
          name: web-wings-01-jodye-mnke-external
          port: 443
          passHostHeader: true
          scheme: http
  tls:
    secretName: wildcard-mnke-org-tls

View File

@@ -0,0 +1,36 @@
---
# This file was automatically generated. Do not modify.
apiVersion: v1
kind: Service
metadata:
  name: wings-01-web-jodye-mnke-external
  namespace: default
spec:
  type: ExternalName
  externalName: wings-01.jodye.mnke.org
  ports:
    - name: wings-01-web-jodye-mnke-external
      port: 443
      targetPort: 443
---
# This file was automatically generated. Do not modify.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: wings-01-web-jodye-mnke-external
  namespace: default
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`wings-01.web.jodye.mnke.org`)
      kind: Rule
      services:
        - kind: Service
          name: wings-01-web-jodye-mnke-external
          port: 443
          passHostHeader: true
          scheme: http
  tls:
    secretName: wildcard-mnke-org-tls

View File

@@ -41,6 +41,9 @@ spec:
          name: {{ service_name }}-external
          port: {{ upstream_port }}
          passHostHeader: {{ pass_host_header | default('true') }}
{%- if scheme %}
          scheme: {{ scheme }}
{%- endif %}
{%- endfor %}
  tls:
    secretName: {{ tls_secret_name }}

View File

@@ -47,9 +47,9 @@ proxies:
    upstream_port: 5380
    pass_host_header: false
  - service_name: jodye-wings-01-dolo-mnke
  - service_name: wings-01-web-jodye-mnke
    tls_secret_name: wildcard-mnke-org-tls
    listen_host: jodye-wings-01.dolo.mnke.org
    listen_host: wings-01.web.jodye.mnke.org
    upstream_host: wings-01.jodye.mnke.org
    upstream_port: 443
    scheme: http

View File

@@ -20,6 +20,8 @@ spec:
    - "*.mnke.org"
    - "*.home.mnke.org"
    - "*.dolo.mnke.org"
    - "*.jodye.mnke.org"
    - "*.web.jodye.mnke.org"
  commonName: "*.mnke.org"
  issuerRef:
    name: le-cf-issuer

View File

@@ -193,3 +193,82 @@ resource "proxmox_virtual_environment_vm" "bench" {
]
}
}
# resource "proxmox_virtual_environment_vm" "press" {
# name = "press"
# description = "Managed by Terraform"
# tags = ["terraform", "ubuntu", "hut"]
# node_name = "pve"
# vm_id = 20002
# cpu {
# cores = 1
# type = "host"
# }
# memory {
# dedicated = 1024
# floating = 1024
# }
# agent {
# enabled = true
# }
# startup {
# order = "1"
# up_delay = "60"
# down_delay = "60"
# }
# disk {
# datastore_id = var.proxmox_vm_storage
# file_id = proxmox_virtual_environment_file.ubuntu_cloud_image.id
# interface = "virtio0"
# iothread = true
# discard = "on"
# size = 16
# file_format = "qcow2"
# }
# initialization {
# ip_config {
# ipv4 {
# address = "10.0.44.4/16"
# gateway = var.gateway
# }
# }
# datastore_id = var.proxmox_image_storage
# user_data_file_id = proxmox_virtual_environment_file.common_cloud_init.id
# }
# network_device {
# bridge = "vmbr0"
# }
# operating_system {
# type = "l26"
# }
# connection {
# type = "ssh"
# user = "ubuntu"
# private_key = file(var.ssh_private_key_file)
# host = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
# }
# provisioner "remote-exec" {
# inline = [
# "sudo hostnamectl set-hostname ${self.name}",
# "sudo systemctl restart avahi-daemon",
# ]
# }
# lifecycle {
# ignore_changes = [
# initialization[0].user_data_file_id,
# ]
# }
# }

View File

@@ -86,9 +86,27 @@ module "k8s_folly" {
  ssh_private_key_file = var.ssh_private_key_file
}
# module "outpost" {
#   source = "./modules/outpost"
module "flock_jodye" {
  source = "./modules/flock"
#   ip_count = 1
#   ssh_public_key_file = var.ssh_public_key_file
# }
  flock_name            = "jodye"
  vm_id_prefix          = "9"
  subnet_cidr           = "10.0.29.0/24"
  gateway               = var.gateway
  wing_count            = 1
  dns_server_ip         = local.dns_server_ip
  proxmox_vm_storage    = var.proxmox_vm_storage
  proxmox_image_storage = var.proxmox_image_storage
  cloud_init_file_id    = proxmox_virtual_environment_file.common_cloud_init.id
  cloud_image_id        = proxmox_virtual_environment_file.ubuntu_cloud_image.id
  ssh_private_key_file  = var.ssh_private_key_file
}
module "embassy" {
  source              = "./modules/embassy"
  ip_count            = 2
  ssh_public_key_file = var.ssh_public_key_file
}

tf/modules/embassy/main.tf Normal file
View File

@@ -0,0 +1,121 @@
resource "aws_key_pair" "titanium" {
  key_name   = "titanium"
  public_key = file(var.ssh_public_key_file)
}
resource "aws_vpc" "embassy" {
  # whatever
  cidr_block = "172.32.0.0/16"
}
resource "aws_subnet" "embassy" {
  vpc_id            = aws_vpc.embassy.id
  cidr_block        = cidrsubnet(aws_vpc.embassy.cidr_block, 8, 1)
  availability_zone = "us-west-2a"
}
resource "aws_internet_gateway" "embassy" {
  vpc_id = aws_vpc.embassy.id
}
resource "aws_security_group" "embassy" {
  vpc_id = aws_vpc.embassy.id
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  # WireGuard
  ingress {
    from_port   = 51820
    to_port     = 51820
    protocol    = "udp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  # Everything else
  ingress {
    from_port   = 10000
    to_port     = 40000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port   = 10000
    to_port     = 40000
    protocol    = "udp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
resource "aws_route_table" "embassy" {
  vpc_id = aws_vpc.embassy.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.embassy.id
  }
}
resource "aws_route_table_association" "embassy_assoc" {
  subnet_id      = aws_subnet.embassy.id
  route_table_id = aws_route_table.embassy.id
}
resource "aws_network_interface" "embassy" {
  subnet_id = aws_subnet.embassy.id
  # Required for private_ip_list
  private_ip_list_enabled = true
  # private_ips aren't ordered, meaning this NIC and its dependent resources
  # may be re-created upon changing the number of IPs.
  # private_ip_list, however, _is_ ordered, hence why we use it over
  # private_ips (worked example after this file)
  private_ip_list = [
    for i in range(var.ip_count) : cidrhost(aws_subnet.embassy.cidr_block, i + 32)
  ]
  security_groups = [aws_security_group.embassy.id]
}
resource "aws_instance" "embassy" {
  ami               = "ami-00c257e12d6828491"
  instance_type     = "t2.micro"
  availability_zone = aws_subnet.embassy.availability_zone
  key_name          = aws_key_pair.titanium.key_name
  network_interface {
    network_interface_id = aws_network_interface.embassy.id
    device_index         = 0
  }
  tags = {
    Name = "embassy-01"
  }
}
resource "aws_eip" "eip" {
  count = var.ip_count
}
resource "aws_eip_association" "eip_assoc" {
  count                = var.ip_count
  network_interface_id = aws_network_interface.embassy.id
  allocation_id        = aws_eip.eip[count.index].id
  private_ip_address   = aws_network_interface.embassy.private_ip_list[count.index]
}
resource "ansible_host" "embassy" {
  # any of the public ips will work
  name   = aws_eip.eip[0].public_ip
  groups = ["embassy"]
}
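For the ip_count = 2 this module is instantiated with, the private_ip_list works out like so (terraform console sketch):

> cidrsubnet("172.32.0.0/16", 8, 1)
"172.32.1.0/24"
> [for i in range(2) : cidrhost("172.32.1.0/24", i + 32)]
[
  "172.32.1.32",
  "172.32.1.33",
]

172.32.1.33 is exactly the vps_ip recorded in the Ansible nat_map, so the second Elastic IP is the one forwarded through to jodye-wings-01.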

View File

@@ -0,0 +1,11 @@
variable "ssh_public_key_file" {
  type = string
}
variable "ip_count" {
  type = number
  validation {
    condition     = var.ip_count >= 1
    error_message = "Need at least one IP."
  }
}

View File

@@ -1,115 +1,17 @@
locals {
managers = [
for i in range(var.manager_count) : {
name = "${var.swarm_name}-manager-${format("%02s", i + 1)}"
}
]
workers = [
for i in range(var.worker_count) : {
name = "${var.swarm_name}-worker-${format("%02s", i + 1)}"
}
]
}
resource "proxmox_virtual_environment_pool" "swarm_pool" {
resource "proxmox_virtual_environment_pool" "flock_pool" {
comment = "Managed by Terraform"
pool_id = var.swarm_name
pool_id = var.flock_name
}
resource "proxmox_virtual_environment_vm" "swarm_manager" {
count = var.manager_count
name = local.managers[count.index].name
resource "proxmox_virtual_environment_vm" "wings" {
count = var.wing_count
name = "${var.flock_name}-wings-${format("%02s", count.index + 1)}"
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "swarm-manager", var.swarm_name]
tags = ["terraform", "ubuntu", "wings", var.flock_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 101}"
pool_id = proxmox_virtual_environment_pool.swarm_pool.id
cpu {
cores = 2
type = "host"
}
memory {
dedicated = 2048
floating = 2048
}
agent {
# read 'Qemu guest agent' section, change to true only when ready
enabled = true
}
startup {
order = "1"
up_delay = "60"
down_delay = "60"
}
disk {
datastore_id = var.proxmox_vm_storage
file_id = var.cloud_image_id
interface = "virtio0"
iothread = true
discard = "on"
size = 32
file_format = "qcow2"
}
initialization {
ip_config {
ipv4 {
# x.x.x.32 - x.x.x.39
address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24"
gateway = var.gateway
}
}
datastore_id = var.proxmox_image_storage
user_data_file_id = var.cloud_init_file_id
}
network_device {
bridge = "vmbr0"
}
operating_system {
type = "l26"
}
connection {
type = "ssh"
user = "ubuntu"
private_key = file(var.ssh_private_key_file)
host = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0]
}
provisioner "remote-exec" {
inline = [
"sudo hostnamectl set-hostname ${self.name}",
"sudo systemctl restart avahi-daemon",
]
}
lifecycle {
ignore_changes = [
initialization[0].user_data_file_id,
]
}
}
resource "proxmox_virtual_environment_vm" "swarm_worker" {
count = var.worker_count
name = local.workers[count.index].name
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "swarm-worker", var.swarm_name]
node_name = "pve"
vm_id = "${var.vm_id_prefix}${count.index + 301}"
pool_id = proxmox_virtual_environment_pool.swarm_pool.id
pool_id = proxmox_virtual_environment_pool.flock_pool.id
cpu {
cores = 4
@@ -127,7 +29,7 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" {
}
startup {
order = "2"
order = "5"
up_delay = "60"
down_delay = "60"
}
@@ -137,7 +39,7 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" {
file_id = var.cloud_image_id
interface = "virtio0"
discard = "on"
size = 32
size = 16
file_format = "qcow2"
}
@@ -179,18 +81,14 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" {
lifecycle {
ignore_changes = [
initialization[0].user_data_file_id,
# These will have manually provisioned disks
disk,
]
}
}
resource "ansible_host" "swarm_manager" {
count = var.manager_count
name = "${local.managers[count.index].name}.local"
groups = ["${var.swarm_name}_manager", var.swarm_name]
}
resource "ansible_host" "swarm_worker" {
count = var.worker_count
name = "${local.workers[count.index].name}.local"
groups = ["${var.swarm_name}_worker", var.swarm_name]
resource "ansible_host" "wings" {
count = var.wing_count
name = "${proxmox_virtual_environment_vm.wings[count.index].name}.local"
groups = ["${var.flock_name}_wings", var.flock_name]
}

View File

@@ -15,23 +15,15 @@ variable "gateway" {
type = string
default = "10.0.0.1"
}
variable "swarm_name" {
variable "flock_name" {
type = string
}
variable "manager_count" {
variable "wing_count" {
type = number
validation {
condition = var.manager_count <= 8
error_message = "Too many manager nodes"
}
}
variable "worker_count" {
type = number
validation {
condition = var.worker_count <= 16
error_message = "Too many worker nodes"
condition = var.wing_count <= 16
error_message = "Too many wing nodes"
}
}

View File

@@ -1,92 +0,0 @@
resource "aws_key_pair" "titanium" {
key_name = "titanium"
public_key = file(var.ssh_public_key_file)
}
resource "aws_vpc" "outpost" {
# whatever
cidr_block = "172.32.0.0/16"
}
resource "aws_subnet" "outpost" {
vpc_id = aws_vpc.outpost.id
cidr_block = cidrsubnet(aws_vpc.outpost.cidr_block, 8, 1)
availability_zone = "us-west-2a"
}
resource "aws_internet_gateway" "outpost" {
vpc_id = aws_vpc.outpost.id
}
resource "aws_security_group" "outpost" {
vpc_id = aws_vpc.outpost.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_route_table" "outpost" {
vpc_id = aws_vpc.outpost.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.outpost.id
}
}
resource "aws_route_table_association" "outpost_assoc" {
subnet_id = aws_subnet.outpost.id
route_table_id = aws_route_table.outpost.id
}
resource "aws_network_interface" "outpost" {
subnet_id = aws_subnet.outpost.id
# Required for private_ip_list
private_ip_list_enabled = true
# private_ips aren't ordered meaning this NIC and its dependent resources may
# be re-created upon changing the number of IPs.
# private_ip_list, however, _is_ ordered, hence why we use it over private_ips
private_ip_list = [
for i in range(var.ip_count) : cidrhost(aws_subnet.outpost.cidr_block, i + 32)
]
security_groups = [aws_security_group.outpost.id]
}
resource "aws_instance" "outpost" {
ami = "ami-00c257e12d6828491"
instance_type = "t2.micro"
availability_zone = aws_subnet.outpost.availability_zone
key_name = aws_key_pair.titanium.key_name
network_interface {
network_interface_id = aws_network_interface.outpost.id
device_index = 0
}
tags = {
Name = "outpost-01"
}
}
resource "aws_eip" "eip" {
count = var.ip_count
}
resource "aws_eip_association" "eip_assoc" {
count = var.ip_count
network_interface_id = aws_network_interface.outpost.id
allocation_id = aws_eip.eip[count.index].id
private_ip_address = aws_network_interface.outpost.private_ip_list[count.index]
}

View File

@@ -1,7 +0,0 @@
variable "ssh_public_key_file" {
type = string
}
variable "ip_count" {
type = number
}