feat: Add moirai flock and change embassy to linode

Tony Du 2025-02-19 16:33:58 -08:00
parent 51fd04b3bd
commit dde508f88b
26 changed files with 392 additions and 202 deletions

View File

@@ -4,6 +4,6 @@ strict: true
groups:
vpn: >-
group_names | intersect(['embassy']) | length == 1 or
group_names | intersect(['jodye']) | length == 1
group_names | intersect(['moirai']) | length == 1
vpn_server: group_names | intersect(['embassy']) | length == 1
vpn_client: group_names | intersect(['jodye']) | length == 1
vpn_client: group_names | intersect(['moirai']) | length == 1
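For reference, the intersect predicates are plain set-membership tests; a minimal Python rendering (not part of the repo) of how they evaluate for a host in the moirai group:

# Sketch: group_names | intersect([group]) | length == 1
def in_group(group_names, group):
    return len(set(group_names) & {group}) == 1

group_names = ["moirai", "moirai_wings"]
print(in_group(group_names, "embassy") or in_group(group_names, "moirai"))  # vpn: True
print(in_group(group_names, "embassy"))                                     # vpn_server: False
print(in_group(group_names, "moirai"))                                      # vpn_client: True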

View File

@@ -1,17 +1,30 @@
ansible_user: ubuntu
# Directory to store WireGuard configuration on the remote hosts
wireguard_remote_directory: /etc/wireguard
wireguard_interface_restart: false
wireguard_service_enabled: true
wireguard_service_enabled: false
wireguard_service_state: started
# Keep the NAT mapping open. Should only be needed for server -> client, but
# if the server disconnects, we may never be able to re-establish a connection.
# So this is on both client and server just in case that happens.
wireguard_persistent_keepalive: 25
wireguard_ipv6_subnet: "fde0:fb5b:2593::/64"
# Setting this here doesn't seem to work. We set it in a playbook later
# public_ipv6_subnet: "{{ hostvars[groups['embassy'][0]].ipv6_subnet }}"
# We can generate this dynamically, but it really doesn't seem like it's worth
# the work.
nat_map:
jodye-wings-01.local:
lan_ip: 10.0.29.40
vpn_ip: 10.4.4.33
vps_ip: 172.32.1.33
moirai-clotho.local:
vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('16') | ansible.utils.ipaddr('address') }}"
vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('16') | ansible.utils.ipaddr('address') }}"
moirai-lachesis.local:
vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('17') | ansible.utils.ipaddr('address') }}"
vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('17') | ansible.utils.ipaddr('address') }}"
moirai-atropos.local:
vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('18') | ansible.utils.ipaddr('address') }}"
vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('18') | ansible.utils.ipaddr('address') }}"
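For reference, a stdlib-Python equivalent (not part of the repo) of the filter chain above: ipaddr('16') selects host index 16 inside the subnet, and ipaddr('address') then strips the prefix length.

import ipaddress

# Host indices 16/17/18 render as ::10/::11/::12 in hex.
subnet = ipaddress.ip_network("fde0:fb5b:2593::/64")
for index in (16, 17, 18):  # clotho, lachesis, atropos
    print(subnet.network_address + index)
# fde0:fb5b:2593::10
# fde0:fb5b:2593::11
# fde0:fb5b:2593::12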

View File

@@ -1,20 +1,18 @@
wireguard_addresses:
- "{{ nat_map[inventory_hostname].vpn_ip }}"
- "{{ nat_map[inventory_hostname].vpn_ipv6 }}"
# wireguard_endpoint: "{{ nat_map[inventory_hostname].vpn_ipv6 }}"
wireguard_endpoint: ""
# Don't set this
# wireguard_dns: 10.0.123.123
# Keep the PAT mapping open. Only needed for the client; the server is always
# reachable
wireguard_persistent_keepalive: 30
# don't route local addresses through the wg tunnel
wireguard_preup:
- ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
# wireguard_preup:
# - ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
wireguard_postdown:
- ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
# wireguard_postdown:
# - ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
# Ok, I could not get the stuff below working properly. What I _wanted_ to do
# was make it so that _only_ traffic that was sent from the wireguard tunnel

View File

@@ -1,43 +1,49 @@
# This should really be set per host, but I'm abusing the fact that there's only
# one vpn_server host
wireguard_addresses:
- "10.4.4.1/24"
- "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('net') | ansible.utils.ipaddr('1') }}"
wireguard_endpoint: "{{ inventory_hostname }}"
wireguard_preup:
- echo 1 > /proc/sys/net/ipv4/ip_forward
- echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
wireguard_postup: |
{% filter from_yaml %}
{%- for value in (nat_map | dict2items | map(attribute='value')) %}
# incoming packets to vps_ip, dst port 10,000-40,000 are DNAT'd to vpn_ip
# with a matching port
- iptables -t nat -A PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
{% for value in (nat_map | dict2items | map(attribute='value')) %}
- ip -6 addr add {{ value.vps_ipv6 }} dev eth0
# incoming packets from vpn_ip are SNAT'd to vps_ip with a matching port to
# complete the reverse NAT path
- iptables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
# Incoming packets to this node's public IP are DNAT'd and forwarded to the
# matching internal VPN IP
- ip6tables -t nat -A PREROUTING -p tcp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
# Same thing for UDP. We do this selectively because we don't wanna mess with
# stuff like icmp
- iptables -t nat -A PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
- iptables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
{%- endfor %}
# Packets leaving an internal VPN IP are SNAT'd to use this node's public
# IP. I think `-j MASQUERADE` might work here rather than doing the SNAT
# manually(?), but I don't mind being explicit here.
- ip6tables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
# Same thing with UDP. We do this selectively so we don't mess with things
# like ICMP6 and whatnot.
- ip6tables -t nat -A PREROUTING -p udp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
- ip6tables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
{% endfor %}
{% endfilter %}
# Exact reverse of above to delete all the rules
wireguard_predown: |
{% filter from_yaml %}
{%- for value in (nat_map | dict2items | map(attribute='value') | reverse) %}
- iptables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
- iptables -t nat -D PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
- iptables -t nat -D PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
- iptables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
{%- endfor %}
{% for value in (nat_map | dict2items | map(attribute='value') | reverse) %}
- ip6tables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
- ip6tables -t nat -D PREROUTING -p udp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
- ip6tables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
- ip6tables -t nat -D PREROUTING -p tcp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
- ip -6 addr del {{ value.vps_ipv6 }} dev eth0
{% endfor %}
{% endfilter %}
wireguard_postdown:
- echo 0 > /proc/sys/net/ipv6/conf/all/forwarding
- echo 0 > /proc/sys/net/ipv4/ip_forward
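To make the templates above concrete, here is a sketch (plain Python, not in the repo) of the rules postup renders for a single nat_map entry. The public address is hypothetical, since the real public_ipv6_subnet only exists once the Linode IPv6 range is allocated.

vpn_ipv6 = "fde0:fb5b:2593::10"   # wireguard_ipv6_subnet, host index 16
vps_ipv6 = "2001:db8::10"         # hypothetical public_ipv6_subnet, host index 16
print(f"ip -6 addr add {vps_ipv6} dev eth0")
for proto in ("tcp", "udp"):
    # DNAT the reserved port range toward the VPN address (destination port
    # preserved); SNAT rewrites traffic sourced from the VPN address so the
    # reverse path uses the public address.
    print(f"ip6tables -t nat -A PREROUTING -p {proto} -d {vps_ipv6} "
          f"--dport 20000:20100 -j DNAT --to-destination {vpn_ipv6}")
    print(f"ip6tables -t nat -A POSTROUTING -p {proto} -s {vpn_ipv6} "
          f"-j SNAT --to-source {vps_ipv6}")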
# https://www.procustodibus.com/blog/2021/03/wireguard-allowedips-calculator/
@@ -50,5 +56,5 @@ wireguard_postdown
# and each host defines a list of IPs that should be routed _to this host_, not
# a list of IPs that should be routed to the "server" (because everyone is a
# peer in a fully meshed network)
wireguard_allowed_ips: "0.0.0.0/0"
wireguard_allowed_ips: "::0/0"
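Concretely (a sketch with addresses from nat_map above; the rendered peer configs aren't shown in this diff): each wing's own VPN address becomes the AllowedIPs of its peer stanza in everyone else's config, while the ::0/0 defined here lands in each client's stanza for the embassy peer.

# Sketch: AllowedIPs as per-peer routing entries in the mesh.
wings = {
    "moirai-clotho":   "fde0:fb5b:2593::10",
    "moirai-lachesis": "fde0:fb5b:2593::11",
    "moirai-atropos":  "fde0:fb5b:2593::12",
}
for host, addr in wings.items():
    # In embassy's wg0.conf: route only this host's address to this peer.
    print(f"[Peer]  # {host}\nAllowedIPs = {addr}/128\n")
# In each wing's wg0.conf, the embassy peer instead carries "AllowedIPs = ::/0":
# route everything else through the server.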

View File

@@ -27,8 +27,8 @@
ingress: true
scope: swarm
ipam_config:
- subnet: 172.254.0.0/16
gateway: 172.254.0.1
- subnet: 172.31.0.0/16
gateway: 172.31.0.1
driver_options:
# I'm honestly not completely sure what this is, but in the default
# ingress network that's created during swarm initialization, this exists

View File

@@ -2,4 +2,4 @@
roles:
- name: githubixx.ansible_role_wireguard
src: https://github.com/githubixx/ansible-role-wireguard.git
version: 17.0.0
version: 17.1.0

View File

@@ -1,29 +1,50 @@
---
- name: Ensure prerequisites met
# TODO: Check # of IP addrs on enX0 >= # of wings + 1
- name: Preparation
hosts: vpn
tasks: []
pre_tasks:
- name: Verify only one embassy
ansible.builtin.assert:
that: >
groups['embassy'] | length == 1 and
groups['vpn_server'] | length == 1 and
groups['vpn_server'] | intersect(groups['embassy']) | length == 1
msg: Expected only one embassy host
- name: Verify ipv6_subnet is set
when: inventory_hostname == groups['embassy'][0]
ansible.builtin.assert:
that: ipv6_subnet is defined
msg: >
Expected ipv6_subnet to be defined.
This should have been done in Terraform or otherwise.
tasks:
# As mentioned in the other file, if I set this statically in group_vars,
# things seem to break.
- set_fact:
public_ipv6_subnet: "{{ hostvars[groups['embassy'][0]].ipv6_subnet }}"
- name: Install wings
hosts: jodye_wings
remote_user: ubuntu
# Don't forget to create a new disk if creating new wings. This is
# purposefully manual to give more fine-grained control
vars:
pv_disks:
- /dev/sda
vg_name: vg1
lv_name: pvs
lv_size: +100%FREE
fs_type: ext4
mount_path: /var/lib/pterodactyl
extra_docker_daemon_options: |
"dns": ["10.0.123.123"],
roles:
- dns-client
- lvm
- docker
- wings
- name: Prepare embassy
hosts: embassy
become: true
tasks:
- name: Disable password-based authentication
lineinfile:
path: "/etc/ssh/sshd_config"
regexp: '^#?PasswordAuthentication'
line: 'PasswordAuthentication no'
register: passwordauthentication
- name: Enable public key authentication in SSH
lineinfile:
path: "/etc/ssh/sshd_config"
regexp: '^#?PubkeyAuthentication'
line: 'PubkeyAuthentication yes'
register: publickeyauthentication
- name: Restart SSH
service:
name: ssh
state: restarted
when: passwordauthentication.changed or publickeyauthentication.changed
- name: Set up VPN
hosts: vpn
@@ -31,3 +52,24 @@
roles:
- githubixx.ansible_role_wireguard
# - name: Install wings
# hosts: moirai_wings
# remote_user: ubuntu
# # Don't forget to create a new disk if creating new wings. This is
# # purposefully manual to give more fine-grained control
# vars:
# pv_disks:
# - /dev/sda
# vg_name: vg1
# lv_name: pvs
# lv_size: +100%FREE
# fs_type: ext4
# mount_path: /var/lib/pterodactyl
# extra_docker_daemon_options: |
# "dns": ["10.0.123.123"],
# roles:
# - dns-client
# - lvm
# - docker
# - wings

View File

@@ -1,9 +1,12 @@
$ORIGIN mnke.org.
@ 900 IN SOA dns-server. hostadmin 38 900 300 604800 900
@ 900 IN SOA dns-server. hostadmin 43 900 300 604800 900
@ 3600 IN NS dns-server.
atropos_moirai 600 IN CNAME dolo
authentik 600 IN CNAME authentik.dolo
blog 600 IN CNAME blog.dolo
clotho_moirai 600 IN CNAME dolo
git 600 IN CNAME git.jumper
lachesis_moirai 600 IN CNAME dolo
media 600 IN CNAME media.dolo
nc 600 IN CNAME nc.dolo
panel 600 IN CNAME panel.dolo

View File

@@ -0,0 +1,6 @@
$ORIGIN moirai.mnke.org.
@ 900 IN SOA dns-server. hostadmin 4 900 300 604800 900
@ 3600 IN NS dns-server.
atropos 600 IN A 10.0.29.42
clotho 600 IN A 10.0.29.40
lachesis 600 IN A 10.0.29.41

View File

@@ -120,14 +120,18 @@ data:
service: https://up.mnke.org
- hostname: panel.mnke.org
service: https://panel.mnke.org
- hostname: wings-01_jodye.mnke.org
service: https://wings-01_jodye.mnke.org
- hostname: vault.mnke.org
service: https://vault.mnke.org
- hostname: authentik.mnke.org
service: https://authentik.mnke.org
- hostname: nc.mnke.org
service: https://nc.mnke.org
- hostname: clotho_moirai.mnke.org
service: https://clotho_moirai.mnke.org
- hostname: lachesis_moirai.mnke.org
service: https://lachesis_moirai.mnke.org
- hostname: atropos_moirai.mnke.org
service: https://atropos_moirai.mnke.org
# This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
- service: http_status:404

View File

@@ -17,6 +17,25 @@ spec:
remoteRef:
key: ghost-mysql-password
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: ghost-email-creds
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: infisical
target:
name: ghost-email-creds
data:
- secretKey: email-password
remoteRef:
key: tony-mnke-org-email-password
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret

View File

@@ -3,13 +3,13 @@
apiVersion: v1
kind: Service
metadata:
name: wings-01-jodye-external
name: atropos-moirai-external
namespace: default
spec:
type: ExternalName
externalName: wings-01.jodye.mnke.org
externalName: atropos.moirai.mnke.org
ports:
- name: wings-01-jodye-external
- name: atropos-moirai-external
port: 443
targetPort: 443
@@ -18,17 +18,17 @@ spec:
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: wings-01-jodye-external
name: atropos-moirai-external
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`wings-01_jodye.mnke.org`)
- match: Host(`atropos_moirai.mnke.org`)
kind: Rule
services:
- kind: Service
name: wings-01-jodye-external
name: atropos-moirai-external
port: 443
passHostHeader: true
scheme: http

View File

@@ -0,0 +1,36 @@
---
# This file was automatically generated. Do not modify.
apiVersion: v1
kind: Service
metadata:
name: clotho-moirai-external
namespace: default
spec:
type: ExternalName
externalName: clotho.moirai.mnke.org
ports:
- name: clotho-moirai-external
port: 443
targetPort: 443
---
# This file was automatically generated. Do not modify.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: clotho-moirai-external
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`clotho_moirai.mnke.org`)
kind: Rule
services:
- kind: Service
name: clotho-moirai-external
port: 443
passHostHeader: true
scheme: http
tls:
secretName: wildcard-mnke-org-tls

View File

@@ -9,5 +9,7 @@ resources:
- wizarr-tonydu.yaml
- dns-dolo-mnke.yaml
- vaultwarden.yaml
- wings-01-jodye.yaml
- panel.yaml
- clotho-moirai.yaml
- lachesis-moirai.yaml
- atropos-moirai.yaml

View File

@@ -0,0 +1,36 @@
---
# This file was automatically generated. Do not modify.
apiVersion: v1
kind: Service
metadata:
name: lachesis-moirai-external
namespace: default
spec:
type: ExternalName
externalName: lachesis.moirai.mnke.org
ports:
- name: lachesis-moirai-external
port: 443
targetPort: 443
---
# This file was automatically generated. Do not modify.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: lachesis-moirai-external
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`lachesis_moirai.mnke.org`)
kind: Rule
services:
- kind: Service
name: lachesis-moirai-external
port: 443
passHostHeader: true
scheme: http
tls:
secretName: wildcard-mnke-org-tls

View File

@@ -65,16 +65,36 @@ proxies:
upstream_port: 443
pass_host_header: true
- service_name: wings-01-jodye
tls_secret_name: wildcard-mnke-org-tls
listen_host: wings-01_jodye.mnke.org
upstream_host: wings-01.jodye.mnke.org
upstream_port: 443
scheme: http
- service_name: panel
tls_secret_name: wildcard-mnke-org-tls
listen_host: panel.mnke.org
upstream_host: panel.jumper.mnke.org
upstream_port: 443
pass_host_header: true
# We use clotho_moirai instead of clotho.moirai because Cloudflare's
# universal SSL certificate doesn't cover *.moirai.mnke.org by default. I'm
# not sure if there's any configuration to allow TLS passthrough on
# Cloudflare (probably not) or to upload my own SSL cert.
#
# TODO: Check if we can host this on clotho.moirai.mnke.org with SSL
- service_name: clotho-moirai
tls_secret_name: wildcard-mnke-org-tls
listen_host: clotho_moirai.mnke.org
upstream_host: clotho.moirai.mnke.org
upstream_port: 443
scheme: http
- service_name: lachesis-moirai
tls_secret_name: wildcard-mnke-org-tls
listen_host: lachesis_moirai.mnke.org
upstream_host: lachesis.moirai.mnke.org
upstream_port: 443
scheme: http
- service_name: atropos-moirai
tls_secret_name: wildcard-mnke-org-tls
listen_host: atropos_moirai.mnke.org
upstream_host: atropos.moirai.mnke.org
upstream_port: 443
scheme: http
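The Service/IngressRoute files marked "This file was automatically generated" appear to be rendered from this proxies list. A hypothetical generator sketch (the repo's actual tool isn't part of this diff; the passHostHeader and scheme defaults are inferred from the generated files):

def render(p):
    name = f"{p['service_name']}-external"
    pass_host = str(p.get("pass_host_header", True)).lower()  # generated files default to true
    return f"""\
apiVersion: v1
kind: Service
metadata:
  name: {name}
  namespace: default
spec:
  type: ExternalName
  externalName: {p['upstream_host']}
  ports:
    - name: {name}
      port: {p['upstream_port']}
      targetPort: {p['upstream_port']}
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: {name}
  namespace: default
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`{p['listen_host']}`)
      kind: Rule
      services:
        - kind: Service
          name: {name}
          port: {p['upstream_port']}
          passHostHeader: {pass_host}
          scheme: {p.get('scheme', 'https')}
  tls:
    secretName: {p['tls_secret_name']}
"""

print(render({
    "service_name": "clotho-moirai",
    "tls_secret_name": "wildcard-mnke-org-tls",
    "listen_host": "clotho_moirai.mnke.org",
    "upstream_host": "clotho.moirai.mnke.org",
    "upstream_port": 443,
    "scheme": "http",
}))

Run against the clotho-moirai entry, this produces essentially the clotho-moirai.yaml manifest shown earlier, minus the "automatically generated" header comments.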

View File

@@ -86,14 +86,14 @@ module "k8s_folly" {
ssh_private_key_file = var.ssh_private_key_file
}
module "flock_jodye" {
module "flock_moirai" {
source = "./modules/flock"
flock_name = "jodye"
flock_name = "moirai"
vm_id_prefix = "9"
subnet_cidr = "10.0.29.0/24"
gateway = var.gateway
wing_count = 1
wing_names = ["clotho", "lachesis", "atropos"]
dns_server_ip = local.dns_server_ip
proxmox_vm_storage = var.proxmox_vm_storage
@@ -107,6 +107,6 @@ module "flock_jodye" {
module "embassy" {
source = "./modules/embassy"
ip_count = 2
ssh_public_key_file = var.ssh_public_key_file
cloudflare_zone_id = var.cloudflare_zone_id
}

View File

@@ -14,7 +14,7 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
pool_id = var.pool_id
cpu {
cores = 1
cores = 2
type = "host"
}
@@ -81,7 +81,7 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
lifecycle {
ignore_changes = [
initialization[0].user_data_file_id,
initialization,
]
}
}

View File

@@ -1,121 +1,94 @@
resource "aws_key_pair" "titanium" {
key_name = "titanium"
public_key = file(var.ssh_public_key_file)
resource "linode_sshkey" "titanium" {
label = "titanium"
ssh_key = chomp(file(var.ssh_public_key_file))
}
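One subtlety: file() keeps the trailing newline, which the old aws_key_pair config passed through as-is; linode_sshkey presumably rejects it, hence the chomp. In Python terms (illustrative only, hypothetical path):

with open("id_rsa.pub") as f:
    ssh_key = f.read().rstrip("\n")  # file() keeps the trailing newline; chomp drops it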
resource "aws_vpc" "embassy" {
# whatever
cidr_block = "172.32.0.0/16"
resource "linode_instance" "embassy" {
image = "linode/ubuntu24.04"
label = "embassy"
region = "us-sea"
type = "g6-nanode-1"
authorized_keys = [linode_sshkey.titanium.ssh_key]
}
resource "aws_subnet" "embassy" {
vpc_id = aws_vpc.embassy.id
cidr_block = cidrsubnet(aws_vpc.embassy.cidr_block, 8, 1)
availability_zone = "us-west-2a"
resource "linode_ipv6_range" "embassy" {
linode_id = linode_instance.embassy.id
prefix_length = 64
}
resource "aws_internet_gateway" "embassy" {
vpc_id = aws_vpc.embassy.id
}
resource "linode_firewall" "embassy" {
label = "embassy"
resource "aws_security_group" "embassy" {
vpc_id = aws_vpc.embassy.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
inbound {
label = "allow-ssh"
action = "ACCEPT"
protocol = "TCP"
ports = "22"
ipv4 = ["0.0.0.0/0"]
ipv6 = ["::/0"]
}
# wireguard
ingress {
from_port = 51820
to_port = 51820
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
# idk, why not lol
inbound {
label = "allow-web"
action = "ACCEPT"
protocol = "TCP"
ports = "80,443"
ipv4 = ["0.0.0.0/0"]
ipv6 = ["::/0"]
}
# We'll selectively open ports, but we'll reserve these for general purpose
ingress {
from_port = 20000
to_port = 20100
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
inbound {
label = "allow-forward-tcp"
action = "ACCEPT"
protocol = "TCP"
ports = "20000-20100"
ipv4 = ["0.0.0.0/0"]
ipv6 = ["::/0"]
}
ingress {
from_port = 20000
to_port = 20100
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
inbound {
label = "allow-forward-udp"
action = "ACCEPT"
protocol = "UDP"
ports = "20000-20100"
ipv4 = ["0.0.0.0/0"]
ipv6 = ["::/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
inbound {
label = "allow-wireguard"
action = "ACCEPT"
protocol = "UDP"
ports = "51820"
ipv4 = ["0.0.0.0/0"]
ipv6 = ["::/0"]
}
inbound_policy = "DROP"
outbound_policy = "ACCEPT"
linodes = [linode_instance.embassy.id]
}
resource "aws_route_table" "embassy" {
vpc_id = aws_vpc.embassy.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.embassy.id
}
}
resource "aws_route_table_association" "embassy_assoc" {
subnet_id = aws_subnet.embassy.id
route_table_id = aws_route_table.embassy.id
}
resource "aws_network_interface" "embassy" {
subnet_id = aws_subnet.embassy.id
# Required for private_ip_list
private_ip_list_enabled = true
# private_ips aren't ordered, meaning this NIC and its dependent resources
# may be re-created upon changing the number of IPs.
# private_ip_list, however, _is_ ordered, hence why we use it over private_ips
private_ip_list = [
for i in range(var.ip_count) : cidrhost(aws_subnet.embassy.cidr_block, i + 32)
]
security_groups = [aws_security_group.embassy.id]
}
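The ordering guarantee is easy to check; a stdlib-Python equivalent (not part of the repo) of the cidrsubnet/cidrhost arithmetic, whose second address lines up with the vps_ip (172.32.1.33) in nat_map:

import ipaddress

# cidrsubnet("172.32.0.0/16", 8, 1) is the second /24;
# cidrhost(subnet, i + 32) picks host addresses starting at .32.
vpc = ipaddress.ip_network("172.32.0.0/16")
subnet = list(vpc.subnets(prefixlen_diff=8))[1]   # 172.32.1.0/24
ips = [str(subnet.network_address + (i + 32)) for i in range(2)]  # ip_count = 2
print(ips)  # ['172.32.1.32', '172.32.1.33'] -- stable order, unlike private_ips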
resource "aws_instance" "embassy" {
ami = "ami-00c257e12d6828491"
instance_type = "t2.micro"
availability_zone = aws_subnet.embassy.availability_zone
key_name = aws_key_pair.titanium.key_name
network_interface {
network_interface_id = aws_network_interface.embassy.id
device_index = 0
}
tags = {
Name = "embassy-01"
}
}
resource "aws_eip" "eip" {
count = var.ip_count
}
resource "aws_eip_association" "eip_assoc" {
count = var.ip_count
network_interface_id = aws_network_interface.embassy.id
allocation_id = aws_eip.eip[count.index].id
private_ip_address = aws_network_interface.embassy.private_ip_list[count.index]
resource "cloudflare_dns_record" "embassy_ipv4" {
zone_id = var.cloudflare_zone_id
content = linode_instance.embassy.ip_address
name = "embassy.mnke.org"
proxied = false
ttl = 1 # 1 = automatic TTL
type = "A"
}
resource "ansible_host" "embassy" {
# any of the public ips will work
name = aws_eip.eip[0].public_ip
# Ideally, we'd use the domain name here, but the way the internal DNS server
# is set up right now, we don't forward mnke.org requests because we have
# a primary zone for mnke.org (I think). We should change this if possible
name = linode_instance.embassy.ip_address
groups = ["embassy"]
variables = {
ipv6_subnet = "${linode_ipv6_range.embassy.range}/${linode_ipv6_range.embassy.prefix_length}"
ansible_user = "root"
}
}

View File

@@ -4,9 +4,13 @@ terraform {
source = "ansible/ansible"
version = "1.3.0"
}
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
linode = {
source = "linode/linode"
version = "2.34.1"
}
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 5"
}
}
}

View File

@@ -2,10 +2,6 @@ variable "ssh_public_key_file" {
type = string
}
variable "ip_count" {
type = number
validation {
condition = var.ip_count >= 1
error_message = "Need at least one ip"
}
variable "cloudflare_zone_id" {
type = string
}

View File

@@ -4,8 +4,8 @@ resource "proxmox_virtual_environment_pool" "flock_pool" {
}
resource "proxmox_virtual_environment_vm" "wings" {
count = var.wing_count
name = "${var.flock_name}-wings-${format("%02s", count.index + 1)}"
count = length(var.wing_names)
name = "${var.flock_name}-${var.wing_names[count.index]}"
description = "Managed by Terraform"
tags = ["terraform", "ubuntu", "wings", var.flock_name]
@@ -19,8 +19,8 @@
}
memory {
dedicated = 8192
floating = 8192
dedicated = 16384
floating = 16384
}
agent {
@@ -88,7 +88,7 @@
}
resource "ansible_host" "wings" {
count = var.wing_count
count = length(var.wing_names)
name = "${proxmox_virtual_environment_vm.wings[count.index].name}.local"
groups = ["${var.flock_name}_wings", var.flock_name]
}

View File

@@ -19,10 +19,10 @@ variable "flock_name" {
type = string
}
variable "wing_count" {
type = number
variable "wing_names" {
type = list(string)
validation {
condition = var.wing_count <= 16
condition = length(var.wing_names) <= 16
error_message = "Too many wing nodes"
}
}

View File

@@ -20,6 +20,14 @@ terraform {
source = "hashicorp/aws"
version = "~> 5.0"
}
linode = {
source = "linode/linode"
version = "2.34.1"
}
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 5"
}
}
backend "s3" {
@@ -52,6 +60,10 @@ provider "proxmox" {
}
}
provider "cloudflare" {
api_token = var.cloudflare_api_token
}
provider "dns" {
update {
server = local.dns_server_ip
@@ -64,3 +76,7 @@ provider "dns" {
provider "aws" {
region = "us-west-2"
}
provider "linode" {
token = var.linode_pat
}

View File

@@ -60,3 +60,17 @@ variable "ssh_private_key_file" {
type = string
description = "Path to private key file. Make sure this matches the public key"
}
variable "linode_pat" {
type = string
sensitive = true
}
variable "cloudflare_api_token" {
type = string
sensitive = true
}
variable "cloudflare_zone_id" {
type = string
}

View File

@@ -7,3 +7,5 @@ ssh_import_id = "gh:tonyd33"
ssh_public_key_file = "~/.ssh/id_rsa.pub"
ssh_private_key_file = "~/.ssh/id_rsa"
cloudflare_zone_id = "4a9cbe349529b23a665bc65edfa5a4f9"