From 2c8f9d0ea2561b69dc4a06cf8285894f8295b6d0 Mon Sep 17 00:00:00 2001 From: Tony Du Date: Sat, 15 Feb 2025 17:08:13 -0800 Subject: [PATCH] feat: Expose game servers --- ansible/inventory/full/02-hosts.yml | 7 + .../inventory/full/group_vars/all/main.yml | 2 + .../inventory/full/group_vars/vpn/main.yml | 18 +++ .../full/group_vars/vpn_client/main.yml | 17 +++ .../full/group_vars/vpn_server/main.yml | 55 ++++++++ ansible/jumper.yml | 29 ++++ ansible/roles/dns-client/tasks/main.yml | 10 +- ansible/roles/requirements.yml | 5 + ansible/roles/wings/files/docker-compose.yml | 30 ++++ ansible/roles/wings/tasks/main.yml | 16 +++ ansible/wings.yml | 33 +++++ dns/zones/jodye.mnke.org.zone | 4 + dns/zones/mnke.org.zone | 1 + dns/zones/proxy.dolo.mnke.org.zone | 4 + dns/zones/web.jodye.mnke.org.zone | 4 + .../pterodactyl-panel/docker-compose.yml | 92 ++++++++++++ .../external/build/kustomization.yaml | 2 +- .../build/web-wings-01-jodye-mnke.yaml | 36 +++++ .../build/wings-01-web-jodye-mnke.yaml | 36 +++++ .../external/templater/templates/proxy.yaml | 3 + .../external/templater/values.yaml | 6 +- .../certificates/wildcard-mnke-org.yaml | 2 + tf/huts.tf | 79 +++++++++++ tf/main.tf | 28 +++- tf/modules/embassy/main.tf | 121 ++++++++++++++++ tf/modules/{outpost => embassy}/providers.tf | 0 tf/modules/embassy/variables.tf | 11 ++ tf/modules/flock/main.tf | 132 ++---------------- tf/modules/flock/variables.tf | 16 +-- tf/modules/outpost/main.tf | 92 ------------ tf/modules/outpost/variables.tf | 7 - 31 files changed, 660 insertions(+), 238 deletions(-) create mode 100644 ansible/inventory/full/group_vars/vpn/main.yml create mode 100644 ansible/inventory/full/group_vars/vpn_client/main.yml create mode 100644 ansible/inventory/full/group_vars/vpn_server/main.yml create mode 100644 ansible/roles/requirements.yml create mode 100644 ansible/roles/wings/files/docker-compose.yml create mode 100644 ansible/roles/wings/tasks/main.yml create mode 100644 ansible/wings.yml create mode 100644 dns/zones/jodye.mnke.org.zone create mode 100644 dns/zones/proxy.dolo.mnke.org.zone create mode 100644 dns/zones/web.jodye.mnke.org.zone create mode 100644 docker/compose/pterodactyl-panel/docker-compose.yml create mode 100644 k8s/apps/ingressroutes/external/build/web-wings-01-jodye-mnke.yaml create mode 100644 k8s/apps/ingressroutes/external/build/wings-01-web-jodye-mnke.yaml create mode 100644 tf/modules/embassy/main.tf rename tf/modules/{outpost => embassy}/providers.tf (100%) create mode 100644 tf/modules/embassy/variables.tf delete mode 100644 tf/modules/outpost/main.tf delete mode 100644 tf/modules/outpost/variables.tf diff --git a/ansible/inventory/full/02-hosts.yml b/ansible/inventory/full/02-hosts.yml index 9b089c7..32449b6 100644 --- a/ansible/inventory/full/02-hosts.yml +++ b/ansible/inventory/full/02-hosts.yml @@ -1,2 +1,9 @@ plugin: ansible.builtin.constructed strict: true + +groups: + vpn: >- + group_names | intersect(['embassy']) | length == 1 or + group_names | intersect(['jodye']) | length == 1 + vpn_server: group_names | intersect(['embassy']) | length == 1 + vpn_client: group_names | intersect(['jodye']) | length == 1 diff --git a/ansible/inventory/full/group_vars/all/main.yml b/ansible/inventory/full/group_vars/all/main.yml index 2afbac0..a848978 100644 --- a/ansible/inventory/full/group_vars/all/main.yml +++ b/ansible/inventory/full/group_vars/all/main.yml @@ -1,3 +1,5 @@ +ansible_user: ubuntu + dns_server: admin_username: "{{ secrets.admin_username }}" admin_password: "{{ secrets.admin_password }}" diff --git 
a/ansible/inventory/full/group_vars/vpn/main.yml b/ansible/inventory/full/group_vars/vpn/main.yml new file mode 100644 index 0000000..3ecefe7 --- /dev/null +++ b/ansible/inventory/full/group_vars/vpn/main.yml @@ -0,0 +1,18 @@ +ansible_user: ubuntu + +# Directory to store WireGuard configuration on the remote hosts +wireguard_remote_directory: /etc/wireguard + +wireguard_interface_restart: true +# TODO: Enable this when stable +wireguard_service_enabled: false +wireguard_service_state: started + +# We can generate this dynamically, but it really doesn't seem like it's worth +# the work. +nat_map: + jodye-wings-01.local: + lan_ip: 10.0.29.40 + vpn_ip: 10.4.4.33 + vps_ip: 172.32.1.33 + diff --git a/ansible/inventory/full/group_vars/vpn_client/main.yml b/ansible/inventory/full/group_vars/vpn_client/main.yml new file mode 100644 index 0000000..6e53b6c --- /dev/null +++ b/ansible/inventory/full/group_vars/vpn_client/main.yml @@ -0,0 +1,17 @@ +wireguard_addresses: + - "{{ nat_map[inventory_hostname].vpn_ip }}" +wireguard_endpoint: "" + +# Don't set this +# wireguard_dns: 10.0.123.123 + +# Keep the PAT mapping open. Only needed for the client; the server is always +# reachable +wireguard_persistent_keepalive: 30 + +# don't route local addresses through the wg tunnel +wireguard_preup: + - ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink + +wireguard_postdown: + - ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink diff --git a/ansible/inventory/full/group_vars/vpn_server/main.yml b/ansible/inventory/full/group_vars/vpn_server/main.yml new file mode 100644 index 0000000..9db9846 --- /dev/null +++ b/ansible/inventory/full/group_vars/vpn_server/main.yml @@ -0,0 +1,55 @@ +# This should really be set per host, but I'm abusing the fact that there's only +# one vpn_server host +wireguard_addresses: + - "10.4.4.1/24" + +wireguard_endpoint: "{{ inventory_hostname }}" + +wireguard_preup: + - echo 1 > /proc/sys/net/ipv4/ip_forward + +wireguard_postup: | + {% filter from_yaml %} + {%- for value in (nat_map | dict2items | map(attribute='value')) %} + + # incoming packets to vps_ip, dst port 10,000-40,000 are DNAT'd to vpn_ip + # with a matching port + - iptables -t nat -A PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }} + + # incoming packets from vpn_ip are SNAT'd to vps_ip with a matching port to + # complete the reverse NAT path + - iptables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }} + + # Same thing for UDP. 
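+ # For the one entry currently in nat_map (jodye-wings-01: vps_ip 172.32.1.33, vpn_ip 10.4.4.33), each pair above should render to, e.g. for TCP: + # iptables -t nat -A PREROUTING -p tcp -d 172.32.1.33 --dport 10000:40000 -j DNAT --to-destination 10.4.4.33 + # iptables -t nat -A POSTROUTING -p tcp -s 10.4.4.33 -j SNAT --to-source 172.32.1.33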
We do this selectively because we don't wanna mess with + # stuff like icmp + - iptables -t nat -A PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }} + - iptables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }} + {%- endfor %} + {% endfilter %} + +# Exact reverse of above to delete all the rules +wireguard_predown: | + {% filter from_yaml %} + {%- for value in (nat_map | dict2items | map(attribute='value') | reverse) %} + - iptables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }} + - iptables -t nat -D PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }} + - iptables -t nat -D PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }} + - iptables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }} + {%- endfor %} + {% endfilter %} + +wireguard_postdown: + - echo 0 > /proc/sys/net/ipv4/ip_forward + +# https://www.procustodibus.com/blog/2021/03/wireguard-allowedips-calculator/ +# The post above recommends just adding specific routing rules rather than +# computing an equivalent list of subnets +# +# Yes, this is supposed to be defined on vpn_server rather than on vpn_client, +# where I initially thought it belonged. The reason is likely that the role was +# meant for a fully meshed network rather than a single server with multiple +# clients, and each host defines a list of IPs that should be routed _to this +# host_, not a list of IPs that should be routed to the "server" (because +# everyone is a peer in a fully meshed network) +wireguard_allowed_ips: "0.0.0.0/0" + diff --git a/ansible/jumper.yml b/ansible/jumper.yml index 24b2305..4da3791 100644 --- a/ansible/jumper.yml +++ b/ansible/jumper.yml @@ -46,6 +46,35 @@ source: /mnt/docker/docker-root/volumes target: /var/lib/docker/volumes + # This is used _only_ to enable stacks for things like rolling updates + - name: Init swarm + become: true + docker_swarm: + state: present + advertise_addr: "{{ansible_default_ipv4.address}}" + default_addr_pool: + - 172.17.0.0/16 + - 172.18.0.0/16 + - 172.19.0.0/16 + + - name: Create ingress network + # The ingress network conflicts with my subnet and ends up causing problems, + # so we have to set a different subnet first + become: true + docker_network: + name: ingress + driver: overlay + ingress: true + scope: swarm + ipam_config: + - subnet: 172.31.0.0/16 + gateway: 172.31.0.1 + driver_options: + # I'm honestly not completely sure what this is, but in the default + # ingress network that's created during swarm initialization, this exists + # and things don't seem to work without it.
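+ # (If curious, the value the stock ingress network uses can be read before + # deleting it with something like: + # docker network inspect ingress --format '{{ index .Options "com.docker.network.driver.overlay.vxlanid_list" }}')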
+ com.docker.network.driver.overlay.vxlanid_list: 4096 + - name: Mount extra shares tags: [extra-shares] hosts: jumper diff --git a/ansible/roles/dns-client/tasks/main.yml b/ansible/roles/dns-client/tasks/main.yml index 30ff7a4..dcb62c5 100644 --- a/ansible/roles/dns-client/tasks/main.yml +++ b/ansible/roles/dns-client/tasks/main.yml @@ -6,6 +6,7 @@ content: | [Resolve] DNS=10.0.123.123 + DNSStubListener=no # FallbackDNS=1.1.1.1 dest: /etc/systemd/resolved.conf register: dns_servers_configuration @@ -16,5 +17,12 @@ name: systemd-resolved state: restarted when: dns_servers_configuration.changed - retries: 2 + # This is gonna break the SSH connection + async: 10 + poll: 0 + ignore_errors: yes +- name: Wait for SSH to come back + wait_for_connection: + delay: 5 + timeout: 500 diff --git a/ansible/roles/requirements.yml b/ansible/roles/requirements.yml new file mode 100644 index 0000000..bcfaeea --- /dev/null +++ b/ansible/roles/requirements.yml @@ -0,0 +1,5 @@ +--- +roles: + - name: githubixx.ansible_role_wireguard + src: https://github.com/githubixx/ansible-role-wireguard.git + version: 17.0.0 diff --git a/ansible/roles/wings/files/docker-compose.yml b/ansible/roles/wings/files/docker-compose.yml new file mode 100644 index 0000000..7c66750 --- /dev/null +++ b/ansible/roles/wings/files/docker-compose.yml @@ -0,0 +1,30 @@ +--- +version: '3.8' + +services: + wings: + image: ghcr.io/pterodactyl/wings:v1.11.13 + restart: always + ports: + - "8080:8080" + - "2022:2022" + - "443:443" + tty: true + environment: + TZ: "America/Vancouver" + WINGS_UID: 988 + WINGS_GID: 988 + WINGS_USERNAME: pterodactyl + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + - "/var/lib/docker/containers/:/var/lib/docker/containers/" + - "/etc/pterodactyl/:/etc/pterodactyl/" + - "/var/lib/pterodactyl/:/var/lib/pterodactyl/" + - "/var/log/pterodactyl/:/var/log/pterodactyl/" + - "/tmp/pterodactyl/:/tmp/pterodactyl/" + - "/etc/ssl/certs:/etc/ssl/certs:ro" + # you may need /srv/daemon-data if you are upgrading from an old daemon + #- "/srv/daemon-data/:/srv/daemon-data/" + # Required for ssl if you use let's encrypt. uncomment to use. + #- "/etc/letsencrypt/:/etc/letsencrypt/" + diff --git a/ansible/roles/wings/tasks/main.yml b/ansible/roles/wings/tasks/main.yml new file mode 100644 index 0000000..a01862b --- /dev/null +++ b/ansible/roles/wings/tasks/main.yml @@ -0,0 +1,16 @@ +- name: Verify compose directory exists + file: + path: "/home/{{ansible_user}}/compose/wings" + state: directory + mode: 0755 + +- name: Copy wings configuration + copy: + src: docker-compose.yml + dest: "/home/{{ansible_user}}/compose/wings/docker-compose.yml" + mode: 0755 + +- name: Deploy wings + become: true + community.docker.docker_compose_v2: + project_src: "/home/{{ansible_user}}/compose/wings" diff --git a/ansible/wings.yml b/ansible/wings.yml new file mode 100644 index 0000000..a2813f0 --- /dev/null +++ b/ansible/wings.yml @@ -0,0 +1,33 @@ +--- +- name: Ensure prerequisites met + # TODO: Check # of IP addrs on enX0 >= # of wings + 1 + hosts: vpn + tasks: [] + +- name: Install wings + hosts: jodye_wings + remote_user: ubuntu + # Don't forget to create a new disk if creating new wings. 
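+ # (e.g. attach a fresh disk that the VM sees as /dev/sda, matching pv_disks below.)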
This is + # purposefully manual to give more fine-grained control + vars: + pv_disks: + - /dev/sda + vg_name: vg1 + lv_name: pvs + lv_size: +100%FREE + fs_type: ext4 + mount_path: /var/lib/pterodactyl + extra_docker_daemon_options: | + "dns": ["10.0.123.123"], + roles: + - dns-client + - lvm + - docker + - wings + +- name: Set up VPN + hosts: vpn + become: true + roles: + - githubixx.ansible_role_wireguard + diff --git a/dns/zones/jodye.mnke.org.zone b/dns/zones/jodye.mnke.org.zone new file mode 100644 index 0000000..4c487a3 --- /dev/null +++ b/dns/zones/jodye.mnke.org.zone @@ -0,0 +1,4 @@ +$ORIGIN jodye.mnke.org. +@ 900 IN SOA dns-server. hostadmin 3 900 300 604800 900 +@ 3600 IN NS dns-server. +wings-01 600 IN A 10.0.29.40 diff --git a/dns/zones/mnke.org.zone b/dns/zones/mnke.org.zone index 3052999..4979a75 100644 --- a/dns/zones/mnke.org.zone +++ b/dns/zones/mnke.org.zone @@ -5,3 +5,4 @@ authentik 600 IN CNAME authentik.dolo blog 600 IN CNAME blog.dolo git 600 IN CNAME git.jumper vault 600 IN CNAME vault.jumper +panel 600 IN CNAME panel.jumper diff --git a/dns/zones/proxy.dolo.mnke.org.zone b/dns/zones/proxy.dolo.mnke.org.zone new file mode 100644 index 0000000..420813f --- /dev/null +++ b/dns/zones/proxy.dolo.mnke.org.zone @@ -0,0 +1,4 @@ +$ORIGIN proxy.dolo.mnke.org. +@ 900 IN SOA dns-server. hostadmin 2 900 300 604800 900 +@ 3600 IN NS dns-server. +* 600 IN CNAME dolo.mnke.org. diff --git a/dns/zones/web.jodye.mnke.org.zone b/dns/zones/web.jodye.mnke.org.zone new file mode 100644 index 0000000..41409b3 --- /dev/null +++ b/dns/zones/web.jodye.mnke.org.zone @@ -0,0 +1,4 @@ +$ORIGIN web.jodye.mnke.org. +@ 900 IN SOA dns-server. hostadmin 2 900 300 604800 900 +@ 3600 IN NS dns-server. +wings-01 600 IN CNAME dolo.mnke.org. diff --git a/docker/compose/pterodactyl-panel/docker-compose.yml b/docker/compose/pterodactyl-panel/docker-compose.yml new file mode 100644 index 0000000..4165e08 --- /dev/null +++ b/docker/compose/pterodactyl-panel/docker-compose.yml @@ -0,0 +1,92 @@ +--- +version: '3.8' + +x-common: + database: &db-environment + MYSQL_PASSWORD: &db-password "${MYSQL_PASSWORD}" + MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD}" + panel: + &panel-environment + APP_URL: "https://${PANEL_HOST:-panel.mnke.org}" + # A list of valid timezones can be found here: http://php.net/manual/en/timezones.php + APP_TIMEZONE: "America/Vancouver" + APP_SERVICE_AUTHOR: "${EMAIL:-tony@mnke.org}" + TRUSTED_PROXIES: "${TRUSTED_PROXIES:-*}" + # Uncomment the line below and set to a non-empty value if you want to use Let's Encrypt + # to generate an SSL certificate for the Panel. 
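+ # (Left unset here since panel.mnke.org sits behind traefik, which + # terminates TLS via the router labels on the panel service below, so the + # panel container itself doesn't need Let's Encrypt.)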
+ # LE_EMAIL: "" + mail: &mail-environment + MAIL_FROM: "${EMAIL:-tony@mnke.org}" + MAIL_DRIVER: "smtp" + MAIL_HOST: "mail" + MAIL_PORT: "${MAIL_PORT:-1025}" + MAIL_USERNAME: "${MAIL_USERNAME:-}" + MAIL_PASSWORD: "${MAIL_PASSWORD:-}" + MAIL_ENCRYPTION: "true" + +services: + database: + image: mariadb:10.5 + restart: always + command: --default-authentication-plugin=mysql_native_password + networks: + - pterodactyl-panel + volumes: + - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/database:/var/lib/mysql + environment: + <<: *db-environment + MYSQL_DATABASE: "panel" + MYSQL_USER: "pterodactyl" + + cache: + image: redis:7.4.2-alpine + restart: unless-stopped + networks: + - pterodactyl-panel + + panel: + image: ghcr.io/pterodactyl/panel:latest + restart: unless-stopped + # ports: + # - "80:80" + # - "443:443" + networks: + - pterodactyl-panel + - traefik + volumes: + - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/var/:/app/var/ + - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/nginx/:/etc/nginx/http.d/ + - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/certs/:/etc/letsencrypt/ + - ${PTERODACTYL_DIRECTORY:-/mnt/docker/volumes/pterodactyl}/logs/:/app/storage/logs + environment: + <<: [*panel-environment, *mail-environment] + DB_PASSWORD: *db-password + APP_ENV: "production" + APP_ENVIRONMENT_ONLY: "false" + CACHE_DRIVER: "redis" + SESSION_DRIVER: "redis" + QUEUE_DRIVER: "redis" + REDIS_HOST: "cache" + DB_HOST: "database" + DB_PORT: "3306" + labels: + - "traefik.enable=true" + - "traefik.http.routers.pterodactyl-panel.rule=Host(`${PANEL_HOST:-panel.mnke.org}`)" + - "traefik.http.routers.pterodactyl-panel.entrypoints=websecure" + - "traefik.http.routers.pterodactyl-panel.tls.certresolver=letsencrypt" + - "traefik.http.services.pterodactyl-panel.loadbalancer.server.port=80" + - "traefik.docker.network=traefik" + deploy: + resources: + limits: + cpus: '0.50' + memory: 1G + reservations: + cpus: '0.25' + memory: 128M + +networks: + pterodactyl-panel: + name: pterodactyl-panel + traefik: + external: true diff --git a/k8s/apps/ingressroutes/external/build/kustomization.yaml b/k8s/apps/ingressroutes/external/build/kustomization.yaml index 63269a4..05a19cb 100644 --- a/k8s/apps/ingressroutes/external/build/kustomization.yaml +++ b/k8s/apps/ingressroutes/external/build/kustomization.yaml @@ -7,4 +7,4 @@ resources: - seerr-mnke.yaml - seerr-tonydu.yaml - dns-dolo-mnke.yaml - - jodye-wings-01-dolo-mnke.yaml + - wings-01-web-jodye-mnke.yaml diff --git a/k8s/apps/ingressroutes/external/build/web-wings-01-jodye-mnke.yaml b/k8s/apps/ingressroutes/external/build/web-wings-01-jodye-mnke.yaml new file mode 100644 index 0000000..1fcc516 --- /dev/null +++ b/k8s/apps/ingressroutes/external/build/web-wings-01-jodye-mnke.yaml @@ -0,0 +1,36 @@ +--- +# This file was automatically generated. Do not modify. +apiVersion: v1 +kind: Service +metadata: + name: web-wings-01-jodye-mnke-external + namespace: default +spec: + type: ExternalName + externalName: wings-01.jodye.mnke.org + ports: + - name: web-wings-01-jodye-mnke-external + port: 443 + targetPort: 443 + +--- +# This file was automatically generated. Do not modify. 
+apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: web-wings-01-jodye-mnke-external + namespace: default +spec: + entryPoints: + - websecure + routes: + - match: Host(`web.wings-01.jodye.mnke.org`) + kind: Rule + services: + - kind: Service + name: web-wings-01-jodye-mnke-external + port: 443 + passHostHeader: true + scheme: http + tls: + secretName: wildcard-mnke-org-tls diff --git a/k8s/apps/ingressroutes/external/build/wings-01-web-jodye-mnke.yaml b/k8s/apps/ingressroutes/external/build/wings-01-web-jodye-mnke.yaml new file mode 100644 index 0000000..f7145f6 --- /dev/null +++ b/k8s/apps/ingressroutes/external/build/wings-01-web-jodye-mnke.yaml @@ -0,0 +1,36 @@ +--- +# This file was automatically generated. Do not modify. +apiVersion: v1 +kind: Service +metadata: + name: wings-01-web-jodye-mnke-external + namespace: default +spec: + type: ExternalName + externalName: wings-01.jodye.mnke.org + ports: + - name: wings-01-web-jodye-mnke-external + port: 443 + targetPort: 443 + +--- +# This file was automatically generated. Do not modify. +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: wings-01-web-jodye-mnke-external + namespace: default +spec: + entryPoints: + - websecure + routes: + - match: Host(`wings-01.web.jodye.mnke.org`) + kind: Rule + services: + - kind: Service + name: wings-01-web-jodye-mnke-external + port: 443 + passHostHeader: true + scheme: http + tls: + secretName: wildcard-mnke-org-tls diff --git a/k8s/apps/ingressroutes/external/templater/templates/proxy.yaml b/k8s/apps/ingressroutes/external/templater/templates/proxy.yaml index 22c7fe5..7ad3dc8 100644 --- a/k8s/apps/ingressroutes/external/templater/templates/proxy.yaml +++ b/k8s/apps/ingressroutes/external/templater/templates/proxy.yaml @@ -41,6 +41,9 @@ spec: name: {{ service_name }}-external port: {{ upstream_port }} passHostHeader: {{ pass_host_header | default('true') }} + {%- if scheme %} + scheme: {{ scheme }} + {%- endif %} {%- endfor %} tls: secretName: {{ tls_secret_name }} diff --git a/k8s/apps/ingressroutes/external/templater/values.yaml b/k8s/apps/ingressroutes/external/templater/values.yaml index 94025da..f4785b0 100644 --- a/k8s/apps/ingressroutes/external/templater/values.yaml +++ b/k8s/apps/ingressroutes/external/templater/values.yaml @@ -47,9 +47,9 @@ proxies: upstream_port: 5380 pass_host_header: false - - service_name: jodye-wings-01-dolo-mnke + - service_name: wings-01-web-jodye-mnke tls_secret_name: wildcard-mnke-org-tls - listen_host: jodye-wings-01.dolo.mnke.org + listen_host: wings-01.web.jodye.mnke.org upstream_host: wings-01.jodye.mnke.org upstream_port: 443 - + scheme: http diff --git a/k8s/infrastructure/configs/certificates/wildcard-mnke-org.yaml b/k8s/infrastructure/configs/certificates/wildcard-mnke-org.yaml index 852483a..cba8417 100644 --- a/k8s/infrastructure/configs/certificates/wildcard-mnke-org.yaml +++ b/k8s/infrastructure/configs/certificates/wildcard-mnke-org.yaml @@ -20,6 +20,8 @@ spec: - "*.mnke.org" - "*.home.mnke.org" - "*.dolo.mnke.org" + - "*.jodye.mnke.org" + - "*.web.jodye.mnke.org" commonName: "*.mnke.org" issuerRef: name: le-cf-issuer diff --git a/tf/huts.tf b/tf/huts.tf index d21567f..c185a45 100644 --- a/tf/huts.tf +++ b/tf/huts.tf @@ -193,3 +193,82 @@ resource "proxmox_virtual_environment_vm" "bench" { ] } } + +# resource "proxmox_virtual_environment_vm" "press" { + # name = "press" + # description = "Managed by Terraform" + # tags = ["terraform", "ubuntu", "hut"] + + # node_name = "pve" + # vm_id = 20002 + + # cpu { + # cores 
= 1 + # type = "host" + # } + + # memory { + # dedicated = 1024 + # floating = 1024 + # } + + # agent { + # enabled = true + # } + + # startup { + # order = "1" + # up_delay = "60" + # down_delay = "60" + # } + + # disk { + # datastore_id = var.proxmox_vm_storage + # file_id = proxmox_virtual_environment_file.ubuntu_cloud_image.id + # interface = "virtio0" + # iothread = true + # discard = "on" + # size = 16 + # file_format = "qcow2" + # } + + # initialization { + # ip_config { + # ipv4 { + # address = "10.0.44.4/16" + # gateway = var.gateway + # } + # } + # datastore_id = var.proxmox_image_storage + + # user_data_file_id = proxmox_virtual_environment_file.common_cloud_init.id + # } + + # network_device { + # bridge = "vmbr0" + # } + + # operating_system { + # type = "l26" + # } + + # connection { + # type = "ssh" + # user = "ubuntu" + # private_key = file(var.ssh_private_key_file) + # host = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0] + # } + + # provisioner "remote-exec" { + # inline = [ + # "sudo hostnamectl set-hostname ${self.name}", + # "sudo systemctl restart avahi-daemon", + # ] + # } + + # lifecycle { + # ignore_changes = [ + # initialization[0].user_data_file_id, + # ] + # } +# } diff --git a/tf/main.tf b/tf/main.tf index 092d175..97e6bcb 100644 --- a/tf/main.tf +++ b/tf/main.tf @@ -86,9 +86,27 @@ module "k8s_folly" { ssh_private_key_file = var.ssh_private_key_file } -# module "outpost" { - # source = "./modules/outpost" +module "flock_jodye" { + source = "./modules/flock" - # ip_count = 1 - # ssh_public_key_file = var.ssh_public_key_file -# } + flock_name = "jodye" + vm_id_prefix = "9" + subnet_cidr = "10.0.29.0/24" + gateway = var.gateway + wing_count = 1 + dns_server_ip = local.dns_server_ip + + proxmox_vm_storage = var.proxmox_vm_storage + proxmox_image_storage = var.proxmox_image_storage + + cloud_init_file_id = proxmox_virtual_environment_file.common_cloud_init.id + cloud_image_id = proxmox_virtual_environment_file.ubuntu_cloud_image.id + ssh_private_key_file = var.ssh_private_key_file +} + +module "embassy" { + source = "./modules/embassy" + + ip_count = 2 + ssh_public_key_file = var.ssh_public_key_file +} diff --git a/tf/modules/embassy/main.tf b/tf/modules/embassy/main.tf new file mode 100644 index 0000000..02c92b7 --- /dev/null +++ b/tf/modules/embassy/main.tf @@ -0,0 +1,121 @@ +resource "aws_key_pair" "titanium" { + key_name = "titanium" + public_key = file(var.ssh_public_key_file) +} + +resource "aws_vpc" "embassy" { + # whatever + cidr_block = "172.32.0.0/16" +} + +resource "aws_subnet" "embassy" { + vpc_id = aws_vpc.embassy.id + cidr_block = cidrsubnet(aws_vpc.embassy.cidr_block, 8, 1) + availability_zone = "us-west-2a" +} + +resource "aws_internet_gateway" "embassy" { + vpc_id = aws_vpc.embassy.id +} + +resource "aws_security_group" "embassy" { + vpc_id = aws_vpc.embassy.id + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + # wireguard + ingress { + from_port = 51820 + to_port = 51820 + protocol = "udp" + cidr_blocks = ["0.0.0.0/0"] + } + + # everything else + ingress { + from_port = 10000 + to_port = 40000 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 10000 + to_port = 40000 + protocol = "udp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_route_table" "embassy" { + vpc_id = aws_vpc.embassy.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = 
aws_internet_gateway.embassy.id + } +} + +resource "aws_route_table_association" "embassy_assoc" { + subnet_id = aws_subnet.embassy.id + route_table_id = aws_route_table.embassy.id +} + +resource "aws_network_interface" "embassy" { + subnet_id = aws_subnet.embassy.id + # Required for private_ip_list + private_ip_list_enabled = true + # private_ips aren't ordered meaning this NIC and its dependent resources may + # be re-created upon changing the number of IPs. + # private_ip_list, however, _is_ ordered, hence why we use it over private_ips + private_ip_list = [ + for i in range(var.ip_count) : cidrhost(aws_subnet.embassy.cidr_block, i + 32) + ] + security_groups = [aws_security_group.embassy.id] +} + +resource "aws_instance" "embassy" { + ami = "ami-00c257e12d6828491" + instance_type = "t2.micro" + + availability_zone = aws_subnet.embassy.availability_zone + key_name = aws_key_pair.titanium.key_name + + network_interface { + network_interface_id = aws_network_interface.embassy.id + device_index = 0 + } + + tags = { + Name = "embassy-01" + } +} + +resource "aws_eip" "eip" { + count = var.ip_count +} + +resource "aws_eip_association" "eip_assoc" { + count = var.ip_count + network_interface_id = aws_network_interface.embassy.id + allocation_id = aws_eip.eip[count.index].id + private_ip_address = aws_network_interface.embassy.private_ip_list[count.index] +} + +resource "ansible_host" "embassy" { + # any of the public ips will work + name = aws_eip.eip[0].public_ip + groups = ["embassy"] +} diff --git a/tf/modules/outpost/providers.tf b/tf/modules/embassy/providers.tf similarity index 100% rename from tf/modules/outpost/providers.tf rename to tf/modules/embassy/providers.tf diff --git a/tf/modules/embassy/variables.tf b/tf/modules/embassy/variables.tf new file mode 100644 index 0000000..c46a062 --- /dev/null +++ b/tf/modules/embassy/variables.tf @@ -0,0 +1,11 @@ +variable "ssh_public_key_file" { + type = string +} + +variable "ip_count" { + type = number + validation { + condition = var.ip_count >= 1 + error_message = "Need at least one ip" + } +} diff --git a/tf/modules/flock/main.tf b/tf/modules/flock/main.tf index d02e00b..64240c5 100644 --- a/tf/modules/flock/main.tf +++ b/tf/modules/flock/main.tf @@ -1,115 +1,17 @@ -locals { - managers = [ - for i in range(var.manager_count) : { - name = "${var.swarm_name}-manager-${format("%02s", i + 1)}" - } - ] - workers = [ - for i in range(var.worker_count) : { - name = "${var.swarm_name}-worker-${format("%02s", i + 1)}" - } - ] -} - -resource "proxmox_virtual_environment_pool" "swarm_pool" { +resource "proxmox_virtual_environment_pool" "flock_pool" { comment = "Managed by Terraform" - pool_id = var.swarm_name + pool_id = var.flock_name } -resource "proxmox_virtual_environment_vm" "swarm_manager" { - count = var.manager_count - name = local.managers[count.index].name +resource "proxmox_virtual_environment_vm" "wings" { + count = var.wing_count + name = "${var.flock_name}-wings-${format("%02s", count.index + 1)}" description = "Managed by Terraform" - tags = ["terraform", "ubuntu", "swarm-manager", var.swarm_name] + tags = ["terraform", "ubuntu", "wings", var.flock_name] node_name = "pve" vm_id = "${var.vm_id_prefix}${count.index + 101}" - pool_id = proxmox_virtual_environment_pool.swarm_pool.id - - cpu { - cores = 2 - type = "host" - } - - memory { - dedicated = 2048 - floating = 2048 - } - - agent { - # read 'Qemu guest agent' section, change to true only when ready - enabled = true - } - - startup { - order = "1" - up_delay = "60" - down_delay = 
"60" - } - - disk { - datastore_id = var.proxmox_vm_storage - file_id = var.cloud_image_id - interface = "virtio0" - iothread = true - discard = "on" - size = 32 - file_format = "qcow2" - } - - initialization { - ip_config { - ipv4 { - # x.x.x.32 - x.x.x.39 - address = "${cidrhost(var.subnet_cidr, count.index + 32)}/24" - gateway = var.gateway - } - } - datastore_id = var.proxmox_image_storage - - user_data_file_id = var.cloud_init_file_id - } - - network_device { - bridge = "vmbr0" - } - - operating_system { - type = "l26" - } - - connection { - type = "ssh" - user = "ubuntu" - private_key = file(var.ssh_private_key_file) - host = split("/", self.initialization[0].ip_config[0].ipv4[0].address)[0] - } - - provisioner "remote-exec" { - inline = [ - "sudo hostnamectl set-hostname ${self.name}", - "sudo systemctl restart avahi-daemon", - ] - } - - lifecycle { - ignore_changes = [ - initialization[0].user_data_file_id, - ] - } -} - - - -resource "proxmox_virtual_environment_vm" "swarm_worker" { - count = var.worker_count - name = local.workers[count.index].name - description = "Managed by Terraform" - tags = ["terraform", "ubuntu", "swarm-worker", var.swarm_name] - - node_name = "pve" - vm_id = "${var.vm_id_prefix}${count.index + 301}" - pool_id = proxmox_virtual_environment_pool.swarm_pool.id + pool_id = proxmox_virtual_environment_pool.flock_pool.id cpu { cores = 4 @@ -127,7 +29,7 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" { } startup { - order = "2" + order = "5" up_delay = "60" down_delay = "60" } @@ -137,7 +39,7 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" { file_id = var.cloud_image_id interface = "virtio0" discard = "on" - size = 32 + size = 16 file_format = "qcow2" } @@ -179,18 +81,14 @@ resource "proxmox_virtual_environment_vm" "swarm_worker" { lifecycle { ignore_changes = [ initialization[0].user_data_file_id, + # These will have manually provisioned disks + disk, ] } } -resource "ansible_host" "swarm_manager" { - count = var.manager_count - name = "${local.managers[count.index].name}.local" - groups = ["${var.swarm_name}_manager", var.swarm_name] -} - -resource "ansible_host" "swarm_worker" { - count = var.worker_count - name = "${local.workers[count.index].name}.local" - groups = ["${var.swarm_name}_worker", var.swarm_name] +resource "ansible_host" "wings" { + count = var.wing_count + name = "${proxmox_virtual_environment_vm.wings[count.index].name}.local" + groups = ["${var.flock_name}_wings", var.flock_name] } diff --git a/tf/modules/flock/variables.tf b/tf/modules/flock/variables.tf index 0019eec..f5cb89f 100644 --- a/tf/modules/flock/variables.tf +++ b/tf/modules/flock/variables.tf @@ -15,23 +15,15 @@ variable "gateway" { type = string default = "10.0.0.1" } -variable "swarm_name" { +variable "flock_name" { type = string } -variable "manager_count" { +variable "wing_count" { type = number validation { - condition = var.manager_count <= 8 - error_message = "Too many manager nodes" - } -} - -variable "worker_count" { - type = number - validation { - condition = var.worker_count <= 16 - error_message = "Too many worker nodes" + condition = var.wing_count <= 16 + error_message = "Too many wing nodes" } } diff --git a/tf/modules/outpost/main.tf b/tf/modules/outpost/main.tf deleted file mode 100644 index 1710f5c..0000000 --- a/tf/modules/outpost/main.tf +++ /dev/null @@ -1,92 +0,0 @@ -resource "aws_key_pair" "titanium" { - key_name = "titanium" - public_key = file(var.ssh_public_key_file) -} - -resource "aws_vpc" "outpost" { - # whatever - cidr_block = 
"172.32.0.0/16" -} - -resource "aws_subnet" "outpost" { - vpc_id = aws_vpc.outpost.id - cidr_block = cidrsubnet(aws_vpc.outpost.cidr_block, 8, 1) - availability_zone = "us-west-2a" -} - -resource "aws_internet_gateway" "outpost" { - vpc_id = aws_vpc.outpost.id -} - -resource "aws_security_group" "outpost" { - vpc_id = aws_vpc.outpost.id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_route_table" "outpost" { - vpc_id = aws_vpc.outpost.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.outpost.id - } -} - -resource "aws_route_table_association" "outpost_assoc" { - subnet_id = aws_subnet.outpost.id - route_table_id = aws_route_table.outpost.id -} - -resource "aws_network_interface" "outpost" { - subnet_id = aws_subnet.outpost.id - # Required for private_ip_list - private_ip_list_enabled = true - # private_ips aren't ordered meaning this NIC and its dependent resources may - # be re-created upon changing the number of IPs. - # private_ip_list, however, _is_ ordered, hence why we use it over private_ips - private_ip_list = [ - for i in range(var.ip_count) : cidrhost(aws_subnet.outpost.cidr_block, i + 32) - ] - security_groups = [aws_security_group.outpost.id] -} - -resource "aws_instance" "outpost" { - ami = "ami-00c257e12d6828491" - instance_type = "t2.micro" - - availability_zone = aws_subnet.outpost.availability_zone - key_name = aws_key_pair.titanium.key_name - - network_interface { - network_interface_id = aws_network_interface.outpost.id - device_index = 0 - } - - tags = { - Name = "outpost-01" - } -} - -resource "aws_eip" "eip" { - count = var.ip_count -} - -resource "aws_eip_association" "eip_assoc" { - count = var.ip_count - network_interface_id = aws_network_interface.outpost.id - allocation_id = aws_eip.eip[count.index].id - private_ip_address = aws_network_interface.outpost.private_ip_list[count.index] -} diff --git a/tf/modules/outpost/variables.tf b/tf/modules/outpost/variables.tf deleted file mode 100644 index 852fbe3..0000000 --- a/tf/modules/outpost/variables.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "ssh_public_key_file" { - type = string -} - -variable "ip_count" { - type = number -}