feat: Add moirai flock and change embassy to linode

commit dde508f88b (parent 51fd04b3bd)
@@ -4,6 +4,6 @@ strict: true
 groups:
   vpn: >-
     group_names | intersect(['embassy']) | length == 1 or
-    group_names | intersect(['jodye']) | length == 1
+    group_names | intersect(['moirai']) | length == 1
   vpn_server: group_names | intersect(['embassy']) | length == 1
-  vpn_client: group_names | intersect(['jodye']) | length == 1
+  vpn_client: group_names | intersect(['moirai']) | length == 1
@@ -1,17 +1,30 @@
-ansible_user: ubuntu
-
 # Directory to store WireGuard configuration on the remote hosts
 wireguard_remote_directory: /etc/wireguard
 
 wireguard_interface_restart: false
-wireguard_service_enabled: true
+wireguard_service_enabled: false
 wireguard_service_state: started
 
+# Keep the NAT mapping open. Should only be needed for server -> client, but
+# if the server disconnects, we may never be able to re-establish a connection.
+# So this is on both client and server just in case that happens.
+wireguard_persistent_keepalive: 25
+
+wireguard_ipv6_subnet: "fde0:fb5b:2593::/64"
+# Setting this here doesn't seem to work. We set it in a playbook later
+# public_ipv6_subnet: "{{ hostvars[groups['embassy'][0]].ipv6_subnet }}"
+
 # We can generate this dynamically, but it really doesn't seem like it's worth
 # the work.
 nat_map:
-  jodye-wings-01.local:
-    lan_ip: 10.0.29.40
-    vpn_ip: 10.4.4.33
-    vps_ip: 172.32.1.33
+  moirai-clotho.local:
+    vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('16') | ansible.utils.ipaddr('address') }}"
+    vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('16') | ansible.utils.ipaddr('address') }}"
+
+  moirai-lachesis.local:
+    vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('17') | ansible.utils.ipaddr('address') }}"
+    vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('17') | ansible.utils.ipaddr('address') }}"
+
+  moirai-atropos.local:
+    vpn_ipv6: "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('18') | ansible.utils.ipaddr('address') }}"
+    vps_ipv6: "{{ public_ipv6_subnet | ansible.utils.ipaddr('18') | ansible.utils.ipaddr('address') }}"
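Note: a quick sketch of what the ansible.utils.ipaddr() chains above evaluate to, using Python's stdlib ipaddress in place of the Ansible filter. The subnet and the 16-18 indices are the ones from nat_map; vps_ipv6 works the same way against public_ipv6_subnet once it is known:

    import ipaddress

    # wireguard_ipv6_subnet from the group_vars above
    vpn_net = ipaddress.ip_network("fde0:fb5b:2593::/64")

    # ipaddr(N) selects the Nth address in the network and ipaddr('address')
    # strips the prefix length; in plain Python that's just indexing.
    for host, index in [("moirai-clotho", 16), ("moirai-lachesis", 17), ("moirai-atropos", 18)]:
        print(host, vpn_net[index])
    # moirai-clotho fde0:fb5b:2593::10
    # moirai-lachesis fde0:fb5b:2593::11
    # moirai-atropos fde0:fb5b:2593::12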
@@ -1,20 +1,18 @@
 wireguard_addresses:
-  - "{{ nat_map[inventory_hostname].vpn_ip }}"
+  - "{{ nat_map[inventory_hostname].vpn_ipv6 }}"
 
+# wireguard_endpoint: "{{ nat_map[inventory_hostname].vpn_ipv6 }}"
 wireguard_endpoint: ""
 
 # Don't set this
 # wireguard_dns: 10.0.123.123
 
-# Keep the PAT mapping open. Only needed for the client; the server is always
-# reachable
-wireguard_persistent_keepalive: 30
-
 # don't route local addresses through the wg tunnel
-wireguard_preup:
-  - ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
+# wireguard_preup:
+#   - ip route add 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
 
-wireguard_postdown:
-  - ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
+# wireguard_postdown:
+#   - ip route del 10.0.0.0/16 via 10.0.0.1 dev eth0 proto static onlink
 
 # Ok, I could not get the stuff below working properly. What I _wanted_ to do
 # was make it so that _only_ traffic that was sent from the wireguard tunnel
@@ -1,43 +1,49 @@
 # This should really be set per host, but I'm abusing the fact that there's only
 # one vpn_server host
 wireguard_addresses:
-  - "10.4.4.1/24"
+  - "{{ wireguard_ipv6_subnet | ansible.utils.ipaddr('net') | ansible.utils.ipaddr('1') }}"
 
 wireguard_endpoint: "{{ inventory_hostname }}"
 
 wireguard_preup:
   - echo 1 > /proc/sys/net/ipv4/ip_forward
+  - echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
 
 wireguard_postup: |
   {% filter from_yaml %}
-  {%- for value in (nat_map | dict2items | map(attribute='value')) %}
-  # incoming packets to vps_ip, dst port 10,000-40,000 are DNAT'd to vpn_ip
-  # with a matching port
-  - iptables -t nat -A PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
-
-  # incoming packets from vpn_ip are SNAT'd to vps_ip with a matching port to
-  # complete the reverse NAT path
-  - iptables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
-
-  # Same thing for UDP. We do this selectively because we don't wanna mess with
-  # stuff like icmp
-  - iptables -t nat -A PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
-  - iptables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
-  {%- endfor %}
+  {% for value in (nat_map | dict2items | map(attribute='value')) %}
+  - ip -6 addr add {{ value.vps_ipv6 }} dev eth0
+
+  # Incoming packets to this node's public IP are DNAT'd and forwarded to the
+  # matching internal VPN IP
+  - ip6tables -t nat -A PREROUTING -p tcp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
+
+  # Incoming packets to an internal VPN IP are SNAT'd to use this node's public
+  # IP. I think `-j MASQUERADE` might work here rather than doing the SNAT
+  # manually(?), but I don't mind being explicit here.
+  - ip6tables -t nat -A POSTROUTING -p tcp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
+
+  # Same thing with UDP. We do this selectively so we don't mess with things
+  # like ICMP6 and whatnot.
+  - ip6tables -t nat -A PREROUTING -p udp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
+  - ip6tables -t nat -A POSTROUTING -p udp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
+  {% endfor %}
   {% endfilter %}
 
 # Exact reverse of above to delete all the rules
 wireguard_predown: |
   {% filter from_yaml %}
-  {%- for value in (nat_map | dict2items | map(attribute='value') | reverse) %}
-  - iptables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
-  - iptables -t nat -D PREROUTING -p tcp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
-  - iptables -t nat -D PREROUTING -p udp -d {{ value.vps_ip }} --dport 10000:40000 -j DNAT --to-destination {{ value.vpn_ip }}
-  - iptables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ip }} -j SNAT --to-source {{ value.vps_ip }}
-  {%- endfor %}
+  {% for value in (nat_map | dict2items | map(attribute='value') | reverse) %}
+  - ip6tables -t nat -D POSTROUTING -p udp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
+  - ip6tables -t nat -D PREROUTING -p udp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
+  - ip6tables -t nat -D POSTROUTING -p tcp -s {{ value.vpn_ipv6 }} -j SNAT --to-source {{ value.vps_ipv6 }}
+  - ip6tables -t nat -D PREROUTING -p tcp -d {{ value.vps_ipv6 }} --dport 20000:20100 -j DNAT --to-destination {{ value.vpn_ipv6 }}
+  - ip -6 addr del {{ value.vps_ipv6 }} dev eth0
+  {% endfor %}
   {% endfilter %}
 
 wireguard_postdown:
+  - echo 0 > /proc/sys/net/ipv6/conf/all/forwarding
   - echo 0 > /proc/sys/net/ipv4/ip_forward
 
 # https://www.procustodibus.com/blog/2021/03/wireguard-allowedips-calculator/
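Note: the {% filter from_yaml %} wrapper above does real work: the role expects wireguard_postup/predown to be lists, so the template renders YAML text and the filter parses it back into one. A rough stand-alone equivalent of that round trip (jinja2 + PyYAML; the addresses are documentation-prefix placeholders, not real allocations):

    import yaml
    from jinja2 import Template

    # Placeholder values standing in for one nat_map entry.
    value = {"vpn_ipv6": "fde0:fb5b:2593::10", "vps_ipv6": "2001:db8::10"}

    rendered = Template(
        "- ip -6 addr add {{ v.vps_ipv6 }} dev eth0\n"
        "- ip6tables -t nat -A PREROUTING -p tcp -d {{ v.vps_ipv6 }}"
        " --dport 20000:20100 -j DNAT --to-destination {{ v.vpn_ipv6 }}\n"
    ).render(v=value)

    # This is the step {% filter from_yaml %} performs on the rendered text:
    print(yaml.safe_load(rendered))
    # ['ip -6 addr add 2001:db8::10 dev eth0',
    #  'ip6tables -t nat -A PREROUTING -p tcp -d 2001:db8::10 --dport 20000:20100 -j DNAT --to-destination fde0:fb5b:2593::10']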
@@ -50,5 +56,5 @@ wireguard_postdown:
 # and each host defines a list of IPs that should be routed _to this host_, not
 # a list of IPs that should be routed to the "server" (because everyone is a
 # peer in a fully meshed network)
-wireguard_allowed_ips: "0.0.0.0/0"
+wireguard_allowed_ips: "::0/0"
 
@@ -27,8 +27,8 @@
     ingress: true
     scope: swarm
     ipam_config:
-      - subnet: 172.254.0.0/16
-        gateway: 172.254.0.1
+      - subnet: 172.31.0.0/16
+        gateway: 172.31.0.1
     driver_options:
       # I'm honestly not completely sure what this does, but in the default
       # ingress network that's created during swarm initialization, this exists
@@ -2,4 +2,4 @@
 roles:
   - name: githubixx.ansible_role_wireguard
     src: https://github.com/githubixx/ansible-role-wireguard.git
-    version: 17.0.0
+    version: 17.1.0
@@ -1,29 +1,50 @@
 ---
-- name: Ensure prerequisites met
-  # TODO: Check # of IP addrs on enX0 >= # of wings + 1
+- name: Preparation
   hosts: vpn
-  tasks: []
+  pre_tasks:
+    - name: Verify only one embassy
+      ansible.builtin.assert:
+        that: >
+          groups['embassy'] | length == 1 and
+          groups['vpn_server'] | length == 1 and
+          groups['vpn_server'] | intersect(groups['embassy']) | length == 1
+        msg: Expected only one embassy host
+    - name: Verify ipv6_subnet is set
+      when: inventory_hostname == groups['embassy'][0]
+      ansible.builtin.assert:
+        that: ipv6_subnet is defined
+        msg: >
+          Expected ipv6_subnet to be defined.
+          This should have been done in Terraform or otherwise.
+  tasks:
+    # As mentioned in the other file, if I set this statically in group_vars,
+    # things seem to break.
+    - set_fact:
+        public_ipv6_subnet: "{{ hostvars[groups['embassy'][0]].ipv6_subnet }}"
 
-- name: Install wings
-  hosts: jodye_wings
-  remote_user: ubuntu
-  # Don't forget to create a new disk if creating new wings. This is
-  # purposefully manual to give more fine-grained control
-  vars:
-    pv_disks:
-      - /dev/sda
-    vg_name: vg1
-    lv_name: pvs
-    lv_size: +100%FREE
-    fs_type: ext4
-    mount_path: /var/lib/pterodactyl
-    extra_docker_daemon_options: |
-      "dns": ["10.0.123.123"],
-  roles:
-    - dns-client
-    - lvm
-    - docker
-    - wings
+- name: Prepare embassy
+  hosts: embassy
+  become: true
+  tasks:
+    - name: Disable password-based authentication
+      lineinfile:
+        path: "/etc/ssh/sshd_config"
+        regexp: '^()PasswordAuthentication yes()$'
+        line: 'PasswordAuthentication no'
+      register: passwordauthentication
+
+    - name: Enable public key authentication in SSH
+      lineinfile:
+        path: "/etc/ssh/sshd_config"
+        regexp: '^()PubkeyAuthentication()$'
+        line: 'PubkeyAuthentication yes'
+      register: publickeyauthentication
+
+    - name: Restart SSH
+      service:
+        name: ssh
+        state: restarted
+      when: passwordauthentication.changed or publickeyauthentication.changed
 
 - name: Set up VPN
   hosts: vpn
@@ -31,3 +52,24 @@
   roles:
     - githubixx.ansible_role_wireguard
+
+# - name: Install wings
+#   hosts: moirai_wings
+#   remote_user: ubuntu
+#   # Don't forget to create a new disk if creating new wings. This is
+#   # purposefully manual to give more fine-grained control
+#   vars:
+#     pv_disks:
+#       - /dev/sda
+#     vg_name: vg1
+#     lv_name: pvs
+#     lv_size: +100%FREE
+#     fs_type: ext4
+#     mount_path: /var/lib/pterodactyl
+#     extra_docker_daemon_options: |
+#       "dns": ["10.0.123.123"],
+#   roles:
+#     - dns-client
+#     - lvm
+#     - docker
+#     - wings
@@ -1,9 +1,12 @@
 $ORIGIN mnke.org.
-@ 900 IN SOA dns-server. hostadmin 38 900 300 604800 900
+@ 900 IN SOA dns-server. hostadmin 43 900 300 604800 900
 @ 3600 IN NS dns-server.
+atropos_moirai 600 IN CNAME dolo
 authentik 600 IN CNAME authentik.dolo
 blog 600 IN CNAME blog.dolo
+clotho_moirai 600 IN CNAME dolo
 git 600 IN CNAME git.jumper
+lachesis_moirai 600 IN CNAME dolo
 media 600 IN CNAME media.dolo
 nc 600 IN CNAME nc.dolo
 panel 600 IN CNAME panel.dolo
dns/zones/moirai.mnke.org.zone (new file)
@@ -0,0 +1,6 @@
+$ORIGIN moirai.mnke.org.
+@ 900 IN SOA dns-server. hostadmin 4 900 300 604800 900
+@ 3600 IN NS dns-server.
+atropos 600 IN A 10.0.29.42
+clotho 600 IN A 10.0.29.40
+lachesis 600 IN A 10.0.29.41
@@ -120,14 +120,18 @@ data:
       service: https://up.mnke.org
     - hostname: panel.mnke.org
       service: https://panel.mnke.org
-    - hostname: wings-01_jodye.mnke.org
-      service: https://wings-01_jodye.mnke.org
     - hostname: vault.mnke.org
      service: https://vault.mnke.org
     - hostname: authentik.mnke.org
       service: https://authentik.mnke.org
     - hostname: nc.mnke.org
       service: https://nc.mnke.org
+    - hostname: clotho_moirai.mnke.org
+      service: https://clotho_moirai.mnke.org
+    - hostname: lachesis_moirai.mnke.org
+      service: https://lachesis_moirai.mnke.org
+    - hostname: atropos_moirai.mnke.org
+      service: https://atropos_moirai.mnke.org
     # This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
     - service: http_status:404
@@ -17,6 +17,25 @@ spec:
       remoteRef:
         key: ghost-mysql-password
 
+---
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: ghost-email-creds
+  namespace: default
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: infisical
+
+  target:
+    name: ghost-email-creds
+
+  data:
+    - secretKey: email-password
+      remoteRef:
+        key: tony-mnke-org-email-password
+
 ---
 apiVersion: external-secrets.io/v1beta1
 kind: ExternalSecret
@@ -3,13 +3,13 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: wings-01-jodye-external
+  name: atropos-moirai-external
   namespace: default
 spec:
   type: ExternalName
-  externalName: wings-01.jodye.mnke.org
+  externalName: atropos.moirai.mnke.org
   ports:
-    - name: wings-01-jodye-external
+    - name: atropos-moirai-external
       port: 443
       targetPort: 443
 
@@ -18,17 +18,17 @@ spec:
 apiVersion: traefik.io/v1alpha1
 kind: IngressRoute
 metadata:
-  name: wings-01-jodye-external
+  name: atropos-moirai-external
   namespace: default
 spec:
   entryPoints:
     - websecure
   routes:
-    - match: Host(`wings-01_jodye.mnke.org`)
+    - match: Host(`atropos_moirai.mnke.org`)
       kind: Rule
       services:
         - kind: Service
-          name: wings-01-jodye-external
+          name: atropos-moirai-external
           port: 443
           passHostHeader: true
           scheme: http
k8s/apps/ingressroutes/external/build/clotho-moirai.yaml (new file, vendored)
@@ -0,0 +1,36 @@
+---
+# This file was automatically generated. Do not modify.
+apiVersion: v1
+kind: Service
+metadata:
+  name: clotho-moirai-external
+  namespace: default
+spec:
+  type: ExternalName
+  externalName: clotho.moirai.mnke.org
+  ports:
+    - name: clotho-moirai-external
+      port: 443
+      targetPort: 443
+
+---
+# This file was automatically generated. Do not modify.
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+  name: clotho-moirai-external
+  namespace: default
+spec:
+  entryPoints:
+    - websecure
+  routes:
+    - match: Host(`clotho_moirai.mnke.org`)
+      kind: Rule
+      services:
+        - kind: Service
+          name: clotho-moirai-external
+          port: 443
+          passHostHeader: true
+          scheme: http
+  tls:
+    secretName: wildcard-mnke-org-tls
@@ -9,5 +9,7 @@ resources:
   - wizarr-tonydu.yaml
   - dns-dolo-mnke.yaml
   - vaultwarden.yaml
-  - wings-01-jodye.yaml
   - panel.yaml
+  - clotho-moirai.yaml
+  - lachesis-moirai.yaml
+  - atropos-moirai.yaml
k8s/apps/ingressroutes/external/build/lachesis-moirai.yaml (new file, vendored)
@@ -0,0 +1,36 @@
+---
+# This file was automatically generated. Do not modify.
+apiVersion: v1
+kind: Service
+metadata:
+  name: lachesis-moirai-external
+  namespace: default
+spec:
+  type: ExternalName
+  externalName: lachesis.moirai.mnke.org
+  ports:
+    - name: lachesis-moirai-external
+      port: 443
+      targetPort: 443
+
+---
+# This file was automatically generated. Do not modify.
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+  name: lachesis-moirai-external
+  namespace: default
+spec:
+  entryPoints:
+    - websecure
+  routes:
+    - match: Host(`lachesis_moirai.mnke.org`)
+      kind: Rule
+      services:
+        - kind: Service
+          name: lachesis-moirai-external
+          port: 443
+          passHostHeader: true
+          scheme: http
+  tls:
+    secretName: wildcard-mnke-org-tls
@@ -65,16 +65,36 @@ proxies:
     upstream_port: 443
     pass_host_header: true
 
-  - service_name: wings-01-jodye
-    tls_secret_name: wildcard-mnke-org-tls
-    listen_host: wings-01_jodye.mnke.org
-    upstream_host: wings-01.jodye.mnke.org
-    upstream_port: 443
-    scheme: http
-
   - service_name: panel
     tls_secret_name: wildcard-mnke-org-tls
     listen_host: panel.mnke.org
     upstream_host: panel.jumper.mnke.org
     upstream_port: 443
     pass_host_header: true
+
+  # We use clotho_moirai instead of clotho.moirai because Cloudflare SSL
+  # doesn't cover *.moirai.mnke.org by default. I'm not sure if there's any
+  # configuration to allow TLS passthrough on Cloudflare (probably not) or to
+  # upload my own SSL cert.
+  #
+  # TODO: Check if we can host this on clotho.moirai.mnke.org with SSL
+  - service_name: clotho-moirai
+    tls_secret_name: wildcard-mnke-org-tls
+    listen_host: clotho_moirai.mnke.org
+    upstream_host: clotho.moirai.mnke.org
+    upstream_port: 443
+    scheme: http
+
+  - service_name: lachesis-moirai
+    tls_secret_name: wildcard-mnke-org-tls
+    listen_host: lachesis_moirai.mnke.org
+    upstream_host: lachesis.moirai.mnke.org
+    upstream_port: 443
+    scheme: http
+
+  - service_name: atropos-moirai
+    tls_secret_name: wildcard-mnke-org-tls
+    listen_host: atropos_moirai.mnke.org
+    upstream_host: atropos.moirai.mnke.org
+    upstream_port: 443
+    scheme: http
@@ -86,14 +86,14 @@ module "k8s_folly" {
   ssh_private_key_file = var.ssh_private_key_file
 }
 
-module "flock_jodye" {
+module "flock_moirai" {
   source = "./modules/flock"
 
-  flock_name    = "jodye"
+  flock_name    = "moirai"
   vm_id_prefix  = "9"
   subnet_cidr   = "10.0.29.0/24"
   gateway       = var.gateway
-  wing_count    = 1
+  wing_names    = ["clotho", "lachesis", "atropos"]
   dns_server_ip = local.dns_server_ip
 
   proxmox_vm_storage = var.proxmox_vm_storage
@@ -107,6 +107,6 @@ module "flock_jodye" {
 module "embassy" {
   source = "./modules/embassy"
 
-  ip_count            = 2
   ssh_public_key_file = var.ssh_public_key_file
+  cloudflare_zone_id  = var.cloudflare_zone_id
 }
@@ -14,7 +14,7 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
   pool_id = var.pool_id
 
   cpu {
-    cores = 1
+    cores = 2
     type  = "host"
   }
 
@@ -81,7 +81,7 @@ resource "proxmox_virtual_environment_vm" "dns_server" {
 
   lifecycle {
     ignore_changes = [
-      initialization[0].user_data_file_id,
+      initialization,
     ]
   }
 }
@@ -1,121 +1,94 @@
-resource "aws_key_pair" "titanium" {
-  key_name   = "titanium"
-  public_key = file(var.ssh_public_key_file)
+resource "linode_sshkey" "titanium" {
+  label   = "titanium"
+  ssh_key = chomp(file(var.ssh_public_key_file))
 }
 
-resource "aws_vpc" "embassy" {
-  # whatever
-  cidr_block = "172.32.0.0/16"
+resource "linode_instance" "embassy" {
+  image           = "linode/ubuntu24.04"
+  label           = "embassy"
+  region          = "us-sea"
+  type            = "g6-nanode-1"
+  authorized_keys = [linode_sshkey.titanium.ssh_key]
 }
 
-resource "aws_subnet" "embassy" {
-  vpc_id            = aws_vpc.embassy.id
-  cidr_block        = cidrsubnet(aws_vpc.embassy.cidr_block, 8, 1)
-  availability_zone = "us-west-2a"
+resource "linode_ipv6_range" "embassy" {
+  linode_id     = linode_instance.embassy.id
+  prefix_length = 64
 }
 
-resource "aws_internet_gateway" "embassy" {
-  vpc_id = aws_vpc.embassy.id
-}
-
-resource "aws_security_group" "embassy" {
-  vpc_id = aws_vpc.embassy.id
+resource "linode_firewall" "embassy" {
+  label = "embassy"
 
-  ingress {
-    from_port   = 22
-    to_port     = 22
-    protocol    = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
+  inbound {
+    label    = "allow-ssh"
+    action   = "ACCEPT"
+    protocol = "TCP"
+    ports    = "22"
+    ipv4     = ["0.0.0.0/0"]
+    ipv6     = ["::/0"]
   }
 
-  # wireguard
-  ingress {
-    from_port   = 51820
-    to_port     = 51820
-    protocol    = "udp"
-    cidr_blocks = ["0.0.0.0/0"]
+  # idk, why not lol
+  inbound {
+    label    = "allow-web"
+    action   = "ACCEPT"
+    protocol = "TCP"
+    ports    = "80,443"
+    ipv4     = ["0.0.0.0/0"]
+    ipv6     = ["::/0"]
   }
 
-  # We'll selectively open ports, but we'll reserve these for general purpose
-  ingress {
-    from_port   = 20000
-    to_port     = 20100
-    protocol    = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
+  inbound {
+    label    = "allow-forward-tcp"
+    action   = "ACCEPT"
+    protocol = "TCP"
+    ports    = "20000-20100"
+    ipv4     = ["0.0.0.0/0"]
+    ipv6     = ["::/0"]
   }
 
-  ingress {
-    from_port   = 20000
-    to_port     = 20100
-    protocol    = "udp"
-    cidr_blocks = ["0.0.0.0/0"]
+  inbound {
+    label    = "allow-forward-udp"
+    action   = "ACCEPT"
+    protocol = "UDP"
+    ports    = "20000-20100"
+    ipv4     = ["0.0.0.0/0"]
+    ipv6     = ["::/0"]
   }
 
-  egress {
-    from_port   = 0
-    to_port     = 0
-    protocol    = "-1"
-    cidr_blocks = ["0.0.0.0/0"]
+  inbound {
+    label    = "allow-wireguard"
+    action   = "ACCEPT"
+    protocol = "UDP"
+    ports    = "51820"
+    ipv4     = ["0.0.0.0/0"]
+    ipv6     = ["::/0"]
   }
-}
 
-resource "aws_route_table" "embassy" {
-  vpc_id = aws_vpc.embassy.id
+  inbound_policy  = "DROP"
 
-  route {
-    cidr_block = "0.0.0.0/0"
-    gateway_id = aws_internet_gateway.embassy.id
-  }
-}
+  outbound_policy = "ACCEPT"
 
-resource "aws_route_table_association" "embassy_assoc" {
-  subnet_id      = aws_subnet.embassy.id
-  route_table_id = aws_route_table.embassy.id
-}
+  linodes = [linode_instance.embassy.id]
+}
 
-resource "aws_network_interface" "embassy" {
-  subnet_id = aws_subnet.embassy.id
-  # Required for private_ip_list
-  private_ip_list_enabled = true
-  # private_ips aren't ordered meaning this NIC and its dependent resources may
-  # be re-created upon changing the number of IPs.
-  # private_ip_list, however, _is_ ordered, hence why we use it over private_ips
-  private_ip_list = [
-    for i in range(var.ip_count) : cidrhost(aws_subnet.embassy.cidr_block, i + 32)
-  ]
-  security_groups = [aws_security_group.embassy.id]
-}
-
-resource "aws_instance" "embassy" {
-  ami           = "ami-00c257e12d6828491"
-  instance_type = "t2.micro"
-
-  availability_zone = aws_subnet.embassy.availability_zone
-  key_name          = aws_key_pair.titanium.key_name
-
-  network_interface {
-    network_interface_id = aws_network_interface.embassy.id
-    device_index         = 0
-  }
-
-  tags = {
-    Name = "embassy-01"
-  }
-}
-
-resource "aws_eip" "eip" {
-  count = var.ip_count
-}
-
-resource "aws_eip_association" "eip_assoc" {
-  count                = var.ip_count
-  network_interface_id = aws_network_interface.embassy.id
-  allocation_id        = aws_eip.eip[count.index].id
-  private_ip_address   = aws_network_interface.embassy.private_ip_list[count.index]
-}
+resource "cloudflare_dns_record" "embassy_ipv4" {
+  zone_id = var.cloudflare_zone_id
+  content = linode_instance.embassy.ip_address
+  name    = "embassy.mnke.org"
+  proxied = false
+  ttl     = 1 # 1 = automatic TTL
+  type    = "A"
+}
 
 resource "ansible_host" "embassy" {
-  # any of the public ips will work
-  name   = aws_eip.eip[0].public_ip
+  # Ideally, we'd use the domain name here, but the way the internal DNS server
+  # is set up right now, we don't forward mnke.org requests because we have
+  # a primary zone for mnke.org (I think). We should change this if possible
+  name   = linode_instance.embassy.ip_address
   groups = ["embassy"]
+  variables = {
+    ipv6_subnet  = "${linode_ipv6_range.embassy.range}/${linode_ipv6_range.embassy.prefix_length}"
+    ansible_user = "root"
+  }
 }
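Note: a sketch of how the ipv6_subnet handed to Ansible above ties back to the vps_ipv6 values in group_vars (stdlib Python; 2001:db8 is a documentation prefix standing in for whatever range Linode actually allocates):

    import ipaddress

    tf_range = "2001:db8:1234::"  # linode_ipv6_range.embassy.range (placeholder)
    tf_prefix_length = 64         # linode_ipv6_range.embassy.prefix_length
    ipv6_subnet = f"{tf_range}/{tf_prefix_length}"  # what Terraform passes along

    public_net = ipaddress.ip_network(ipv6_subnet)
    print(public_net[16])  # vps_ipv6 for moirai-clotho: 2001:db8:1234::10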
@@ -4,9 +4,13 @@ terraform {
       source  = "ansible/ansible"
       version = "1.3.0"
     }
-    aws = {
-      source  = "hashicorp/aws"
-      version = "~> 5.0"
+    linode = {
+      source  = "linode/linode"
+      version = "2.34.1"
+    }
+    cloudflare = {
+      source  = "cloudflare/cloudflare"
+      version = "~> 5"
     }
   }
 }
@@ -2,10 +2,6 @@ variable "ssh_public_key_file" {
   type = string
 }
 
-variable "ip_count" {
-  type = number
-  validation {
-    condition     = var.ip_count >= 1
-    error_message = "Need at least one ip"
-  }
+variable "cloudflare_zone_id" {
+  type = string
 }
@@ -4,8 +4,8 @@ resource "proxmox_virtual_environment_pool" "flock_pool" {
 }
 
 resource "proxmox_virtual_environment_vm" "wings" {
-  count       = var.wing_count
-  name        = "${var.flock_name}-wings-${format("%02s", count.index + 1)}"
+  count       = length(var.wing_names)
+  name        = "${var.flock_name}-${var.wing_names[count.index]}"
   description = "Managed by Terraform"
   tags        = ["terraform", "ubuntu", "wings", var.flock_name]
 
@@ -19,8 +19,8 @@ resource "proxmox_virtual_environment_vm" "wings" {
   }
 
   memory {
-    dedicated = 8192
-    floating  = 8192
+    dedicated = 16384
+    floating  = 16384
   }
 
   agent {
@@ -88,7 +88,7 @@ resource "proxmox_virtual_environment_vm" "wings" {
 }
 
 resource "ansible_host" "wings" {
-  count  = var.wing_count
+  count  = length(var.wing_names)
   name   = "${proxmox_virtual_environment_vm.wings[count.index].name}.local"
   groups = ["${var.flock_name}_wings", var.flock_name]
 }
@@ -19,10 +19,10 @@ variable "flock_name" {
   type = string
 }
 
-variable "wing_count" {
-  type = number
+variable "wing_names" {
+  type = list(string)
   validation {
-    condition     = var.wing_count <= 16
+    condition     = length(var.wing_names) <= 16
     error_message = "Too many wing nodes"
   }
 }
@@ -20,6 +20,14 @@ terraform {
       source  = "hashicorp/aws"
       version = "~> 5.0"
     }
+    linode = {
+      source  = "linode/linode"
+      version = "2.34.1"
+    }
+    cloudflare = {
+      source  = "cloudflare/cloudflare"
+      version = "~> 5"
+    }
   }
 
   backend "s3" {
@@ -52,6 +60,10 @@ provider "proxmox" {
   }
 }
 
+provider "cloudflare" {
+  api_token = var.cloudflare_api_token
+}
+
 provider "dns" {
   update {
     server = local.dns_server_ip
@@ -64,3 +76,7 @@ provider "dns" {
 provider "aws" {
   region = "us-west-2"
 }
+
+provider "linode" {
+  token = var.linode_pat
+}
@@ -60,3 +60,17 @@ variable "ssh_private_key_file" {
   type        = string
   description = "Path to private key file. Make sure this matches the public key"
 }
+
+variable "linode_pat" {
+  type      = string
+  sensitive = true
+}
+
+variable "cloudflare_api_token" {
+  type      = string
+  sensitive = true
+}
+
+variable "cloudflare_zone_id" {
+  type = string
+}
@@ -7,3 +7,5 @@ ssh_import_id = "gh:tonyd33"
 
 ssh_public_key_file  = "~/.ssh/id_rsa.pub"
 ssh_private_key_file = "~/.ssh/id_rsa"
+
+cloudflare_zone_id = "4a9cbe349529b23a665bc65edfa5a4f9"