feat: Revamp infrastructure

This commit is contained in:
Tony Du 2025-02-09 12:56:04 -08:00
parent 71eedb8118
commit f5799ce9ac
65 changed files with 713 additions and 330 deletions

View File

@ -1,14 +0,0 @@
---
- name: Create LVM and mount it
hosts: lvm
remote_user: ubuntu
become: true
vars:
pv_disks: "{{ lvm.pv_disks }}"
vg_name: "{{ lvm.vg_name }}"
lv_name: "{{ lvm.lv_name }}"
lv_size: "{{ lvm.lv_size }}"
fs_type: "{{ lvm.fs_type }}"
mount_path: "{{ lvm.mount_path }}"
roles:
- lvm

View File

@ -18,6 +18,20 @@
- multipathd.service - multipathd.service
- multipathd.socket - multipathd.socket
- name: Create LVM and mount it
hosts: lvm
become: true
vars:
pv_disks: "{{ lvm.pv_disks }}"
vg_name: "{{ lvm.vg_name }}"
lv_name: "{{ lvm.lv_name }}"
lv_size: "{{ lvm.lv_size }}"
fs_type: "{{ lvm.fs_type }}"
# Consider mounting directly at the /var/lib/longhorn in the future
mount_path: "{{ lvm.mount_path }}"
roles:
- lvm
- name: Prepare Proxmox cluster - name: Prepare Proxmox cluster
hosts: proxmox hosts: proxmox
gather_facts: true gather_facts: true
@ -45,7 +59,6 @@
become: true become: true
when: custom_registries when: custom_registries
- name: Setup k3s servers - name: Setup k3s servers
hosts: master hosts: master
environment: "{{ proxy_env | default({}) }}" environment: "{{ proxy_env | default({}) }}"

View File

@ -11,6 +11,7 @@ volumes:
prowlarr_config: prowlarr_config:
radarr_config: radarr_config:
sonarr_config: sonarr_config:
wizarr_config:
services: services:
transmission-openvpn: transmission-openvpn:
@ -206,3 +207,24 @@ services:
cpus: '0.1' cpus: '0.1'
memory: 64M memory: 64M
wizarr:
image: tonyd33/wizarr
environment:
# This is intentionally not WIZARR_HOST. I'm still in the process of
# migrating everything into mnke.org domain.
- 'APP_URL=https://wizarr.tonydu.me'
- DISABLE_BUILTIN_AUTH=false
- TZ=America/Vancouver
volumes:
- wizarr_config:/data/database
networks:
- media
- traefik
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.wizarr.rule=Host(`${WIZARR_HOST:-wizarr.jumper.mnke.org}`)"
- "traefik.http.routers.wizarr.entrypoints=websecure"
- "traefik.http.routers.wizarr.tls=true"
- "traefik.http.services.wizarr.loadbalancer.server.port=5690"
- "traefik.docker.network=traefik"

View File

@ -29,9 +29,9 @@ spec:
annotations: annotations:
cert-manager.io/cluster-issuer: le-cf-issuer cert-manager.io/cluster-issuer: le-cf-issuer
kubernetes.io/ingress.class: traefik kubernetes.io/ingress.class: traefik
ingressClassName: traefik # ingressClassName: traefik
hostname: blog.mnke.org hostname: blog.mnke.org
tls: true # tls: true
allowEmptyPassword: false allowEmptyPassword: false
ghostEmail: tonydu121@hotmail.com ghostEmail: tonydu121@hotmail.com

View File

@ -1,38 +0,0 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: kube-prometheus-stack
namespace: kube-prometheus-stack
spec:
interval: 10m
chart:
spec:
chart: kube-prometheus-stack
sourceRef:
kind: HelmRepository
name: prometheus-community
namespace: kube-prometheus-stack
interval: 10m
values:
grafana:
adminPassword: admin
defaultDashboardsTimezone: browser
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: le-cf-issuer
kubernetes.io/ingress.class: traefik
hosts:
- gf.dolo.mnke.org
prometheus:
prometheusSpec:
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: longhorn
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 4Gi

View File

@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources:
- common - common
- kube-prometheus-stack # - kube-prometheus-stack
- uptime-kuma - uptime-kuma
- rancher # - rancher
- ghost - ghost

View File

@ -1,9 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: rancher-stable
namespace: cattle-system
spec:
interval: 1m
url: https://releases.rancher.com/server-charts/stable

View File

@ -1,18 +1,18 @@
--- # ---
apiVersion: kustomize.toolkit.fluxcd.io/v1 # apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization # kind: Kustomization
metadata: # metadata:
name: apps # name: apps
namespace: flux-system # namespace: flux-system
spec: # spec:
interval: 10m0s # interval: 10m0s
retryInterval: 30s # retryInterval: 30s
dependsOn: # dependsOn:
- name: infrastructure # - name: infrastructure
sourceRef: # sourceRef:
kind: GitRepository # kind: GitRepository
name: flux-system # name: flux-system
path: ./k8s/apps # path: ./k8s/apps
prune: true # prune: true
wait: true # wait: true
timeout: 5m0s # timeout: 5m0s

View File

@ -2,7 +2,7 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1 apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization kind: Kustomization
metadata: metadata:
name: external-secrets name: infrastructure-01
namespace: flux-system namespace: flux-system
spec: spec:
interval: 1h interval: 1h
@ -11,7 +11,7 @@ spec:
sourceRef: sourceRef:
kind: GitRepository kind: GitRepository
name: flux-system name: flux-system
path: ./k8s/infrastructure/external-secrets path: ./k8s/infrastructure/01
wait: true wait: true
prune: true prune: true
@ -19,7 +19,7 @@ spec:
apiVersion: kustomize.toolkit.fluxcd.io/v1 apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization kind: Kustomization
metadata: metadata:
name: secrets name: infrastructure-02
namespace: flux-system namespace: flux-system
spec: spec:
interval: 1h interval: 1h
@ -28,67 +28,11 @@ spec:
sourceRef: sourceRef:
kind: GitRepository kind: GitRepository
name: flux-system name: flux-system
path: ./k8s/infrastructure/secrets path: ./k8s/infrastructure/02
wait: true wait: true
prune: true prune: true
dependsOn: dependsOn:
- name: external-secrets - name: infrastructure-01
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cert-manager
namespace: flux-system
spec:
interval: 1h
retryInterval: 30s
timeout: 5m
sourceRef:
kind: GitRepository
name: flux-system
path: ./k8s/infrastructure/cert-manager
wait: true
prune: true
dependsOn:
- name: secrets
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: traefik
namespace: flux-system
spec:
interval: 1h
retryInterval: 30s
timeout: 5m
sourceRef:
kind: GitRepository
name: flux-system
path: ./k8s/infrastructure/traefik
wait: true
prune: true
dependsOn:
- name: cert-manager
- name: secrets
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: storage
namespace: flux-system
spec:
interval: 1h
retryInterval: 30s
timeout: 5m
sourceRef:
kind: GitRepository
name: flux-system
path: ./k8s/infrastructure/storage
wait: true
prune: true
--- ---
# What I want is one single unit that the rest of my applications relying on # What I want is one single unit that the rest of my applications relying on
@ -115,8 +59,5 @@ spec:
wait: true wait: true
prune: false prune: false
dependsOn: dependsOn:
- name: external-secrets - name: infrastructure-01
- name: secrets - name: infrastructure-02
- name: cert-manager
- name: traefik
- name: storage

View File

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
external-secrets.io/secrets.cloudflare: require

View File

@ -1,36 +1,20 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
external-secrets.io/secrets.cloudflare: require
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: jetstack
namespace: cert-manager
spec:
interval: 1m
url: https://charts.jetstack.io
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
name: cert-manager name: cert-manager
namespace: cert-manager namespace: flux-system
spec: spec:
interval: 10m interval: 10m
releaseName: cert-manager
targetNamespace: cert-manager
chart: chart:
spec: spec:
chart: cert-manager chart: cert-manager
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: jetstack name: jetstack
namespace: cert-manager namespace: flux-system
interval: 10m interval: 10m
values: values:
crds: crds:

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: jetstack
namespace: flux-system
spec:
interval: 1m
url: https://charts.jetstack.io

View File

@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespaces

View File

@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- monitor.yaml

View File

@ -2,5 +2,4 @@
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: cattle-system name: monitor

View File

@ -2,4 +2,4 @@
apiVersion: v1 apiVersion: v1
kind: Namespace kind: Namespace
metadata: metadata:
name: kube-prometheus-stack name: external-secrets

View File

@ -0,0 +1,18 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: external-secrets
namespace: flux-system
spec:
interval: 10m
releaseName: external-secrets
targetNamespace: external-secrets
chart:
spec:
chart: external-secrets
sourceRef:
kind: HelmRepository
name: external-secrets
namespace: flux-system
interval: 10m

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: external-secrets
namespace: flux-system
spec:
interval: 1m
url: https://charts.external-secrets.io

View File

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- repository.yaml
- release.yaml

View File

@ -0,0 +1,59 @@
---
# Flux HelmRelease for kube-prometheus-stack (Prometheus operator +
# Grafana), sourced from the prometheus-community HelmRepository and
# released into the `monitor` namespace.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: kube-prometheus-stack
namespace: flux-system
spec:
interval: 10m
releaseName: kube-prometheus-stack
targetNamespace: monitor
chart:
spec:
chart: kube-prometheus-stack
sourceRef:
kind: HelmRepository
name: prometheus-community
namespace: flux-system
interval: 10m
values:
grafana:
# NOTE(review): plaintext Grafana admin password committed to the repo.
# Prefer referencing a Secret (e.g. via `valuesFrom`, as done for the
# loki minio credentials elsewhere in this commit).
adminPassword: admin
defaultDashboardsTimezone: browser
# This kind of sucks, but this is a forward declaration of the issuer and
# ingress class. The problem is that we want Traefik and other services
# to be able to use Prometheus operators, but they require CRDs installed
# within this chart.
#
# By sequencing Prometheus to be installed first, these labels just won't
# be recognized by the ingress and cluster issuer until they're installed
# later -- undesirable, but acceptable -- as opposed to flatly failing
# from missing CRDs by installing Traefik first.
#
# Really, the ideal solution is probably to install all CRDs first, but
# I'm not sure how to do that in a way that guarantees compatibility
# with the CRDs that might be installed in Helm charts later. We can skip
# installing CRDs from the Helm chart, but if the CRDs get updated, we
# need to manually update the CRDs in our repository.
#
# Alternatively, we could declare an Ingress/IngressRoute after Traefik
# is installed, but it wouldn't solve the root problem around dependent
# CRDs
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: le-cf-issuer
kubernetes.io/ingress.class: traefik
hosts:
- gf.dolo.mnke.org
prometheus:
prometheusSpec:
# Persist TSDB data on Longhorn so metrics survive pod restarts.
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: longhorn
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 4Gi

View File

@ -3,7 +3,7 @@ apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository kind: HelmRepository
metadata: metadata:
name: prometheus-community name: prometheus-community
namespace: kube-prometheus-stack namespace: flux-system
spec: spec:
interval: 10m interval: 10m
url: https://prometheus-community.github.io/helm-charts url: https://prometheus-community.github.io/helm-charts

View File

@ -0,0 +1,12 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- common
- external-secrets
- cert-manager
- longhorn
- nfs-subdir-external-provisioner
- kube-prometheus-stack
- loki
- promtail

View File

@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- secret.yaml
- repository.yaml
- release.yaml

View File

@ -0,0 +1,98 @@
---
# Flux HelmRelease for Loki in monolithic (SingleBinary) mode with an
# embedded MinIO as the object store, released into `monitor`.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: loki
namespace: flux-system
spec:
interval: 10m
releaseName: loki
targetNamespace: monitor
# The MinIO root password is injected from the loki-creds Secret
# (populated by an ExternalSecret) rather than committed here.
valuesFrom:
- kind: Secret
name: loki-creds
valuesKey: minio-password
targetPath: minio.rootPassword
values:
# https://grafana.com/docs/loki/latest/setup/install/helm/install-monolithic/
loki:
auth_enabled: false
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: "2024-04-01"
store: tsdb
object_store: s3
schema: v13
index:
prefix: loki_index_
period: 24h
pattern_ingester:
enabled: true
# compactor:
# retention_enabled: true
# retention_delete_delay: 2h
limits_config:
# NOTE(review): retention_period is set (31 days) but the compactor
# retention block above is commented out; with compactor retention
# disabled, old data may never actually be deleted — confirm intent.
retention_period: 744h
allow_structured_metadata: true
volume_enabled: true
ruler:
enable_api: true
minio:
enabled: true
persistence:
size: 8Gi
rootUser: root
# rootPassword: ''  (supplied via valuesFrom above)
deploymentMode: SingleBinary
singleBinary:
replicas: 1
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
# Turn this on for debugging
lokiCanary:
enabled: false
# If the canary is turned off, this has to be turned off too
test:
enabled: false
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
chunksCache:
allocatedMemory: 512

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: grafana
namespace: flux-system
spec:
interval: 1m
url: https://grafana.github.io/helm-charts

View File

@ -0,0 +1,18 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: loki-creds
namespace: flux-system
spec:
secretStoreRef:
kind: ClusterSecretStore
name: infisical
target:
name: loki-creds
data:
- secretKey: minio-password
remoteRef:
key: loki-minio-password

View File

@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- repository.yaml
- release.yaml

View File

@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: longhorn-system

View File

@ -2,27 +2,25 @@
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
name: rancher name: longhorn
namespace: cattle-system namespace: flux-system
spec: spec:
interval: 10m interval: 10m
releaseName: longhorn
targetNamespace: longhorn-system
chart: chart:
spec: spec:
chart: rancher chart: longhorn
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: rancher-stable name: longhorn
namespace: cattle-system namespace: flux-system
interval: 10m interval: 10m
values: values:
bootstrapPassword: 'admin' # This is a forward declaration!
hostname: rancher.dolo.mnke.org
ingress: ingress:
enabled: true enabled: true
extraAnnotations: annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: le-cf-issuer cert-manager.io/cluster-issuer: le-cf-issuer
ingressClassName: traefik kubernetes.io/ingress.class: traefik
tls: host: longhorn.dolo.mnke.org
source: secret
secretName: rancher-tls

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: longhorn
namespace: flux-system
spec:
interval: 1m
url: https://charts.longhorn.io

View File

@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- repository.yaml
- release.yaml

View File

@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nfs-subdir-external-provisioner

View File

@ -1,34 +1,20 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nfs-subdir-external-provisioner
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: nfs-subdir-external-provisioner
namespace: nfs-subdir-external-provisioner
spec:
interval: 1m
url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
name: nfs-subdir-external-provisioner name: nfs-subdir-external-provisioner
namespace: nfs-subdir-external-provisioner namespace: flux-system
spec: spec:
interval: 10m interval: 10m
releaseName: nfs-subdir-external-provisioner
targetNamespace: nfs-subdir-external-provisioner
chart: chart:
spec: spec:
chart: nfs-subdir-external-provisioner chart: nfs-subdir-external-provisioner
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: nfs-subdir-external-provisioner name: nfs-subdir-external-provisioner
namespace: nfs-subdir-external-provisioner namespace: flux-system
interval: 10m interval: 10m
values: values:
nfs: nfs:
@ -38,5 +24,3 @@ spec:
accessModes: ReadWriteMany accessModes: ReadWriteMany
name: nfs-client name: nfs-client
defaultClass: false defaultClass: false

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: nfs-subdir-external-provisioner
namespace: flux-system
spec:
interval: 1m
url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/

View File

@ -0,0 +1,16 @@
--- # clusterrole.yaml
# Read-only RBAC that Promtail needs for kubernetes_sd_configs service
# discovery: it lists/watches pods, services, and nodes to build targets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # FIX: ClusterRole is cluster-scoped. The original manifest carried a
  # `namespace: monitor` field here, which is meaningless on cluster-scoped
  # objects (ignored or rejected by the API server) — removed.
  name: promtail-clusterrole
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - services
      - pods
    verbs:
      - get
      - watch
      - list

View File

@ -0,0 +1,61 @@
--- # configmap.yaml
# Promtail scrape configuration, mounted into the DaemonSet at
# /etc/promtail/promtail.yaml. Discovers pods via the Kubernetes API and
# pushes their logs to the in-cluster Loki service.
apiVersion: v1
kind: ConfigMap
metadata:
  name: promtail-config
  namespace: monitor
data:
  promtail.yaml: |
    server:
      http_listen_port: 9080
      grpc_listen_port: 0
    clients:
      - url: http://loki:3100/loki/api/v1/push
    positions:
      filename: /tmp/positions.yaml
    target_config:
      sync_period: 10s
    scrape_configs:
      - job_name: pod-logs
        kubernetes_sd_configs:
          - role: pod
        pipeline_stages:
          # NOTE(review): the `docker` stage assumes Docker-formatted log
          # lines; containerd-based nodes usually need the `cri` stage
          # instead — confirm against the cluster's container runtime.
          - docker: {}
        relabel_configs:
          # Longhorn hits the label limit of 15. The longhorn_io labels don't
          # seem really relevant for logging purposes.
          # FIX: the previous pattern `longhorn_io_*` only matched a label
          # named "longhorn_io" followed by zero or more underscores
          # (relabel regexes are fully-anchored RE2), so labels such as
          # longhorn_io_component were never dropped. `.*` matches any
          # suffix, which is what was intended.
          - action: labeldrop
            regex: longhorn_io_.*
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          # job = "<namespace>/<pod>"
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __meta_kubernetes_pod_name
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: pod
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container
          # __path__ points Promtail at the node's pod log files for this
          # target: /var/log/pods/*<pod-uid>/*<container>/... → *.log
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__

View File

@ -0,0 +1,44 @@
--- # daemonset.yaml
# Runs one Promtail pod per node, tailing /var/log/pods (plus the legacy
# Docker container log directory) and shipping to Loki.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: promtail-daemonset
  namespace: monitor
spec:
  selector:
    matchLabels:
      name: promtail
  template:
    metadata:
      labels:
        name: promtail
    spec:
      # FIX: `serviceAccount` is a deprecated alias in the PodSpec;
      # `serviceAccountName` is the supported field.
      serviceAccountName: promtail-serviceaccount
      containers:
        - name: promtail-container
          # NOTE(review): untagged image floats on :latest — consider
          # pinning a promtail version compatible with the Loki release.
          image: grafana/promtail
          args:
            - -config.file=/etc/promtail/promtail.yaml
          env:
            - name: 'HOSTNAME' # needed when using kubernetes_sd_configs
              valueFrom:
                fieldRef:
                  fieldPath: 'spec.nodeName'
          volumeMounts:
            - name: logs
              mountPath: /var/log
            - name: promtail-config
              mountPath: /etc/promtail
            - mountPath: /var/lib/docker/containers
              name: varlibdockercontainers
              readOnly: true
      volumes:
        - name: logs
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: promtail-config
          configMap:
            name: promtail-config

View File

@ -0,0 +1,11 @@
---
# Recommended by Grafana to install through raw manifests
# https://grafana.com/docs/loki/latest/send-data/promtail/installation/#install-as-kubernetes-daemonset-recommended
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- daemonset.yaml
- configmap.yaml
- clusterrole.yaml
- serviceaccount.yaml
- rolebinding.yaml

View File

@ -0,0 +1,14 @@
--- # rolebinding.yaml
# Grants the promtail ServiceAccount in `monitor` the cluster-wide read
# permissions defined in promtail-clusterrole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  # FIX: ClusterRoleBinding is cluster-scoped; the original `namespace:
  # monitor` under metadata was invalid there and has been removed. The
  # namespace on the ServiceAccount subject below is required and kept.
  name: promtail-clusterrolebinding
subjects:
  - kind: ServiceAccount
    name: promtail-serviceaccount
    namespace: monitor
roleRef:
  kind: ClusterRole
  name: promtail-clusterrole
  apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,6 @@
--- # ServiceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail-serviceaccount
namespace: monitor

View File

@ -47,3 +47,4 @@ spec:
name: '*' name: '*'
- kind: Kustomization - kind: Kustomization
name: '*' name: '*'

View File

@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- discord.yaml

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- le-cf-issuer.yaml

View File

@ -5,8 +5,8 @@ metadata:
name: le-cf-issuer name: le-cf-issuer
spec: spec:
acme: acme:
server: https://acme-v02.api.letsencrypt.org/directory # server: https://acme-v02.api.letsencrypt.org/directory
# server: https://acme-staging-v02.api.letsencrypt.org/directory server: https://acme-staging-v02.api.letsencrypt.org/directory
email: tonydu121@hotmail.com email: tonydu121@hotmail.com
privateKeySecretRef: privateKeySecretRef:
name: le-cf-issuer-pk name: le-cf-issuer-pk

View File

@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- secret-stores
- issuers
- secrets
- traefik
- alerts

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- infisical.yaml

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cloudflare.yaml

View File

@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- wildcard-mnke-org.yaml

View File

@ -12,4 +12,3 @@ spec:
issuerRef: issuerRef:
name: le-cf-issuer name: le-cf-issuer
kind: ClusterIssuer kind: ClusterIssuer

View File

@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- repository.yaml
- release.yaml
- certificates

View File

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: traefik
labels:
external-secrets.io/secrets.cloudflare: require

View File

@ -1,36 +1,20 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: traefik
labels:
external-secrets.io/secrets.cloudflare: require
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: traefik
namespace: traefik
spec:
interval: 1m
url: https://helm.traefik.io/traefik
--- ---
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
name: traefik name: traefik
namespace: traefik namespace: flux-system
spec: spec:
interval: 10m interval: 10m
releaseName: traefik
targetNamespace: traefik
chart: chart:
spec: spec:
chart: traefik chart: traefik
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: traefik name: traefik
namespace: traefik namespace: flux-system
interval: 10m interval: 10m
values: values:
globalArguments: globalArguments:
@ -39,7 +23,14 @@ spec:
additionalArguments: additionalArguments:
- "--serversTransport.insecureSkipVerify=true" - "--serversTransport.insecureSkipVerify=true"
- "--log.level=DEBUG"
logs:
general:
level: INFO
format: json
access:
enabled: true
format: json
deployment: deployment:
enabled: true enabled: true
@ -109,6 +100,7 @@ spec:
defaultCertificate: defaultCertificate:
secretName: wildcard-mnke-org-tls secretName: wildcard-mnke-org-tls
# Mostly from https://github.com/traefik/traefik-helm-chart/blob/master/EXAMPLES.md#use-prometheus-operator
metrics: metrics:
prometheus: prometheus:
service: service:
@ -116,6 +108,10 @@ spec:
disableAPICheck: false disableAPICheck: false
serviceMonitor: serviceMonitor:
enabled: true enabled: true
# IMPORTANT:
# This must match the kube-prometheus-stack release name
additionalLabels:
release: kube-prometheus-stack
metricRelabelings: metricRelabelings:
- sourceLabels: [__name__] - sourceLabels: [__name__]
separator: ; separator: ;
@ -134,6 +130,10 @@ spec:
honorLabels: true honorLabels: true
prometheusRule: prometheusRule:
enabled: true enabled: true
# IMPORTANT:
# This must match the kube-prometheus-stack release name
additionalLabels:
release: kube-prometheus-stack
rules: rules:
- alert: TraefikDown - alert: TraefikDown
expr: up{job="traefik"} == 0 expr: up{job="traefik"} == 0
@ -144,3 +144,4 @@ spec:
annotations: annotations:
summary: "Traefik Down" summary: "Traefik Down"
description: "{{ $labels.pod }} on {{ $labels.nodename }} is down" description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"

View File

@ -0,0 +1,9 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: traefik
namespace: flux-system
spec:
interval: 1m
url: https://helm.traefik.io/traefik

View File

@ -1,5 +1,3 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1 apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources: []
- alerts.yaml

View File

@ -1,33 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: external-secrets
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: external-secrets
namespace: external-secrets
spec:
interval: 1m
url: https://charts.external-secrets.io
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: external-secrets
namespace: external-secrets
spec:
interval: 10m
chart:
spec:
chart: external-secrets
sourceRef:
kind: HelmRepository
name: external-secrets
namespace: external-secrets
interval: 10m

View File

@ -1,33 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: longhorn-system
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: longhorn
namespace: longhorn-system
spec:
interval: 1m
url: https://charts.longhorn.io
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: longhorn
namespace: longhorn-system
spec:
interval: 10m
chart:
spec:
chart: longhorn
sourceRef:
kind: HelmRepository
name: longhorn
namespace: longhorn-system
interval: 10m

View File

@ -0,0 +1,29 @@
---
# Exposes an out-of-cluster Jellyfin instance (10.0.0.250:8096) through
# the in-cluster Traefik ingress.
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: default
spec:
type: ExternalName
# NOTE(review): ExternalName expects a DNS hostname; the Kubernetes docs
# advise against an IP literal here (it is returned as a CNAME, which
# does not resolve reliably for IPs). A headless Service with a manual
# Endpoints/EndpointSlice is the supported way to target a fixed IP —
# confirm this actually resolves in-cluster.
externalName: 10.0.0.250
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin
namespace: default
spec:
entryPoints:
- websecure
routes:
- match: Host(`jellyfin.mnke.org`)
kind: Rule
services:
- name: jellyfin
kind: Service
# Backend receives the service's own host header, not jellyfin.mnke.org.
passHostHeader: false
port: 8096
# tls:
# certResolver: le-cf-issuer

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: traefik-metrics-custom
namespace: traefik
labels:
app.kubernetes.io/instance: traefik
app.kubernetes.io/name: traefik-metrics-custom
spec:
type: ClusterIP
ports:
- name: traefik-metrics
port: 9100
targetPort: metrics
protocol: TCP
selector:
app.kubernetes.io/instance: traefik

View File

@ -0,0 +1,20 @@
# ServiceMonitor so the Prometheus operator (from kube-prometheus-stack)
# scrapes Traefik's metrics Service on its `metrics` port.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: traefik-monitor
namespace: traefik
labels:
app: traefik
# Must carry the kube-prometheus-stack release label or the operator's
# serviceMonitorSelector will ignore this monitor (same requirement as
# the additionalLabels set in the Traefik HelmRelease).
release: kube-prometheus-stack
spec:
jobLabel: traefik-metrics
selector:
matchLabels:
# NOTE(review): this selects instance "traefik-traefik", but the
# traefik-metrics-custom Service in this commit is labelled
# app.kubernetes.io/instance: traefik (and has no component label) —
# verify which Service these selectors are meant to match, or
# nothing will be scraped.
app.kubernetes.io/instance: traefik-traefik
app.kubernetes.io/component: metrics
namespaceSelector:
matchNames:
- traefik
endpoints:
- port: metrics
path: /metrics

View File

@ -1,25 +1,25 @@
--- # ---
apiVersion: v1 # apiVersion: v1
kind: ConfigMap # kind: ConfigMap
metadata: # metadata:
name: coredns # name: coredns
namespace: kube-system # namespace: kube-system
annotations: # annotations:
fluxcd.io/ignore: "true" # fluxcd.io/ignore: "true"
data: # data:
Corefile: | # Corefile: |
.:53 { # .:53 {
errors # errors
health # health
ready # ready
kubernetes cluster.local in-addr.arpa ip6.arpa { # kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure # pods insecure
fallthrough in-addr.arpa ip6.arpa # fallthrough in-addr.arpa ip6.arpa
} # }
forward . 10.0.123.123 # forward . 10.0.123.123
cache 30 # cache 30
loop # loop
reload # reload
loadbalance # loadbalance
} # }

View File

@ -185,16 +185,10 @@ resource "ansible_host" "swarm_manager" {
count = var.manager_count count = var.manager_count
name = "${local.managers[count.index].name}.local" name = "${local.managers[count.index].name}.local"
groups = ["${var.swarm_name}_manager", var.swarm_name] groups = ["${var.swarm_name}_manager", var.swarm_name]
variables = {
ipv4_address = proxmox_virtual_environment_vm.swarm_manager[count.index].ipv4_addresses[1][0]
}
} }
resource "ansible_host" "swarm_worker" { resource "ansible_host" "swarm_worker" {
count = var.worker_count count = var.worker_count
name = "${local.workers[count.index].name}.local" name = "${local.workers[count.index].name}.local"
groups = ["${var.swarm_name}_worker", var.swarm_name] groups = ["${var.swarm_name}_worker", var.swarm_name]
variables = {
ipv4_address = proxmox_virtual_environment_vm.swarm_worker[count.index].ipv4_addresses[1][0]
}
} }