---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: traefik
  namespace: flux-system
spec:
  interval: 10m
  releaseName: traefik
  targetNamespace: traefik
  install:
    crds: Skip
  chart:
    spec:
      chart: traefik
      sourceRef:
        kind: HelmRepository
        name: traefik
        namespace: flux-system
      interval: 10m
  values:
    globalArguments:
      - "--global.sendanonymoususage=false"
      - "--global.checknewversion=false"

    additionalArguments:
      - "--serversTransport.insecureSkipVerify=true"

    logs:
      general:
        level: INFO
        format: json
      access:
        enabled: true
        format: json
        fields:
          general:
            defaultmode: keep
          headers:
            defaultmode: keep

    deployment:
      enabled: true
      replicas: 3
      annotations: {}
      podAnnotations: {}
      additionalContainers: []
      initContainers: []

    ports:
      web:
        # References a Middleware CR defined outside this chart; see the
        # commented sketch under `experimental.plugins` below
        middlewares:
          - traefik-bouncer@kubernetescrd
        port: 80
        redirections:
          entryPoint:
            to: websecure
            scheme: https
            permanent: true
      websecure:
        middlewares:
          - traefik-bouncer@kubernetescrd
        port: 443
        http3:
          enabled: true
        tls:
          enabled: true

    # The dashboard is not publicly accessible. To view it, run
    # `kubectl port-forward -n traefik $(kubectl get pods -n traefik --selector "app.kubernetes.io/name=traefik" --output=name | head -n1) 8080:8080`
    # and then visit http://127.0.0.1:8080 (it must be 127.0.0.1, not
    # localhost; the reason is unclear)
    ingressRoute:
      dashboard:
        enabled: true

    ingressClass:
      name: traefik

    providers:
      kubernetesCRD:
        enabled: true
        # I thought we should be specifying this, but doing so prevents
        # proxying external services (see the commented ExternalName example
        # at the end of this file) from working for some reason.
        # ingressClass: traefik
        allowExternalNameServices: true
        allowCrossNamespace: true
      kubernetesIngress:
        enabled: true
        # Ditto above
        # ingressClass: traefik
        allowExternalNameServices: true
        publishedService:
          enabled: false

    rbac:
      enabled: true

    service:
      enabled: true
      type: LoadBalancer
      # Needed to preserve the real client IP address; otherwise Kubernetes
      # SNAT will mask it
      externalTrafficPolicy: Local
      annotations: {}
      labels: {}
      spec:
        loadBalancerIP: 10.0.185.128
      loadBalancerSourceRanges: []
      externalIPs: []

    topologySpreadConstraints:
      # Force the scheduler to put traefik pods on nodes where no other
      # traefik pods are scheduled.
      # This counteracts the effects of a Local externalTrafficPolicy, where
      # Kubernetes only load balances at the node level rather than the pod
      # level, so spreading traefik pods across nodes gives more balanced
      # load balancing.
      - labelSelector:
          matchLabels:
            app.kubernetes.io/name: '{{ template "traefik.name" . }}'
        maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule

    # tlsStore:
    #   default:
    #     defaultCertificate:
    #       secretName: wildcard-mnke-org-tls

    experimental:
      plugins:
        crowdsec-bouncer-traefik-plugin:
          moduleName: "github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin"
          version: "v1.3.3"
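
    # The `traefik-bouncer@kubernetescrd` middleware attached to the web and
    # websecure entrypoints above must exist as a Middleware CR; with the
    # kubernetescrd provider, `traefik-bouncer` means a Middleware named
    # `bouncer` in the `traefik` namespace. A minimal sketch of that CR is
    # below; the crowdsecMode, LAPI host, and key are assumptions and must
    # match your CrowdSec deployment (verify option names against the
    # plugin's README for the version pinned above):
    #
    # apiVersion: traefik.io/v1alpha1
    # kind: Middleware
    # metadata:
    #   name: bouncer
    #   namespace: traefik
    # spec:
    #   plugin:
    #     crowdsec-bouncer-traefik-plugin:
    #       enabled: true
    #       crowdsecMode: live
    #       crowdsecLapiHost: crowdsec-service.crowdsec:8080
    #       crowdsecLapiKey: <bouncer API key>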
    # Mostly from https://github.com/traefik/traefik-helm-chart/blob/master/EXAMPLES.md#use-prometheus-operator
    metrics:
      prometheus:
        service:
          enabled: true
        disableAPICheck: false
        serviceMonitor:
          enabled: true
          # IMPORTANT:
          # This must match the kube-prometheus-stack release name
          additionalLabels:
            release: kube-prometheus-stack
          metricRelabelings:
            - sourceLabels: [__name__]
              separator: ;
              regex: ^fluentd_output_status_buffer_(oldest|newest)_.+
              replacement: $1
              action: drop
          relabelings:
            - sourceLabels: [__meta_kubernetes_pod_node_name]
              separator: ;
              regex: ^(.*)$
              targetLabel: nodename
              replacement: $1
              action: replace
          jobLabel: traefik
          interval: 30s
          honorLabels: true
        prometheusRule:
          enabled: true
          # IMPORTANT:
          # This must match the kube-prometheus-stack release name
          additionalLabels:
            release: kube-prometheus-stack
          rules:
            - alert: TraefikDown
              expr: up{job="traefik"} == 0
              for: 5m
              labels:
                context: traefik
                severity: warning
              annotations:
                summary: "Traefik Down"
                description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
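
# allowExternalNameServices above lets the providers route to Services of
# type ExternalName, i.e. proxy traffic to endpoints outside the cluster.
# A minimal sketch of such a Service (the name, namespace, and hostname are
# made up for illustration):
#
# apiVersion: v1
# kind: Service
# metadata:
#   name: external-app
#   namespace: default
# spec:
#   type: ExternalName
#   externalName: app.internal.example.com
#   ports:
#     - port: 443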