diff --git a/.gitignore b/.gitignore
index 2217a41..7e36af7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 .env
+secrets.yaml
+secrets.yml
 venv
diff --git a/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2 b/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
index 8c7a61a..fd5cd9f 100644
--- a/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
+++ b/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
@@ -1,6 +1,5 @@
 networks:
   traefik:
-    driver: overlay
     attachable: true
     name: traefik
 
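Dropping `driver: overlay` is safe in this context: when a stack file is deployed with `docker stack deploy`, Swarm defaults the network driver to overlay, so only `attachable: true` and the fixed name need to be spelled out. A rough CLI equivalent of pre-creating the same network (a sketch, not taken from this repo):

    docker network create --driver overlay --attachable traefik
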
diff --git a/k8s/helmfile.d/00-core.yaml b/k8s/helmfile.d/00-core.yaml
deleted file mode 100644
index 537f5d2..0000000
--- a/k8s/helmfile.d/00-core.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-repositories:
-  - name: traefik
-    url: https://helm.traefik.io/traefik
-  - name: jetstack
-    url: https://charts.jetstack.io
-  - name: rancher-stable
-    url: https://releases.rancher.com/server-charts/stable
-  - name: longhorn
-    url: https://charts.longhorn.io
-  - name: bitnami
-    url: https://charts.bitnami.com/bitnami
-  - name: nfs-subdir-external-provisioner
-    url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
-  - name: prometheus-community
-    url: https://prometheus-community.github.io/helm-charts
-
-environments:
-  staging: &staging
-    values:
-      - ./values/globals/staging.yaml.gotmpl
-  production:
-  default: *staging
-
----
-releases:
-  - name: traefik
-    namespace: {{ .Values.globals.traefik.namespace }}
-    createNamespace: true
-    chart: traefik/traefik
-    values:
-      - ./values/traefik/values.yaml.gotmpl
-
-  - name: cert-manager
-    namespace: {{ .Values.globals.certManager.namespace }}
-    createNamespace: true
-    chart: jetstack/cert-manager
-    values:
-      - ./values/cert-manager/values.yml
-
-  - name: certs
-    chart: ./charts/certs
-    needs:
-      - {{ .Values.globals.certManager.namespace }}/cert-manager
-    values:
-      - ./values/certs/values.yaml.gotmpl
-
-  - name: nfs-subdir-external-provisioner
-    namespace: {{ .Values.globals.nfsSubdirExternalProvisioner.namespace }}
-    createNamespace: true
-    chart: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
-    values:
-      - ./values/nfs-subdir-external-provisioner/values.yaml.gotmpl
-
-  - name: rancher
-    namespace: {{ .Values.globals.rancher.namespace }}
-    createNamespace: true
-    chart: rancher-stable/rancher
-    needs:
-      - {{ .Values.globals.certManager.namespace }}/cert-manager
-    values:
-      - ./values/rancher/values.yaml.gotmpl
-
-  - name: longhorn
-    namespace: {{ .Values.globals.longhorn.namespace }}
-    createNamespace: true
-    chart: longhorn/longhorn
-    values:
-      - ./values/longhorn/values.yaml.gotmpl
-    needs:
-      - {{ .Values.globals.rancher.namespace }}/rancher
-
-
-  - name: kube-prometheus-stack
-    namespace: {{ .Values.globals.kubePrometheusStack.namespace }}
-    createNamespace: true
-    chart: prometheus-community/kube-prometheus-stack
-    needs:
-      - {{ .Values.globals.certManager.namespace }}/cert-manager
-      - {{ .Values.globals.longhorn.namespace }}/longhorn
-    values:
-      - ./values/kube-prometheus-stack/values.yaml.gotmpl
-
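All three removed helmfiles shared this environments stanza: the `&staging` anchor plus `default: *staging` makes plain `helmfile` invocations (no `-e` flag) resolve to the staging values, while `production:` is declared but left empty. The `{{ .Values.globals.* }}` references in the releases are fed by that globals file; a hedged sketch of the shape ./values/globals/staging.yaml.gotmpl must have had (keys inferred from the references above, values assumed):

    globals:
      traefik:
        namespace: traefik
      certManager:
        namespace: cert-manager
      rancher:
        namespace: cattle-system
      # ...one entry per release referenced in the helmfiles
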
diff --git a/k8s/helmfile.d/01-databases.yaml b/k8s/helmfile.d/01-databases.yaml
deleted file mode 100644
index bcff3f4..0000000
--- a/k8s/helmfile.d/01-databases.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-repositories:
-  - name: bitnami
-    url: https://charts.bitnami.com/bitnami
-  - name: runix
-    url: https://helm.runix.net
-
-environments:
-  staging: &staging
-    values:
-      - ./values/globals/staging.yaml.gotmpl
-  production:
-  default: *staging
-
----
-releases:
-  - name: mysql
-    namespace: {{ .Values.globals.mysql.namespace }}
-    createNamespace: true
-    chart: bitnami/mysql
-    values:
-      - ./values/mysql/values.yaml.gotmpl
-
-  - name: phpmyadmin
-    namespace: {{ .Values.globals.phpmyadmin.namespace }}
-    createNamespace: true
-    chart: bitnami/phpmyadmin
-    values:
-      - ./values/phpmyadmin/values.yaml.gotmpl
-    needs:
-      - {{ .Values.globals.mysql.namespace }}/mysql
-
-  - name: postgres
-    namespace: {{ .Values.globals.postgres.namespace }}
-    createNamespace: true
-    chart: bitnami/postgresql
-    values:
-      - ./values/postgres/values.yaml.gotmpl
-
-  - name: pgadmin4
-    namespace: {{ .Values.globals.pgadmin4.namespace }}
-    createNamespace: true
-    chart: runix/pgadmin4
-    values:
-      - ./values/pgadmin4/values.yaml.gotmpl
-    needs:
-      - {{ .Values.globals.postgres.namespace }}/postgres
-
-  - name: init-dbs
-    # It doesn't really matter where we put this, but I don't want it polluting
-    # the default namespace
-    namespace: init-dbs
-    createNamespace: true
-    chart: ./charts/init-dbs
-    values:
-      - ./values/init-dbs/values.yaml.gotmpl
-    needs:
-      - {{ .Values.globals.postgres.namespace }}/postgres
-      - {{ .Values.globals.mysql.namespace }}/mysql
-
-  - name: redis
-    namespace: {{ .Values.globals.redis.namespace }}
-    createNamespace: true
-    chart: bitnami/redis
-    values:
-      - ./values/redis/values.yaml.gotmpl
-
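helmfile's `needs` entries use the `<namespace>/<release>` form, which is why the namespace globals are templated into each dependency reference; helmfile then installs the dependency release first. Under the assumed globals sketched earlier, the pgadmin4 entry would render roughly to:

    - name: pgadmin4
      namespace: pgadmin4          # assumed value from globals
      chart: runix/pgadmin4
      needs:
        - postgres/postgres        # postgres release installs first
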
diff --git a/k8s/helmfile.d/02-applications.yaml b/k8s/helmfile.d/02-applications.yaml
deleted file mode 100644
index 83d0de7..0000000
--- a/k8s/helmfile.d/02-applications.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-repositories:
-  - name: gitlab
-    url: https://charts.gitlab.io
-  - name: bitnami
-    url: https://charts.bitnami.com/bitnami
-  - name: uptime-kuma
-    url: https://helm.irsigler.cloud
-  - name: authentik
-    url: https://charts.goauthentik.io
-  - name: harbor
-    url: https://helm.goharbor.io
-
-environments:
-  staging: &staging
-    values:
-      - ./values/globals/staging.yaml.gotmpl
-  production:
-  default: *staging
-
----
-releases:
-  - name: uptime-kuma
-    namespace: {{ .Values.globals.uptimeKuma.namespace }}
-    createNamespace: true
-    chart: uptime-kuma/uptime-kuma
-    values:
-      - ./values/uptime-kuma/values.yaml.gotmpl
-
-  - name: authentik
-    namespace: {{ .Values.globals.authentik.namespace }}
-    createNamespace: true
-    chart: authentik/authentik
-    values:
-      - ./values/authentik/values.yaml.gotmpl
-
-  - name: argo-cd
-    namespace: {{ .Values.globals.argocd.namespace }}
-    createNamespace: true
-    chart: bitnami/argo-cd
-    values:
-      - ./values/argo-cd/values.yaml.gotmpl
-
-  - name: harbor
-    namespace: {{ .Values.globals.harbor.namespace }}
-    createNamespace: true
-    chart: bitnami/harbor
-    values:
-      - ./values/harbor/values.yaml.gotmpl
-
-  - name: ghost
-    namespace: {{ .Values.globals.ghost.namespace }}
-    createNamespace: true
-    chart: bitnami/ghost
-    values:
-      - ./values/ghost/values.yaml.gotmpl
-
-  - name: gitea
-    installed: false
-    namespace: {{ .Values.globals.gitea.namespace }}
-    createNamespace: true
-    chart: bitnami/gitea
-    values:
-      - ./values/gitea/values.yaml.gotmpl
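`installed: false` keeps the gitea release declared but inactive: `helmfile apply`/`helmfile sync` will uninstall it if it exists and otherwise skip it, so the block doubles as a toggle:

    - name: gitea
      installed: false   # flip to true (or remove) to deploy the release
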
diff --git a/k8s/helmfile.d/charts/certs/.helmignore b/k8s/helmfile.d/charts/certs/.helmignore
deleted file mode 100644
index 0e8a0eb..0000000
--- a/k8s/helmfile.d/charts/certs/.helmignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/k8s/helmfile.d/charts/certs/Chart.yaml b/k8s/helmfile.d/charts/certs/Chart.yaml
deleted file mode 100644
index 06cd0fa..0000000
--- a/k8s/helmfile.d/charts/certs/Chart.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: v2
-name: certs
-description: A Helm chart for Kubernetes
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-# It is recommended to use it with quotes.
-appVersion: "1.16.0"
diff --git a/k8s/helmfile.d/charts/certs/templates/NOTES.txt b/k8s/helmfile.d/charts/certs/templates/NOTES.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/k8s/helmfile.d/charts/certs/templates/_helpers.tpl b/k8s/helmfile.d/charts/certs/templates/_helpers.tpl
deleted file mode 100644
index 27f9fc8..0000000
--- a/k8s/helmfile.d/charts/certs/templates/_helpers.tpl
+++ /dev/null
@@ -1,62 +0,0 @@
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "certs.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "certs.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "certs.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "certs.labels" -}}
-helm.sh/chart: {{ include "certs.chart" . }}
-{{ include "certs.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "certs.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "certs.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "certs.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "certs.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
diff --git a/k8s/helmfile.d/charts/certs/templates/certificate.yml b/k8s/helmfile.d/charts/certs/templates/certificate.yml
deleted file mode 100644
index 64fd72a..0000000
--- a/k8s/helmfile.d/charts/certs/templates/certificate.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: {{ .Values.certificateName }}
-  namespace: {{ .Values.certificateNamespace }}
-spec:
-  secretName: {{ .Values.certificateSecretName }}
-  issuerRef:
-    name: {{ .Values.issuerName | quote }}
-    kind: ClusterIssuer
-  commonName: {{ .Values.commonName | quote }}
-  dnsNames:
-    {{- range .Values.dnsNames }}
-    - {{ . | quote }}
-    {{- end}}
-
diff --git a/k8s/helmfile.d/charts/certs/templates/cf-secret.yml b/k8s/helmfile.d/charts/certs/templates/cf-secret.yml
deleted file mode 100644
index a5db44b..0000000
--- a/k8s/helmfile.d/charts/certs/templates/cf-secret.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ .Values.cloudflareTokenSecretName }}
-  namespace: {{ .Values.certManagerNamespace }}
-type: Opaque
-stringData:
-  cloudflare-token: {{ .Values.cloudflareSecretToken }}
-
diff --git a/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml b/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml
deleted file mode 100644
index bcb1f74..0000000
--- a/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: {{ .Values.issuerName }}
-spec:
-  acme:
-    server: {{- if eq .Values.issuerMode "staging" }} "https://acme-staging-v02.api.letsencrypt.org/directory" {{- else }} "https://acme-v02.api.letsencrypt.org/directory" {{- end }}
-    email: {{ .Values.acmeEmail }}
-    privateKeySecretRef:
-      name: {{ .Values.privateKeySecretRef }}
-    solvers:
-      - dns01:
-          cloudflare:
-            email: {{ .Values.cloudflareEmail }}
-            apiTokenSecretRef:
-              name: {{ .Values.cloudflareTokenSecretName }}
-              key: cloudflare-token
-        selector:
-          dnsZones:
-            {{- range .Values.dnsZones }}
-            - {{ . | quote }}
-            {{- end}}
-
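The inline conditional on the ACME `server:` field is what `issuerMode` in values.yaml switches. With the default `issuerMode: staging` below, the template renders to the Let's Encrypt staging endpoint (untrusted certificates, generous rate limits); anything else selects the production endpoint:

    server: "https://acme-staging-v02.api.letsencrypt.org/directory"   # issuerMode: staging
    server: "https://acme-v02.api.letsencrypt.org/directory"           # otherwise
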
diff --git a/k8s/helmfile.d/charts/certs/values.yaml b/k8s/helmfile.d/charts/certs/values.yaml
deleted file mode 100644
index 97a1db8..0000000
--- a/k8s/helmfile.d/charts/certs/values.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-acmeEmail: tonydu121@hotmail.com
-cloudflareEmail: tonydu121@hotmail.com
-
-# staging or production
-issuerMode: staging
-
-issuerName: letsencrypt
-privateKeySecretRef: letsencrypt
-
-certManagerNamespace: cert-manager
-
-cloudflareSecretToken: redacted
-cloudflareTokenSecretName: cloudflare-token-secret
-
-dnsZones:
-  - "mnke.org"
-  - "*.mnke.org"
-  - "*.hl.mnke.org"
-
-# TODO: Allow for multiple creation
-certificateName: hl-mnke-org
-certificateNamespace: default
-certificateSecretName: hl-mnke-org-tls
-
-commonName: "*.hl.mnke.org"
-dnsNames:
-  - "hl.mnke.org"
-  - "*.hl.mnke.org"
diff --git a/k8s/helmfile.d/charts/init-dbs/.helmignore b/k8s/helmfile.d/charts/init-dbs/.helmignore
deleted file mode 100644
index 0e8a0eb..0000000
--- a/k8s/helmfile.d/charts/init-dbs/.helmignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/k8s/helmfile.d/charts/init-dbs/Chart.yaml b/k8s/helmfile.d/charts/init-dbs/Chart.yaml
deleted file mode 100644
index d530203..0000000
--- a/k8s/helmfile.d/charts/init-dbs/Chart.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: v2
-name: init-dbs
-description: A Helm chart for Kubernetes
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-# It is recommended to use it with quotes.
-appVersion: "1.16.0"
diff --git a/k8s/helmfile.d/charts/init-dbs/templates/NOTES.txt b/k8s/helmfile.d/charts/init-dbs/templates/NOTES.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl b/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl
deleted file mode 100644
index c58e41b..0000000
--- a/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl
+++ /dev/null
@@ -1,51 +0,0 @@
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "init-dbs.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "init-dbs.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "init-dbs.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "init-dbs.labels" -}}
-helm.sh/chart: {{ include "init-dbs.chart" . }}
-{{ include "init-dbs.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "init-dbs.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "init-dbs.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
diff --git a/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml b/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml
deleted file mode 100644
index 32403a3..0000000
--- a/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-{{- range .Values.postgres.databases }}
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
-spec:
-  template:
-    spec:
-      imagePullSecrets:
-        {{- toYaml $.Values.imagePullSecrets | nindent 8 }}
-      restartPolicy: OnFailure
-      containers:
-      - name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
-        image: {{ $.Values.postgres.image.ref }}
-        imagePullPolicy: {{ $.Values.postgres.image.pullPolicy }}
-        command:
-          - /bin/sh
-          - -c
-        args:
-          # If the user and database already exist, just exit.
-          # If something else goes wrong we still exit 0, but for this
-          # best-effort init job that is acceptable.
-          - |
-            sleep 10s && \
-              psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
-              -tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" |\
-              grep -q 1 ||\
-              psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
-                -c "CREATE USER $USERNAME WITH ENCRYPTED PASSWORD '$PASSWORD';" \
-                -c "CREATE DATABASE $DATABASE WITH OWNER = $USERNAME;"
-        env:
-          - name: PGUSER
-            valueFrom:
-              secretKeyRef:
-                key: username
-                name: {{ include "init-dbs.fullname" $ }}-postgres
-          - name: PGPASSWORD
-            valueFrom:
-              secretKeyRef:
-                key: password
-                name: {{ include "init-dbs.fullname" $ }}-postgres
-          - name: USERNAME
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-username
-                name: {{ include "init-dbs.fullname" $ }}-postgres
-          - name: PASSWORD
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-password
-                name: {{ include "init-dbs.fullname" $ }}-postgres
-          - name: DATABASE
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-database
-                name: {{ include "init-dbs.fullname" $ }}-postgres
-{{- end }}
-
-{{- range .Values.mysql.databases }}
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
-spec:
-  template:
-    spec:
-      imagePullSecrets:
-        {{- toYaml $.Values.imagePullSecrets | nindent 8 }}
-      restartPolicy: OnFailure
-      containers:
-      - name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
-        image: {{ $.Values.mysql.image.ref }}
-        imagePullPolicy: {{ $.Values.mysql.image.pullPolicy }}
-        command:
-          - /bin/sh
-          - -c
-        args:
-          - |
-              sleep 10s && \
-              mysql -h {{ $.Values.mysql.host }} -u $MYUSER mysql --password=$MYPASSWORD \
-              -e "CREATE DATABASE IF NOT EXISTS $DATABASE;" \
-              -e "CREATE USER IF NOT EXISTS '$USERNAME'@'%' IDENTIFIED BY '$PASSWORD';" \
-              -e "GRANT ALL PRIVILEGES ON $DATABASE TO '$USERNAME'@'%';"
-        env:
-          - name: MYUSER
-            valueFrom:
-              secretKeyRef:
-                key: username
-                name: {{ include "init-dbs.fullname" $ }}-mysql
-          - name: MYPASSWORD
-            valueFrom:
-              secretKeyRef:
-                key: password
-                name: {{ include "init-dbs.fullname" $ }}-mysql
-          - name: USERNAME
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-username
-                name: {{ include "init-dbs.fullname" $ }}-mysql
-          - name: PASSWORD
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-password
-                name: {{ include "init-dbs.fullname" $ }}-mysql
-          - name: DATABASE
-            valueFrom:
-              secretKeyRef:
-                key: {{ .database }}-database
-                name: {{ include "init-dbs.fullname" $ }}-mysql
-{{- end }}
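The `sleep 10s` is a crude wait for the database service to come up, and idempotency is handled differently per engine: the Postgres job guards creation behind the `SELECT 1 FROM pg_database ... | grep -q 1 ||` check, while the MySQL job leans on `IF NOT EXISTS`. The rendered Postgres guard can be exercised by hand roughly like this (host and database name are placeholders):

    psql -h postgres.postgres.svc -U "$PGUSER" postgres \
      -tc "SELECT 1 FROM pg_database WHERE datname = 'test'" | grep -q 1 \
      || echo "test is missing and would be created"
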
diff --git a/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml b/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml
deleted file mode 100644
index 2f4a021..0000000
--- a/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "init-dbs.fullname" $ }}-postgres
-  labels:
-    {{- include "init-dbs.labels" $ | nindent 4 }}
-type: Opaque
-stringData:
-  username: {{ .Values.postgres.username }}
-  password: {{ .Values.postgres.password }}
-  {{- range .Values.postgres.databases }}
-  {{ .database }}-database: {{ .database }}
-  {{ .database }}-username: {{ .username }}
-  {{ .database }}-password: {{ .password }}
-  {{- end }}
-
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "init-dbs.fullname" $ }}-mysql
-  labels:
-    {{- include "init-dbs.labels" $ | nindent 4 }}
-type: Opaque
-stringData:
-  username: {{ .Values.mysql.username }}
-  password: {{ .Values.mysql.password }}
-  {{- range .Values.mysql.databases }}
-  {{ .database }}-database: {{ .database }}
-  {{ .database }}-username: {{ .username }}
-  {{ .database }}-password: {{ .password }}
-  {{- end }}
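Ranging over `databases` emits one `<db>-database`/`<db>-username`/`<db>-password` triple per entry, which is exactly what the Jobs above reference via `secretKeyRef`. With the default values below, the Postgres secret's stringData would contain, for example:

    test-database: test
    test-username: test
    test-password: test
    test1-database: test1
    test1-username: test1
    test1-password: test1
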
diff --git a/k8s/helmfile.d/charts/init-dbs/values.yaml b/k8s/helmfile.d/charts/init-dbs/values.yaml
deleted file mode 100644
index 2c53126..0000000
--- a/k8s/helmfile.d/charts/init-dbs/values.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Secrets for pulling images from a private registry. More information: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-imagePullSecrets: []
-# This is to override the chart name.
-nameOverride: ""
-fullnameOverride: ""
-
-postgres:
-  image:
-    ref: postgres
-    # This sets the pull policy for images.
-    pullPolicy: IfNotPresent
-  host: ""
-  username: postgres
-  password: ""
-  databases:
-    - database: test
-      username: test
-      password: test
-    - database: test1
-      username: test1
-      password: test1
-mysql:
-  image:
-    ref: mysql
-    # This sets the pull policy for images.
-    pullPolicy: IfNotPresent
-  host: ""
-  username: root
-  password: ""
-  databases:
-    - database: test
-      username: test
-      password: test
-    - database: test1
-      username: test1
-      password: test1
diff --git a/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl b/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl
deleted file mode 100644
index f89abd7..0000000
--- a/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl
+++ /dev/null
@@ -1,4190 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.argocd.storageClass }}
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-
-## @param kubeVersion Override Kubernetes version
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname
-##
-fullnameOverride: ""
-## @param commonLabels Labels to add to all deployed objects
-##
-commonLabels: {}
-## @param commonAnnotations Annotations to add to all deployed objects
-##
-commonAnnotations: {}
-## @param clusterDomain Kubernetes cluster domain name
-##
-clusterDomain: cluster.local
-## @param extraDeploy Array of extra objects to deploy with the release
-##
-extraDeploy: []
-## @section Argo CD image parameters
-
-## Bitnami Argo CD image
-## ref: https://hub.docker.com/r/bitnami/argo-cd/tags/
-## @param image.registry [default: REGISTRY_NAME] Argo CD image registry
-## @param image.repository [default: REPOSITORY_NAME/argo-cd] Argo CD image repository
-## @skip image.tag Argo CD image tag (immutable tags are recommended)
-## @param image.digest Argo CD image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy Argo CD image pull policy
-## @param image.pullSecrets Argo CD image pull secrets
-## @param image.debug Enable Argo CD image debug mode
-##
-image:
-  registry: docker.io
-  repository: bitnami/argo-cd
-  tag: 2.13.4-debian-12-r0
-  digest: ""
-  ## Specify a imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## e.g:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Enable debug mode
-  ##
-  debug: false
-## @section Argo CD application controller parameters
-
-## Argo CD Controller
-##
-controller:
-  ## @param controller.kind Kind to deploy ArgoCD application controller in.
-  ## Use either StatefulSet or Deployment (default). StatefulSet is required when running in HA mode.
-  ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/high_availability/
-  ##
-  kind: Deployment
-  ## @param controller.replicaCount Number of Argo CD replicas to deploy
-  ##
-  replicaCount: 1
-  ## Configure extra options for Argo CD containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param controller.startupProbe.enabled Enable startupProbe on Argo CD nodes
-  ## @param controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param controller.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param controller.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param controller.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param controller.livenessProbe.enabled Enable livenessProbe on Argo CD nodes
-  ## @param controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param controller.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param controller.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param controller.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param controller.readinessProbe.enabled Enable readinessProbe on Argo CD nodes
-  ## @param controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param controller.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param controller.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param controller.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param controller.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param controller.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param controller.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Argo CD resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "micro"
-  ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param controller.podSecurityContext.enabled Enabled Argo CD pods' Security Context
-  ## @param controller.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param controller.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param controller.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param controller.podSecurityContext.fsGroup Set Argo CD pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param controller.containerSecurityContext.enabled Enabled Argo CD containers' Security Context
-  ## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param controller.containerSecurityContext.runAsUser Set Argo CD containers' Security Context runAsUser
-  ## @param controller.containerSecurityContext.runAsGroup Set Argo CD containers' Security Context runAsGroup
-  ## @param controller.containerSecurityContext.allowPrivilegeEscalation Set Argo CD containers' Security Context allowPrivilegeEscalation
-  ## @param controller.containerSecurityContext.capabilities.drop Set Argo CD containers' Security Context capabilities to be dropped
-  ## @param controller.containerSecurityContext.readOnlyRootFilesystem Set Argo CD containers' Security Context readOnlyRootFilesystem
-  ## @param controller.containerSecurityContext.runAsNonRoot Set Argo CD container's Security Context runAsNonRoot
-  ## @param controller.containerSecurityContext.privileged Set controller container's Security Context privileged
-  ## @param controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## ServiceAccount configuration for the Argo CD application controller
-  ##
-  serviceAccount:
-    ## @param controller.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param controller.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param controller.serviceAccount.automountServiceAccountToken Automount service account token for the application controller service account
-    ##
-    automountServiceAccountToken: false
-    ## @param controller.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## Enable admin clusterrole resources. Allows Argo CD to deploy to the K8s cluster
-  ## @param controller.clusterAdminAccess Enable K8s cluster admin access for the application controller
-  ##
-  clusterAdminAccess: true
-  ## Enable Custom Rules for the Application Controller cluster role
-  ## @param controller.clusterRoleRules Use custom rules for the application controller's cluster role
-  ##
-  clusterRoleRules: []
-  ## Argo CD application controller log format: text|json
-  ## @param controller.logFormat Format for the Argo CD application controller logs. Options: [text, json]
-  ##
-  logFormat: text
-  ## Argo CD application controller log level
-  ## @param controller.logLevel Log level for the Argo CD application controller
-  ##
-  logLevel: info
-  ## Argo CD application controller ports
-  ## @param controller.containerPorts.metrics Argo CD application controller metrics port number
-  containerPorts:
-    metrics: 8082
-  ## Argo CD application controller service parameters
-  ##
-  service:
-    ## @param controller.service.type Argo CD service type
-    ##
-    type: ClusterIP
-    ## @param controller.service.ports.metrics Argo CD application controller service port
-    ##
-    ports:
-      metrics: 8082
-    ## Node ports to expose
-    ## @param controller.service.nodePorts.metrics Node port for Argo CD application controller service
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      metrics: ""
-    ## @param controller.service.clusterIP Argo CD application controller service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param controller.service.loadBalancerIP Argo CD application controller service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerIP: ""
-    ## @param controller.service.loadBalancerSourceRanges Argo CD application controller service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param controller.service.externalTrafficPolicy Argo CD application controller service external traffic policy
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param controller.service.annotations Additional custom annotations for Argo CD application controller service
-    ##
-    annotations: {}
-    ## @param controller.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param controller.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param controller.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    sessionAffinityConfig: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param controller.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param controller.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param controller.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param controller.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
-    ## @param controller.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param controller.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param controller.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param controller.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## Metrics configuration for Argo CD application controller
-  ##
-  metrics:
-    ## @param controller.metrics.enabled Enable Argo CD application controller metrics
-    ##
-    enabled: true
-    service:
-      ## @param controller.metrics.service.type Argo CD application controller service type
-      ##
-      type: ClusterIP
-      ## @param controller.metrics.service.ports.metrics Argo CD application controller metrics service port
-      ##
-      ports:
-        metrics: 8082
-      ## Node ports to expose
-      ## @param controller.metrics.service.nodePorts.metrics Node port for the application controller service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param controller.metrics.service.clusterIP Argo CD application controller metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param controller.metrics.service.loadBalancerIP Argo CD application controller service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param controller.metrics.service.loadBalancerSourceRanges Argo CD application controller service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param controller.metrics.service.externalTrafficPolicy Argo CD application controller service external traffic policy
-      ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param controller.metrics.service.annotations Additional custom annotations for Argo CD application controller service
-      ##
-      annotations: {}
-      ## @param controller.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param controller.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      sessionAffinityConfig: {}
-    ## Argo CD application controller metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param controller.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param controller.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param controller.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-      ##
-      jobLabel: ""
-      ## @param controller.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param controller.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param controller.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      ## @param controller.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param controller.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param controller.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-    ## Rules for PrometheusRule object if enabled
-    ##
-    ## E.g.
-    ## @param controller.metrics.rules.enabled Enable render extra rules for PrometheusRule object
-    ## @param controller.metrics.rules.spec Rules to render into the PrometheusRule object
-    ## @param controller.metrics.rules.selector Selector for the PrometheusRule object
-    ## @param controller.metrics.rules.namespace Namespace where to create the PrometheusRule object
-    ## @param controller.metrics.rules.additionalLabels Additional labels to add to the PrometheusRule object
-    ##
-    rules:
-      enabled: false
-      ## E.g
-      ## - alert: ArgoAppMissing
-      ##   expr: |
-      ##     absent(argocd_app_info)
-      ##   for: 15m
-      ##   labels:
-      ##     severity: critical
-      ##   annotations:
-      ##     summary: "[ArgoCD] No reported applications"
-      ##     description: >
-      ##       ArgoCD has not reported any applications data for the past 15 minutes which
-      ##       means that it must be down or not functioning properly.  This needs to be
-      ##       resolved for this cloud to continue to maintain state.
-      ## - alert: ArgoAppNotSynced
-      ##   expr: |
-      ##     argocd_app_info{sync_status!="Synced"} == 1
-      ##   for: 12h
-      ##   labels:
-      ##     severity: warning
-      ##   annotations:
-      ##     summary: "[{{`{{ $labels.name }}`}}] Application not synchronized"
-      ##     description: >
-      ##       The application [{{`{{ $labels.name }}`}} has not been synchronized for over
-      ##       12 hours which means that the state of this cloud has drifted away from the
-      ##       state inside Git.
-      ##
-      spec: []
-      ## E.g
-      ## selector:
-      ##   prometheus: kube-prometheus
-      ##
-      selector: {}
-      namespace: monitoring
-      additionalLabels: {}
-  ## @param controller.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## Arguments that will be used by default for the application controller
-  ## @param controller.defaultArgs.statusProcessors Default status processors for Argo CD controller
-  ## @param controller.defaultArgs.operationProcessors Default operation processors for Argo CD controller
-  ## @param controller.defaultArgs.appResyncPeriod Default application resync period for Argo CD controller
-  ## @param controller.defaultArgs.selfHealTimeout Default self heal timeout for Argo CD controller
-  ##
-  defaultArgs:
-    statusProcessors: "20"
-    operationProcessors: "10"
-    appResyncPeriod: "180"
-    selfHealTimeout: "5"
-  ## @param controller.args Override default container args (useful when using custom images). Overrides the defaultArgs.
-  ##
-  args: []
-  ## @param controller.extraArgs Add extra arguments to the default arguments for the Argo CD controller
-  ##
-  extraArgs: []
-  ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/
-  ##
-  dynamicClusterDistribution:
-    ## @param controller.dynamicClusterDistribution.enabled Whether dynamic cluster distribution is enabled.
-    ##
-    enabled: false
-    ## @param controller.dynamicClusterDistribution.heartbeatDuration Time to update the cluster sharding (defaults to 10 seconds).
-    ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/#working-of-dynamic-distribution
-    ##
-    heartbeatDuration: ""
-  ## @param controller.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: true
-  ## @param controller.hostAliases Argo CD pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param controller.podLabels Extra labels for Argo CD pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param controller.podAnnotations Annotations for Argo CD pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param controller.podAffinityPreset Pod affinity preset. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param controller.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node controller.affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param controller.nodeAffinityPreset.type Node affinity preset type. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param controller.nodeAffinityPreset.key Node label key to match. Ignored if `controller.affinity` is set
-    ##
-    key: ""
-    ## @param controller.nodeAffinityPreset.values Node label values to match. Ignored if `controller.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param controller.affinity Affinity for Argo CD pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `controller.podAffinityPreset`, `controller.podAntiAffinityPreset`, and `controller.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param controller.nodeSelector Node labels for Argo CD pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param controller.tolerations Tolerations for Argo CD pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param controller.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param controller.shareProcessNamespace Enable shared process namespace in a pod.
-  ## If set to false (default), each container runs in its own process namespace and the controller has PID 1.
-  ## If set to true, the /pause will run as init process and will reap any zombie PIDs,
-  ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds.
-  ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param controller.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
-  ## @param controller.updateStrategy.type Argo CD statefulset strategy type
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate or OnDelete
-    ##
-    type: RollingUpdate
-  ## @param controller.priorityClassName Argo CD pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param controller.runtimeClassName Name of the runtime class to be used by pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
-  ##
-  runtimeClassName: ""
-  ## @param controller.lifecycleHooks for the Argo CD container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param controller.podManagementPolicy podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
-  ##
-  podManagementPolicy: ""
-  ## @param controller.extraEnvVars Array with extra environment variables to add to Argo CD nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param controller.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param controller.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param controller.extraVolumes Optionally specify extra list of additional volumes for the Argo CD pod(s)
-  ##
-  extraVolumes: []
-  ## @param controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param controller.sidecars Add additional sidecar containers to the Argo CD pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param controller.initContainers Add additional init containers to the Argo CD pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param controller.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param controller.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param controller.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `controller.pdb.minAvailable` and `controller.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
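-    ## e.g. (illustrative): keep at least half of the controller pods available:
-    ## minAvailable: "50%"
-    ##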
-## @section Argo CD ApplicationSet controller parameters
-
-## ApplicationSet controller
-##
-applicationSet:
-  ## @param applicationSet.enabled Enable ApplicationSet controller
-  ##
-  enabled: false
-  ## @param applicationSet.replicaCount The number of ApplicationSet controller pods to run
-  ##
-  replicaCount: 1
-  ## @param applicationSet.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## Arguments that will be used by default for the applicationSet controller
-  ## @param applicationSet.defaultArgs.enableLeaderElection Enable leader election
-  ## @param applicationSet.defaultArgs.policy Default policy
-  ## @param applicationSet.defaultArgs.debug Enable debug mode
-  ## @param applicationSet.defaultArgs.dryRun Enable dry-run mode
-  ##
-  defaultArgs:
-    enableLeaderElection: false
-    policy: "sync"
-    debug: false
-    dryRun: false
-  ## @param applicationSet.args Override default container args (useful when using custom images). Overrides the defaultArgs.
-  ##
-  args: []
-  ## @param applicationSet.extraArgs Add extra arguments to the default arguments for the Argo CD applicationSet controller
-  ##
-  extraArgs: []
-  ## Argo CD applicationSet controller log format: text|json
-  ## @param applicationSet.logFormat Format for the Argo CD applicationSet controller logs. Options: [text, json]
-  ##
-  logFormat: text
-  ## Argo CD applicationSet controller log level
-  ## @param applicationSet.logLevel Log level for the Argo CD applicationSet controller
-  ##
-  logLevel: info
-  ## Argo CD applicationSet controller ports
-  ## @param applicationSet.containerPorts.metrics Argo CD applicationSet controller metrics port number
-  ## @param applicationSet.containerPorts.probe Argo CD applicationSet controller probe port number
-  ##
-  containerPorts:
-    metrics: 8085
-    probe: 8081
-  ## Metrics configuration for Argo CD applicationSet controller
-  ##
-  metrics:
-    ## @param applicationSet.metrics.enabled Enable Argo CD applicationSet controller metrics
-    ##
-    enabled: false
-    service:
-      ## @param applicationSet.metrics.service.type Argo CD applicationSet controller service type
-      ##
-      type: ClusterIP
-      ## @param applicationSet.metrics.service.ports.metrics Argo CD applicationSet controller metrics service port
-      ##
-      ports:
-        metrics: 8085
-      ## Node ports to expose
-      ## @param applicationSet.metrics.service.nodePorts.metrics Node port for the applicationSet controller service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param applicationSet.metrics.service.clusterIP Argo CD applicationSet controller metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param applicationSet.metrics.service.loadBalancerIP Argo CD applicationSet controller service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param applicationSet.metrics.service.loadBalancerSourceRanges Argo CD applicationSet controller service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param applicationSet.metrics.service.externalTrafficPolicy Argo CD applicationSet controller service external traffic policy
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param applicationSet.metrics.service.annotations Additional custom annotations for Argo CD applicationSet controller service
-      ##
-      annotations: {}
-      ## @param applicationSet.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param applicationSet.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      ##
-      sessionAffinityConfig: {}
-    ## Argo CD applicationSet controller metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param applicationSet.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param applicationSet.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param applicationSet.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
-      ##
-      jobLabel: ""
-      ## @param applicationSet.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param applicationSet.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param applicationSet.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      ## @param applicationSet.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param applicationSet.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param applicationSet.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-  ## Argo CD applicationSet controller service parameters
-  ##
-  service:
-    ## @param applicationSet.service.type Argo CD applicationSet controller service type
-    ##
-    type: ClusterIP
-    ## @param applicationSet.service.ports.webhook Argo CD applicationSet controller service port
-    ##
-    ports:
-      webhook: 7000
-    ## Node ports to expose
-    ## @param applicationSet.service.nodePorts.webhook Node port for Argo CD applicationSet controller service
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      webhook: ""
-    ## @param applicationSet.service.clusterIP Argo CD applicationSet controller service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param applicationSet.service.loadBalancerIP Argo CD applicationSet controller service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerIP: ""
-    ## @param applicationSet.service.loadBalancerSourceRanges Argo CD applicationSet controller service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param applicationSet.service.externalTrafficPolicy Argo CD applicationSet controller service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param applicationSet.service.annotations Additional custom annotations for Argo CD applicationSet controller service
-    ##
-    annotations: {}
-    ## @param applicationSet.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param applicationSet.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param applicationSet.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param applicationSet.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param applicationSet.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param applicationSet.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param applicationSet.networkPolicy.kubeAPIServerPorts [array] List of possible kube-apiserver ports (limit this list to match your cluster's settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
-    ## @param applicationSet.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param applicationSet.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param applicationSet.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param applicationSet.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
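-    ## e.g. (illustrative; allows traffic from a "monitoring" namespace, the pod label is hypothetical):
-    ## ingressNSMatchLabels:
-    ##   kubernetes.io/metadata.name: monitoring
-    ## ingressNSPodMatchLabels:
-    ##   app.kubernetes.io/name: prometheus
-    ##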
-  ## ServiceAccount configuration for the Argo CD applicationSet controller
-  ##
-  serviceAccount:
-    ## @param applicationSet.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param applicationSet.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param applicationSet.serviceAccount.automountServiceAccountToken Automount service account token for the applicationSet controller service account
-    ##
-    automountServiceAccountToken: false
-    ## @param applicationSet.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## Enable admin clusterrole resources. Allows Argo CD applicationSet controller to have access to multiple namespaces
-  ## @param applicationSet.clusterAdminAccess Enable K8s cluster admin access for the applicationSet controller
-  ##
-  clusterAdminAccess: false
-  ## Enable Custom Rules for Argo CD applicationSet controller cluster role
-  ## @param applicationSet.clusterRoleRules Use custom rules for Argo CD applicationSet controller's cluster role
-  ##
-  clusterRoleRules: []
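-  ## e.g. (an illustrative, read-only rule set; scope the rules to your actual needs):
-  ## clusterRoleRules:
-  ##   - apiGroups: [""]
-  ##     resources: ["secrets", "configmaps"]
-  ##     verbs: ["get", "list", "watch"]
-  ##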
-  ## @param applicationSet.podAffinityPreset Pod affinity preset. Ignored if `applicationSet.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param applicationSet.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `applicationSet.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node applicationSet.affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param applicationSet.nodeAffinityPreset.type Node affinity preset type. Ignored if `applicationSet.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param applicationSet.nodeAffinityPreset.key Node label key to match. Ignored if `applicationSet.affinity` is set
-    ##
-    key: ""
-    ## @param applicationSet.nodeAffinityPreset.values Node label values to match. Ignored if `applicationSet.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param applicationSet.affinity Affinity for Argo CD applicationSet controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `applicationSet.podAffinityPreset`, `applicationSet.podAntiAffinityPreset`, and `applicationSet.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param applicationSet.podAnnotations Annotations for Argo CD applicationSet controller pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param applicationSet.podLabels Extra labels for Argo CD applicationSet controller pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param applicationSet.containerSecurityContext.enabled Enabled Argo CD applicationSet controller containers' Security Context
-  ## @param applicationSet.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param applicationSet.containerSecurityContext.runAsUser Set Argo CD applicationSet controller containers' Security Context runAsUser
-  ## @param applicationSet.containerSecurityContext.runAsGroup Set Argo CD applicationSet controller containers' Security Context runAsGroup
-  ## @param applicationSet.containerSecurityContext.allowPrivilegeEscalation Set Argo CD applicationSet controller containers' Security Context allowPrivilegeEscalation
-  ## @param applicationSet.containerSecurityContext.capabilities.drop Set Argo CD applicationSet controller containers' Security Context capabilities to be dropped
-  ## @param applicationSet.containerSecurityContext.readOnlyRootFilesystem Set Argo CD applicationSet controller containers' Security Context readOnlyRootFilesystem
-  ## @param applicationSet.containerSecurityContext.runAsNonRoot Set Argo CD applicationSet controller container's Security Context runAsNonRoot
-  ## @param applicationSet.containerSecurityContext.privileged Set applicationSet container's Security Context privileged
-  ## @param applicationSet.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param applicationSet.livenessProbe.enabled Enable livenessProbe on Argo CD applicationSet controller nodes
-  ## @param applicationSet.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param applicationSet.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param applicationSet.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param applicationSet.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param applicationSet.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param applicationSet.readinessProbe.enabled Enable readinessProbe on Argo CD applicationSet controller nodes
-  ## @param applicationSet.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param applicationSet.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param applicationSet.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param applicationSet.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param applicationSet.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param applicationSet.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param applicationSet.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
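-  ## e.g. (an illustrative sketch; assumes the container port is named "probe" to match
-  ## applicationSet.containerPorts.probe and that /healthz is served on it):
-  ## customReadinessProbe:
-  ##   httpGet:
-  ##     path: /healthz
-  ##     port: probe
-  ##   periodSeconds: 30
-  ##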
-  ## Argo CD applicationSet controller resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param applicationSet.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if applicationSet.resources is set (applicationSet.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param applicationSet.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param applicationSet.podSecurityContext.enabled Enabled Argo CD applicationSet controller pods' Security Context
-  ## @param applicationSet.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param applicationSet.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param applicationSet.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param applicationSet.podSecurityContext.fsGroup Set Argo CD applicationSet controller pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## @param applicationSet.nodeSelector Node labels for Argo CD applicationSet controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
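-  ## e.g. (illustrative):
-  ## nodeSelector:
-  ##   kubernetes.io/os: linux
-  ##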
-  ## @param applicationSet.tolerations Tolerations for Argo CD applicationSet controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
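-  ## e.g. (illustrative; the taint key and value are hypothetical):
-  ## tolerations:
-  ##   - key: "dedicated"
-  ##     operator: "Equal"
-  ##     value: "argocd"
-  ##     effect: "NoSchedule"
-  ##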
-  ## @param applicationSet.updateStrategy.type Argo CD applicationSet controller statefulset strategy type
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate or OnDelete
-    ##
-    type: RollingUpdate
-  ## @param applicationSet.priorityClassName Argo CD applicationSet controller pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param applicationSet.extraVolumes Optionally specify extra list of additional volumes for the Argo CD applicationSet controller pod(s)
-  ##
-  extraVolumes: []
-  ## @param applicationSet.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD applicationSet controller container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param applicationSet.extraEnvVars Array with extra environment variables to add to Argo CD applicationSet controller nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param applicationSet.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD applicationSet controller nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param applicationSet.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD applicationSet controller nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## Webhook for the Git Generator
-  ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration
-  ##
-  webhook:
-    ingress:
-      ## @param applicationSet.webhook.ingress.enabled Enable an ingress resource for Webhooks
-      ##
-      enabled: false
-      ## DEPRECATED: Use applicationSet.webhook.ingress.annotations instead of applicationSet.webhook.ingress.certManager
-      ## certManager: false
-      ##
-      ## @param applicationSet.webhook.ingress.annotations Additional ingress annotations
-      ##
-      annotations: {}
-      ## @param applicationSet.webhook.ingress.labels Additional ingress labels
-      ##
-      labels: {}
-      ## @param applicationSet.webhook.ingress.ingressClassName Defines which ingress controller will implement the resource
-      ##
-      ingressClassName: ""
-      ## @param applicationSet.webhook.ingress.hostname Ingress hostname for the Argo CD applicationSet ingress
-      ## Hostname must be provided if Ingress is enabled.
-      ##
-      hostname: ""
-      ## @param applicationSet.webhook.ingress.path Argo CD applicationSet ingress path
-      ##
-      path: /api/webhook
-      ## @param applicationSet.webhook.ingress.pathType Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
-      ##
-      pathType: Prefix
-      ## @param applicationSet.webhook.ingress.extraHosts Extra hosts array for the Argo CD applicationSet ingress
-      ## The list of additional hostnames to be covered with this ingress record.
-      ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-      ##
-      extraHosts: []
-      ## @param applicationSet.webhook.ingress.extraPaths Extra paths for the Argo CD applicationSet ingress
-      ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
-      ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
-      ##
-      extraPaths: []
-      ## - path: /*
-      ##   backend:
-      ##     serviceName: ssl-redirect
-      ##     servicePort: use-annotation
-      ##
-      ## @param applicationSet.webhook.ingress.extraTls Extra TLS configuration for the Argo CD applicationSet ingress
-      ## The tls configuration for additional hostnames to be covered with this ingress record.
-      ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-      ##
-      extraTls: []
-      ## - hosts:
-      ##     - argocd.server.local
-      ##   secretName: argocd.server.local-tls
-      ##
-      ## @param applicationSet.webhook.ingress.tls Ingress TLS configuration
-      ##
-      tls: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param applicationSet.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param applicationSet.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param applicationSet.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `applicationSet.pdb.minAvailable` and `applicationSet.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-## @section Argo CD notifications controller parameters
-
-## notifications controller
-##
-notifications:
-  ## @param notifications.enabled Enable notifications controller
-  ##
-  enabled: false
-  ## @param notifications.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param notifications.args Override default container args (useful when using custom images).
-  ##
-  args: []
-  ## @param notifications.extraArgs Add extra arguments to the default arguments for the Argo CD notifications controller
-  ##
-  extraArgs: []
-  ## @param notifications.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: true
-  ## Argo CD notifications controller log format: text|json
-  ## @param notifications.logFormat Format for the Argo CD notifications controller logs. Options: [text, json]
-  ##
-  logFormat: text
-  ## Argo CD notifications controller log level
-  ## @param notifications.logLevel Log level for the Argo CD notifications controller
-  ##
-  logLevel: info
-  ## Argo CD notifications controller ports
-  ## @param notifications.containerPorts.metrics Argo CD notifications controller metrics port number
-  ##
-  containerPorts:
-    metrics: 8085
-  ## Metrics configuration for Argo CD notifications controller
-  ##
-  metrics:
-    ## @param notifications.metrics.enabled Enable Argo CD notifications controller metrics
-    ##
-    enabled: false
-    service:
-      ## @param notifications.metrics.service.type Argo CD notifications controller service type
-      ##
-      type: ClusterIP
-      ## @param notifications.metrics.service.ports.metrics Argo CD notifications controller metrics service port
-      ##
-      ports:
-        metrics: 8085
-      ## Node ports to expose
-      ## @param notifications.metrics.service.nodePorts.metrics Node port for the notifications controller service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param notifications.metrics.service.clusterIP Argo CD notifications controller metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param notifications.metrics.service.loadBalancerIP Argo CD notifications controller service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param notifications.metrics.service.loadBalancerSourceRanges Argo CD notifications controller service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param notifications.metrics.service.externalTrafficPolicy Argo CD notifications controller service external traffic policy
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param notifications.metrics.service.annotations Additional custom annotations for Argo CD notifications controller service
-      ##
-      annotations: {}
-      ## @param notifications.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param notifications.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      ##
-      sessionAffinityConfig: {}
-    ## Argo CD notifications controller metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param notifications.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param notifications.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param notifications.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
-      ##
-      jobLabel: ""
-      ## @param notifications.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param notifications.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param notifications.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      ## @param notifications.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param notifications.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param notifications.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param notifications.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param notifications.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param notifications.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param notifications.networkPolicy.kubeAPIServerPorts [array] List of possible kube-apiserver ports (limit this list to match your cluster's settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
-    ## @param notifications.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param notifications.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param notifications.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param notifications.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## ServiceAccount configuration for the Argo CD notifications controller
-  ##
-  serviceAccount:
-    ## @param notifications.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param notifications.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param notifications.serviceAccount.automountServiceAccountToken Automount service account token for the notifications controller service account
-    ##
-    automountServiceAccountToken: false
-    ## @param notifications.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## Enable admin clusterrole resources. Allows Argo CD notifications controller to have access to multiple namespaces
-  ## @param notifications.clusterAdminAccess Enable K8s cluster admin access for the notifications controller
-  ##
-  clusterAdminAccess: false
-  ## Enable Custom Rules for Argo CD notifications controller cluster role
-  ## @param notifications.clusterRoleRules Use custom rules for notifications controller's cluster role
-  ##
-  clusterRoleRules: []
-  ## @param notifications.podAffinityPreset Pod affinity preset. Ignored if `notifications.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param notifications.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `notifications.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node notifications.affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param notifications.nodeAffinityPreset.type Node affinity preset type. Ignored if `notifications.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param notifications.nodeAffinityPreset.key Node label key to match. Ignored if `notifications.affinity` is set
-    ##
-    key: ""
-    ## @param notifications.nodeAffinityPreset.values Node label values to match. Ignored if `notifications.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param notifications.affinity Affinity for Argo CD notifications controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `notifications.podAffinityPreset`, `notifications.podAntiAffinityPreset`, and `notifications.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param notifications.podAnnotations Annotations for Argo CD notifications controller pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param notifications.podLabels Extra labels for Argo CD notifications controller pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param notifications.containerSecurityContext.enabled Enabled Argo CD notifications controller containers' Security Context
-  ## @param notifications.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param notifications.containerSecurityContext.runAsUser Set Argo CD notifications controller containers' Security Context runAsUser
-  ## @param notifications.containerSecurityContext.runAsGroup Set Argo CD notifications controller containers' Security Context runAsGroup
-  ## @param notifications.containerSecurityContext.allowPrivilegeEscalation Set Argo CD notifications controller containers' Security Context allowPrivilegeEscalation
-  ## @param notifications.containerSecurityContext.capabilities.drop Set Argo CD notifications controller containers' Security Context capabilities to be dropped
-  ## @param notifications.containerSecurityContext.readOnlyRootFilesystem Set Argo CD notifications controller containers' Security Context readOnlyRootFilesystem
-  ## @param notifications.containerSecurityContext.runAsNonRoot Set Argo CD notifications controller container's Security Context runAsNonRoot
-  ## @param notifications.containerSecurityContext.privileged Set notifications container's Security Context privileged
-  ## @param notifications.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Argo CD notifications controller resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param notifications.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if notifications.resources is set (notifications.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param notifications.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param notifications.podSecurityContext.enabled Enabled Argo CD notifications controller pods' Security Context
-  ## @param notifications.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param notifications.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param notifications.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param notifications.podSecurityContext.fsGroup Set Argo CD notifications controller pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## @param notifications.nodeSelector Node labels for Argo CD notifications controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param notifications.tolerations Tolerations for Argo CD notifications controller pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param notifications.priorityClassName Argo CD notifications controller pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param notifications.extraVolumes Optionally specify extra list of additional volumes for the Argo CD notifications controller pod(s)
-  ##
-  extraVolumes: []
-  ## @param notifications.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD notifications controller container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param notifications.extraEnvVars Array with extra environment variables to add to Argo CD notifications controller nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param notifications.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD notifications controller nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param notifications.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD notifications controller nodes
-  ##
-  extraEnvVarsSecret: ""
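-  ## e.g. (illustrative; "argocd-notifications-env" is a hypothetical pre-existing Secret):
-  ## extraEnvVarsSecret: "argocd-notifications-env"
-  ##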
-  ## Configure extra options for Notification containers' startup, liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param notifications.startupProbe.enabled Enable startupProbe on Notification nodes
-  ## @param notifications.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param notifications.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param notifications.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param notifications.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param notifications.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param notifications.livenessProbe.enabled Enable livenessProbe on Notification nodes
-  ## @param notifications.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param notifications.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param notifications.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param notifications.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param notifications.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param notifications.readinessProbe.enabled Enable readinessProbe on Notification nodes
-  ## @param notifications.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param notifications.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param notifications.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param notifications.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param notifications.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param notifications.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param notifications.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param notifications.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Webhook for the notifications controller
-  ## Ref: https://argocd-notifications.readthedocs.io/en/master/Generators-Git/#webhook-configuration
-  ##
-  webhook:
-    ingress:
-      ## @param notifications.webhook.ingress.enabled Enable an ingress resource for Webhooks
-      ##
-      enabled: false
-      ## DEPRECATED: Use notifications.webhook.ingress.annotations instead of notifications.webhook.ingress.certManager
-      ## certManager: false
-      ##
-      ## @param notifications.webhook.ingress.annotations Additional ingress annotations
-      ##
-      annotations: {}
-      ## @param notifications.webhook.ingress.labels Additional ingress labels
-      ##
-      labels: {}
-      ## @param notifications.webhook.ingress.ingressClassName Defines which ingress controller will implement the resource
-      ##
-      ingressClassName: ""
-      ## @param notifications.webhook.ingress.hostname Ingress hostname for the Argo CD notifications ingress
-      ## Hostname must be provided if Ingress is enabled.
-      ##
-      hostname: ""
-      ## @param notifications.webhook.ingress.path Argo CD notifications ingress path
-      ##
-      path: /api/webhook
-      ## @param notifications.webhook.ingress.pathType Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
-      ##
-      pathType: Prefix
-      ## @param notifications.webhook.ingress.extraHosts Extra hosts array for the Argo CD notifications ingress
-      ## The list of additional hostnames to be covered with this ingress record.
-      ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-      ##
-      extraHosts: []
-      ## @param notifications.webhook.ingress.extraPaths Extra paths for the Argo CD notifications ingress
-      ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
-      ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
-      ##
-      extraPaths: []
-      ## - path: /*
-      ##   backend:
-      ##     serviceName: ssl-redirect
-      ##     servicePort: use-annotation
-      ##
-      ## @param notifications.webhook.ingress.extraTls Extra TLS configuration for the Argo CD notifications ingress
-      ## The tls configuration for additional hostnames to be covered with this ingress record.
-      ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-      ##
-      extraTls: []
-      ## - hosts:
-      ##     - argocd.server.local
-      ##   secretName: argocd.server.local-tls
-      ##
-      ## @param notifications.webhook.ingress.tls Ingress TLS configuration
-      ##
-      tls: []
-  ## The optional bot component simplifies managing subscriptions
-  ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/
-  bots:
-    slack:
-      ## @param notifications.bots.slack.enabled Enable the Argo CD Slack bot
-      ##
-      enabled: false
-      ## @param notifications.bots.slack.command Override default container command (useful when using custom images)
-      ##
-      command: []
-      ## @param notifications.bots.slack.args Override default container args (useful when using custom images).
-      ##
-      args: []
-      ## @param notifications.bots.slack.extraArgs Add extra arguments to the default arguments for the Argo CD Slack bot
-      ##
-      extraArgs: []
-      ## Argo CD Slack bot service parameters
-      ##
-      service:
-        ## @param notifications.bots.slack.service.type Argo CD Slack bot service type
-        ##
-        type: LoadBalancer
-        ## @param notifications.bots.slack.service.ports.http Argo CD Slack bot service port
-        ##
-        ports:
-          http: 80
-        ## Node ports to expose
-        ## @param notifications.bots.slack.service.nodePorts.http Node port for Argo CD Slack bot service
-        ## NOTE: choose port between <30000-32767>
-        ##
-        nodePorts:
-          http: ""
-        ## @param notifications.bots.slack.service.clusterIP Argo CD Slack bot service Cluster IP
-        ## e.g.:
-        ## clusterIP: None
-        ##
-        clusterIP: ""
-        ## @param notifications.bots.slack.service.loadBalancerIP Argo CD Slack bot service Load Balancer IP
-        ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-        ##
-        loadBalancerIP: ""
-        ## @param notifications.bots.slack.service.loadBalancerSourceRanges Argo CD Slack bot service Load Balancer sources
-        ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-        ## e.g:
-        ## loadBalancerSourceRanges:
-        ##   - 10.10.10.0/24
-        ##
-        loadBalancerSourceRanges: []
-        ## @param notifications.bots.slack.service.externalTrafficPolicy Argo CD Slack bot service external traffic policy
-        ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-        ##
-        externalTrafficPolicy: Cluster
-        ## @param notifications.bots.slack.service.annotations Additional custom annotations for Argo CD Slack bot service
-        ##
-        annotations: {}
-        ## @param notifications.bots.slack.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-        ##
-        extraPorts: []
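-        ## e.g. (illustrative; exposes the Slack bot metrics port, assuming the container port is named "metrics"):
-        ## extraPorts:
-        ##   - name: metrics
-        ##     port: 9001
-        ##     targetPort: metrics
-        ##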
-        ## @param notifications.bots.slack.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-        ## If "ClientIP", consecutive client requests will be directed to the same Pod
-        ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-        ##
-        sessionAffinity: None
-        ## @param notifications.bots.slack.service.sessionAffinityConfig Additional settings for the sessionAffinity
-        ## sessionAffinityConfig:
-        ##   clientIP:
-        ##     timeoutSeconds: 300
-        ##
-        sessionAffinityConfig: {}
-      ## Network Policies
-      ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-      ##
-      networkPolicy:
-        ## @param notifications.bots.slack.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-        ##
-        enabled: true
-        ## @param notifications.bots.slack.networkPolicy.allowExternal Don't require server label for connections
-        ## The Policy model to apply. When set to false, only pods with the correct
-        ## server label will have network access to the ports server is listening
-        ## on. When true, server will accept connections from any source
-        ## (with the correct destination port).
-        ##
-        allowExternal: true
-        ## @param notifications.bots.slack.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
-        ##
-        allowExternalEgress: true
-        ## @param notifications.bots.slack.networkPolicy.kubeAPIServerPorts [array] List of possible kube-apiserver ports (limit this list to match your cluster's settings to increase security)
-        ##
-        kubeAPIServerPorts: [443, 6443, 8443]
-        ## @param notifications.bots.slack.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-        ## e.g:
-        ## extraIngress:
-        ##   - ports:
-        ##       - port: 1234
-        ##     from:
-        ##       - podSelector:
-        ##           - matchLabels:
-        ##               - role: frontend
-        ##       - podSelector:
-        ##           - matchExpressions:
-        ##               - key: role
-        ##                 operator: In
-        ##                 values:
-        ##                   - frontend
-        extraIngress: []
-        ## @param notifications.bots.slack.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-        ## e.g:
-        ## extraEgress:
-        ##   - ports:
-        ##       - port: 1234
-        ##     to:
-        ##       - podSelector:
-        ##           - matchLabels:
-        ##               - role: frontend
-        ##       - podSelector:
-        ##           - matchExpressions:
-        ##               - key: role
-        ##                 operator: In
-        ##                 values:
-        ##                   - frontend
-        ##
-        extraEgress: []
-        ## @param notifications.bots.slack.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-        ## @param notifications.bots.slack.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-        ##
-        ingressNSMatchLabels: {}
-        ingressNSPodMatchLabels: {}
-      ## ServiceAccount configuration for the Argo CD Slack bot
-      ##
-      serviceAccount:
-        ## @param notifications.bots.slack.serviceAccount.create Specifies whether a ServiceAccount should be created
-        ##
-        create: true
-        ## @param notifications.bots.slack.serviceAccount.name The name of the ServiceAccount to use.
-        ## If not set and create is true, a name is generated using the common.names.fullname template
-        ##
-        name: ""
-        ## @param notifications.bots.slack.serviceAccount.automountServiceAccountToken Automount service account token for the Slack bot service account
-        ##
-        automountServiceAccountToken: false
-        ## @param notifications.bots.slack.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-        ##
-        annotations: {}
-      ## @param notifications.bots.slack.podAffinityPreset Pod affinity preset. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard`
-      ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-      ##
-      podAffinityPreset: ""
-      ## @param notifications.bots.slack.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard`
-      ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-      ##
-      podAntiAffinityPreset: soft
-      ## Node notifications.bots.slack.affinity preset
-      ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-      ##
-      nodeAffinityPreset:
-        ## @param notifications.bots.slack.nodeAffinityPreset.type Node affinity preset type. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard`
-        ##
-        type: ""
-        ## @param notifications.bots.slack.nodeAffinityPreset.key Node label key to match. Ignored if `notifications.bots.slack.affinity` is set
-        ##
-        key: ""
-        ## @param notifications.bots.slack.nodeAffinityPreset.values Node label values to match. Ignored if `notifications.bots.slack.affinity` is set
-        ## E.g.
-        ## values:
-        ##   - e2e-az1
-        ##   - e2e-az2
-        ##
-        values: []
-      ## Argo CD Slack Bot controller ports
-      ## @param notifications.bots.slack.containerPorts.metrics Slack Bot controller metrics port number
-      ##
-      containerPorts:
-        metrics: 9001
-      ## Configure extra options for Slack Bot containers' liveness and readiness probes
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-      ## @param notifications.bots.slack.startupProbe.enabled Enable startupProbe on Slack Bot nodes
-      ## @param notifications.bots.slack.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-      ## @param notifications.bots.slack.startupProbe.periodSeconds Period seconds for startupProbe
-      ## @param notifications.bots.slack.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-      ## @param notifications.bots.slack.startupProbe.failureThreshold Failure threshold for startupProbe
-      ## @param notifications.bots.slack.startupProbe.successThreshold Success threshold for startupProbe
-      ##
-      startupProbe:
-        enabled: false
-        initialDelaySeconds: 10
-        periodSeconds: 10
-        timeoutSeconds: 1
-        failureThreshold: 3
-        successThreshold: 1
-      ## @param notifications.bots.slack.livenessProbe.enabled Enable livenessProbe on Slack Bot nodes
-      ## @param notifications.bots.slack.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-      ## @param notifications.bots.slack.livenessProbe.periodSeconds Period seconds for livenessProbe
-      ## @param notifications.bots.slack.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-      ## @param notifications.bots.slack.livenessProbe.failureThreshold Failure threshold for livenessProbe
-      ## @param notifications.bots.slack.livenessProbe.successThreshold Success threshold for livenessProbe
-      ##
-      livenessProbe:
-        enabled: true
-        initialDelaySeconds: 10
-        periodSeconds: 10
-        timeoutSeconds: 1
-        failureThreshold: 3
-        successThreshold: 1
-      ## @param notifications.bots.slack.readinessProbe.enabled Enable readinessProbe on Slack Bot nodes
-      ## @param notifications.bots.slack.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-      ## @param notifications.bots.slack.readinessProbe.periodSeconds Period seconds for readinessProbe
-      ## @param notifications.bots.slack.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-      ## @param notifications.bots.slack.readinessProbe.failureThreshold Failure threshold for readinessProbe
-      ## @param notifications.bots.slack.readinessProbe.successThreshold Success threshold for readinessProbe
-      ##
-      readinessProbe:
-        enabled: true
-        initialDelaySeconds: 10
-        periodSeconds: 10
-        timeoutSeconds: 1
-        failureThreshold: 3
-        successThreshold: 1
-      ## @param notifications.bots.slack.customStartupProbe Custom startupProbe that overrides the default one
-      ##
-      customStartupProbe: {}
-      ## @param notifications.bots.slack.customLivenessProbe Custom livenessProbe that overrides the default one
-      ##
-      customLivenessProbe: {}
-      ## @param notifications.bots.slack.customReadinessProbe Custom readinessProbe that overrides the default one
-      ##
-      customReadinessProbe: {}
-      ## @param notifications.bots.slack.affinity Affinity for Argo CD Slack bot pods assignment
-      ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-      ## NOTE: `notifications.bots.slack.podAffinityPreset`, `notifications.bots.slack.podAntiAffinityPreset`, and `notifications.bots.slack.nodeAffinityPreset` will be ignored when it's set
-      ##
-      affinity: {}
-      ## @param notifications.bots.slack.podAnnotations Annotations for Argo CD Slack bot pods
-      ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-      ##
-      podAnnotations: {}
-      ## @param notifications.bots.slack.podLabels Extra labels for Argo CD Slack bot pods
-      ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-      ##
-      podLabels: {}
-      ## Configure Container Security Context
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-      ## @param notifications.bots.slack.containerSecurityContext.enabled Enabled Argo CD Slack bot containers' Security Context
-      ## @param notifications.bots.slack.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-      ## @param notifications.bots.slack.containerSecurityContext.runAsUser Set Argo CD Slack bot containers' Security Context runAsUser
-      ## @param notifications.bots.slack.containerSecurityContext.runAsGroup Set Argo CD Slack bot containers' Security Context runAsGroup
-      ## @param notifications.bots.slack.containerSecurityContext.allowPrivilegeEscalation Set Argo CD Slack bot containers' Security Context allowPrivilegeEscalation
-      ## @param notifications.bots.slack.containerSecurityContext.capabilities.drop Set Argo CD Slack bot containers' Security Context capabilities to be dropped
-      ## @param notifications.bots.slack.containerSecurityContext.readOnlyRootFilesystem Set Argo CD Slack bot containers' Security Context readOnlyRootFilesystem
-      ## @param notifications.bots.slack.containerSecurityContext.runAsNonRoot Set Argo CD Slack bot container's Security Context runAsNonRoot
-      ## @param notifications.bots.slack.containerSecurityContext.privileged Set Argo CD Slack bot container's Security Context privileged
-      ## @param notifications.bots.slack.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-      ##
-      containerSecurityContext:
-        enabled: true
-        seLinuxOptions: {}
-        runAsUser: 1001
-        runAsGroup: 1001
-        runAsNonRoot: true
-        readOnlyRootFilesystem: true
-        allowPrivilegeEscalation: false
-        privileged: false
-        capabilities:
-          drop: ["ALL"]
-        seccompProfile:
-          type: "RuntimeDefault"
-      ## Argo CD Slack bot resource requests and limits
-      ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-      ## @param notifications.bots.slack.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if notifications.bots.slack.resources is set (notifications.bots.slack.resources is recommended for production).
-      ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-      ##
-      resourcesPreset: "nano"
-      ## @param notifications.bots.slack.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-      ## Example:
-      ## resources:
-      ##   requests:
-      ##     cpu: 2
-      ##     memory: 512Mi
-      ##   limits:
-      ##     cpu: 3
-      ##     memory: 1024Mi
-      ##
-      resources: {}
-      ## Configure Pods Security Context
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-      ## @param notifications.bots.slack.podSecurityContext.enabled Enabled Argo CD Slack bot pods' Security Context
-      ## @param notifications.bots.slack.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-      ## @param notifications.bots.slack.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-      ## @param notifications.bots.slack.podSecurityContext.supplementalGroups Set filesystem extra groups
-      ## @param notifications.bots.slack.podSecurityContext.fsGroup Set Argo CD Slack bot pod's Security Context fsGroup
-      ##
-      podSecurityContext:
-        enabled: true
-        fsGroupChangePolicy: Always
-        sysctls: []
-        supplementalGroups: []
-        fsGroup: 1001
-      ## @param notifications.bots.slack.nodeSelector Node labels for Argo CD Slack bot pods assignment
-      ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-      ##
-      nodeSelector: {}
-      ## @param notifications.bots.slack.tolerations Tolerations for Argo CD Slack bot pods assignment
-      ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-      ##
-      tolerations: []
-      ## @param notifications.bots.slack.priorityClassName Argo CD Slack bot pods' priorityClassName
-      ##
-      priorityClassName: ""
-      ## @param notifications.bots.slack.extraVolumes Optionally specify extra list of additional volumes for the Argo CD Slack bot pod(s)
-      ##
-      extraVolumes: []
-      ## @param notifications.bots.slack.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD Slack bot container(s)
-      ##
-      extraVolumeMounts: []
-      ## @param notifications.bots.slack.extraEnvVars Array with extra environment variables to add to Argo CD Slack bot nodes
-      ## e.g:
-      ## extraEnvVars:
-      ##   - name: FOO
-      ##     value: "bar"
-      ##
-      extraEnvVars: []
-      ## @param notifications.bots.slack.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD Slack bot nodes
-      ##
-      extraEnvVarsCM: ""
-      ## @param notifications.bots.slack.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD Slack bot nodes
-      ##
-      extraEnvVarsSecret: ""
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param notifications.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param notifications.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param notifications.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `notifications.pdb.minAvailable` and `notifications.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-## @section Argo CD server Parameters
-
-## Argo CD server configuration
-##
-server:
-  ## @param server.replicaCount Number of Argo CD server replicas to deploy
-  ##
-  replicaCount: 1
-  ## Configure extra options for Argo CD server containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param server.startupProbe.enabled Enable startupProbe on Argo CD server nodes
-  ## @param server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param server.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param server.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param server.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param server.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param server.livenessProbe.enabled Enable livenessProbe on Argo CD server nodes
-  ## @param server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param server.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param server.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param server.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param server.readinessProbe.enabled Enable readinessProbe on Argo CD server nodes
-  ## @param server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param server.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param server.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param server.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param server.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param server.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param server.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Argo CD server resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if server.resources is set (server.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param server.podSecurityContext.enabled Enabled Argo CD server pods' Security Context
-  ## @param server.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param server.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param server.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param server.podSecurityContext.fsGroup Set Argo CD server pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param server.containerSecurityContext.enabled Enabled Argo CD server containers' Security Context
-  ## @param server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param server.containerSecurityContext.runAsUser Set Argo CD server containers' Security Context runAsUser
-  ## @param server.containerSecurityContext.runAsGroup Set Argo CD server containers' Security Context runAsGroup
-  ## @param server.containerSecurityContext.allowPrivilegeEscalation Set Argo CD server containers' Security Context allowPrivilegeEscalation
-  ## @param server.containerSecurityContext.capabilities.drop Set Argo CD server containers' Security Context capabilities to be dropped
-  ## @param server.containerSecurityContext.readOnlyRootFilesystem Set Argo CD server containers' Security Context readOnlyRootFilesystem
-  ## @param server.containerSecurityContext.runAsNonRoot Set Argo CD server containers' Security Context runAsNonRoot
-  ## @param server.containerSecurityContext.privileged Set server container's Security Context privileged
-  ## @param server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Argo CD server deployment autoscaling
-  ## @param server.autoscaling.enabled Enable Argo CD server deployment autoscaling
-  ## @param server.autoscaling.minReplicas Argo CD server deployment autoscaling minimum number of replicas
-  ## @param server.autoscaling.maxReplicas Argo CD server deployment autoscaling maximum number of replicas
-  ## @param server.autoscaling.targetCPU Argo CD server deployment autoscaling target CPU percentage
-  ## @param server.autoscaling.targetMemory Argo CD server deployment autoscaling target memory percentage
-  ##
-  autoscaling:
-    enabled: false
-    minReplicas: 1
-    maxReplicas: 5
-    targetCPU: 50
-    targetMemory: 50
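-  ## e.g., an illustrative override scaling the server between 2 and 6 replicas
-  ## at 60% average CPU (the thresholds are assumptions, tune per workload):
-  ## autoscaling:
-  ##   enabled: true
-  ##   minReplicas: 2
-  ##   maxReplicas: 6
-  ##   targetCPU: 60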
-  ## Redirect all requests to HTTPS
-  ## @param server.insecure Disable HTTPS redirection for Argo CD server
-  ##
-  insecure: false
-  ## @param server.logFormat Argo CD server log format. Options: [text, json]
-  ##
-  logFormat: text
-  ## @param server.logLevel Argo CD server log level
-  ##
-  logLevel: info
-  ## Argo CD server enable config
-  ## @param server.configEnabled Enable Argo CD server config
-  ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml
-  ##
-  configEnabled: true
-  ## Argo CD server URL
-  ## @param server.url Argo CD server base URL. Required when configuring SSO or enabling Dex.
-  ##
-  url: ""
-  ## Argo CD server config. This object will be directly rendered
-  ## @param server.config [object] Argo CD server configuration that will end on the argocd-cm Config Map
-  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/
-  ## E.g:
-  ## repositories:
-  ##   - url: git@github.com:group/repo.git
-  ##     sshPrivateKeySecret:
-  ##       name: secret-name
-  ##       key: sshPrivateKey
-  ##   - type: helm
-  ##     url: https://charts.helm.sh/stable
-  ##     name: stable
-  ##   - type: helm
-  ##     url: https://argoproj.github.io/argo-helm
-  ##     name: argo
-  ## oidc.config:
-  ##   name: AzureAD
-  ##   issuer: https://login.microsoftonline.com/TENANT_ID/v2.0
-  ##   clientID: CLIENT_ID
-  ##   clientSecret: $oidc.azuread.clientSecret
-  ##   requestedIDTokenClaims:
-  ##     groups:
-  ##       essential: true
-  ##   requestedScopes:
-  ##     - openid
-  ##     - profile
-  ##     - email
-  ## dex.config:
-  ##    connectors:
-  ##      # GitHub example
-  ##      - type: github
-  ##        id: github
-  ##        name: GitHub
-  ##        config:
-  ##          clientID: aabbccddeeff00112233
-  ##          clientSecret: $dex.github.clientSecret
-  ##          orgs:
-  ##          - name: your-github-org
-  config:
-    ## Argo CD external base URL. Required when configuring SSO or enabling Dex.
-    ## E.g:
-    ## url: https://argocd.example.com
-    ##
-    url: "{{ `{{ .Values.server.url }}` }}"
-    ## Argo CD instance label key
-    ##
-    application.instanceLabelKey: argocd.argoproj.io/instance
-    ## If Dex is enabled you need to add connectors here
-    ## dex.config: |
-    ##  connectors: []
-    ##
-    dex.config: ""
-  ## Configure the ingress for the Argo CD server
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-  ## @param server.ingress.enabled Enable the creation of an ingress for the Argo CD server
-  ## @param server.ingress.pathType Path type for the Argo CD server ingress
-  ## @param server.ingress.apiVersion Ingress API version for the Argo CD server ingress
-  ## @param server.ingress.hostname Ingress hostname for the Argo CD server ingress
-  ## @param server.ingress.annotations Annotations for the Argo CD server ingress. To enable certificate autogeneration, place here your cert-manager annotations.
-  ## @param server.ingress.tls Enable TLS for the Argo CD server ingress
-  ## @param server.ingress.extraHosts Extra hosts array for the Argo CD server ingress
-  ## @param server.ingress.path Path array for the Argo CD server ingress
-  ## @param server.ingress.extraPaths Extra paths for the Argo CD server ingress
-  ## @param server.ingress.extraTls Extra TLS configuration for the Argo CD server ingress
-  ## @param server.ingress.secrets Secrets array to mount into the Ingress
-  ## @param server.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ##
-  ingress:
-    ## Set to true to enable ingress record generation
-    ##
-    enabled: true
-    ## @param server.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-    ##
-    selfSigned: false
-    ## DEPRECATED: Use server.ingress.annotations instead of server.ingress.certManager
-    ## certManager: false
-    ##
-
-    ## Ingress Path type
-    ##
-    pathType: ImplementationSpecific
-    ## Override API Version (automatically detected if not set)
-    ##
-    apiVersion: ""
-    ## When the ingress is enabled, a host pointing to this will be created
-    ##
-    hostname: {{ .Values.globals.argocd.hostname }}
-    ## The Path to Argo CD server. You may need to set this to '/*' in order to use this
-    ## with ALB ingress controllers.
-    ##
-    path: /
-    ## For a full list of possible ingress annotations, please see
-    ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-    ## Use this parameter to set the required annotations for cert-manager, see
-    ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-    ##
-    ## e.g:
-    ## annotations:
-    ##   kubernetes.io/ingress.class: nginx
-    ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-    ##
-    annotations:
-      kubernetes.io/ingress.class: {{ .Values.globals.argocd.ingressClass }}
-      cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-    ## Enable TLS configuration for the hostname defined at ingress.hostname parameter
-    ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
-    ##
-    tls: true
-    ## The list of additional hostnames to be covered with this ingress record.
-    ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-    extraHosts: []
-    ## - name: argocd.server.local
-    ##   path: /
-    ##
-
-    ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
-    ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
-    extraPaths: []
-    ## - path: /*
-    ##   backend:
-    ##     serviceName: ssl-redirect
-    ##     servicePort: use-annotation
-    ##
-
-    ## The tls configuration for additional hostnames to be covered with this ingress record.
-    ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-    extraTls: []
-    ## - hosts:
-    ##     - argocd.server.local
-    ##   secretName: argocd.server.local-tls
-    ##
-
-    ## If you're providing your own certificates, please use this to add the certificates as secrets
-    ## key and certificate should start with -----BEGIN CERTIFICATE----- or
-    ## REDACTED
-    ##
-    ## name should line up with a tlsSecret set further up
-    ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
-    ##
-    ## It is also possible to create and manage the certificates outside of this helm chart
-    ## Please see README.md for more information
-    ##
-    secrets: []
-    ## - name: argocd.server.local-tls
-    ##   key:
-    ##   certificate:
-    ##
-
-    ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-    ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-    ##
-    ingressClassName: {{ .Values.globals.argocd.ingressClass }}
-    ## @param server.ingress.extraRules Additional rules to be covered with this ingress record
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-    ## e.g:
-    ## extraRules:
-    ##   - host: example.local
-    ##     http:
-    ##       paths:
-    ##         - path: /
-    ##           pathType: ImplementationSpecific
-    ##           backend:
-    ##             service:
-    ##               name: example-svc
-    ##               port:
-    ##                 name: http
-    ##
-    extraRules: []
-  ## Metrics configuration for Argo CD server
-  ##
-  metrics:
-    ## Enable metrics for Argo CD server
-    ## @param server.metrics.enabled Enable metrics for the Argo CD server
-    ##
-    enabled: true
-    service:
-      ## @param server.metrics.service.type Argo CD server service type
-      ##
-      type: ClusterIP
-      ## @param server.metrics.service.ports.metrics Argo CD server metrics service port
-      ##
-      ports:
-        metrics: 8083
-      ## Node ports to expose
-      ## @param server.metrics.service.nodePorts.metrics Node port for Argo CD server metrics service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param server.metrics.service.clusterIP Argo CD server metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param server.metrics.service.loadBalancerIP Argo CD server service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param server.metrics.service.loadBalancerSourceRanges Argo CD server service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param server.metrics.service.externalTrafficPolicy Argo CD server service external traffic policy
-      ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param server.metrics.service.annotations Additional custom annotations for Argo CD server service
-      ##
-      annotations: {}
-      ## @param server.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param server.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      sessionAffinityConfig: {}
-    ## Argo CD server metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param server.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param server.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param server.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
-      ##
-      jobLabel: ""
-      ## @param server.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param server.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param server.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      ## @param server.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param server.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param server.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-  ## Configure the ingress resource that allows you to access the Argo CD gRPC API
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/
-  ## @param server.ingressGrpc.enabled Enable the creation of an ingress for the Argo CD gRPC server
-  ## @param server.ingressGrpc.pathType Path type for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.apiVersion Ingress API version for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.hostname Ingress hostname for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.annotations Annotations for the Argo CD gRPC server ingress. To enable certificate autogeneration, place here your cert-manager annotations
-  ## @param server.ingressGrpc.tls Enable TLS for the Argo CD server ingress
-  ## @param server.ingressGrpc.extraHosts Extra hosts array for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.path Path array for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.extraPaths Extra paths for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.extraTls Extra TLS configuration for the Argo CD gRPC server ingress
-  ## @param server.ingressGrpc.secrets Secrets array to mount into the Ingress
-  ## @param server.ingressGrpc.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ##
-  ingressGrpc:
-    ## Set to true to enable ingress record generation
-    ##
-    enabled: false
-    ## @param server.ingressGrpc.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-    ##
-    selfSigned: false
-    ## DEPRECATED: Use server.ingressGrpc.annotations instead of server.ingressGrpc.certManager
-    ## certManager: false
-    ##
-
-    ## Ingress Path type
-    ##
-    pathType: ImplementationSpecific
-    ## Override API Version (automatically detected if not set)
-    ##
-    apiVersion: ""
-    ## When the ingress is enabled, a host pointing to this will be created
-    ##
-    hostname: argocd.server.local
-    ## The Path to Argo CD server gRPC API. You may need to set this to '/*' in order to use this
-    ## with ALB ingress controllers.
-    ##
-    path: /
-    ## For a full list of possible ingress annotations, please see
-    ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-    ## Use this parameter to set the required annotations for cert-manager, see
-    ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-    ##
-    ## e.g:
-    ## annotations:
-    ##   kubernetes.io/ingress.class: nginx
-    ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-    ##
-    annotations: {}
-    ## Enable TLS configuration for the hostname defined at ingress.hostname parameter
-    ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
-    ##
-    tls: false
-    ## The list of additional hostnames to be covered with this ingress record.
-    ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-    extraHosts: []
-    ## - name: argocd.server.local
-    ##   path: /
-    ##
-
-    ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
-    ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
-    extraPaths: []
-    ## - path: /*
-    ##   backend:
-    ##     serviceName: ssl-redirect
-    ##     servicePort: use-annotation
-    ##
-
-    ## The tls configuration for additional hostnames to be covered with this ingress record.
-    ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-    extraTls: []
-    ## - hosts:
-    ##     - argocd.server.local
-    ##   secretName: argocd.server.local-tls
-    ##
-
-    ## If you're providing your own certificates, please use this to add the certificates as secrets
-    ## key and certificate should start with -----BEGIN CERTIFICATE----- or
-    ## REDACTED
-    ##
-    ## name should line up with a tlsSecret set further up
-    ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
-    ##
-    ## It is also possible to create and manage the certificates outside of this helm chart
-    ## Please see README.md for more information
-    ##
-    secrets: []
-    ## - name: argocd.server.local-tls
-    ##   key:
-    ##   certificate:
-    ##
-
-    ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-    ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-    ##
-    ingressClassName: ""
-    ## @param server.ingressGrpc.extraRules Additional rules to be covered with this ingress record
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-    ## e.g:
-    ## extraRules:
-    ##   - host: example.server.local
-    ##     http:
-    ##       paths:
-    ##         - path: /
-    ##           pathType: ImplementationSpecific
-    ##           backend:
-    ##             service:
-    ##               name: example-svc
-    ##               port:
-    ##                 name: http
-    ##
-    extraRules: []
-  ## Argo CD server container port
-  ## @param server.containerPorts.http Argo CD server HTTP container port
-  ## @param server.containerPorts.https Argo CD server HTTPS container port
-  ## @param server.containerPorts.metrics Argo CD server metrics container port
-  containerPorts:
-    http: 8080
-    https: 8443
-    metrics: 8083
-  ## Argo CD server service parameters
-  ##
-  service:
-    ## @param server.service.type Argo CD service type
-    ##
-    type: ClusterIP
-    ## @param server.service.ports.http HTTP port for the gRPC ingress when enabled
-    ## @param server.service.ports.https HTTPS port for the gRPC ingress when enabled
-    ##
-    ports:
-      http: 80
-      https: 443
-    ## Node ports to expose
-    ## @param server.service.nodePorts.http Node port for HTTP
-    ## @param server.service.nodePorts.https Node port for HTTPS
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      http: ""
-      https: ""
-    ## @param server.service.clusterIP Argo CD service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param server.service.loadBalancerIP Argo CD service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerIP: ""
-    ## @param server.service.loadBalancerSourceRanges Argo CD service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param server.service.externalTrafficPolicy Argo CD service external traffic policy
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param server.service.annotations Additional custom annotations for Argo CD service
-    ##
-    annotations: {}
-    ## @param server.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param server.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param server.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    sessionAffinityConfig: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param server.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param server.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param server.networkPolicy.allowExternalEgress Allow the pod to access any port range and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param server.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
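-    ## e.g., if your kube-apiserver only listens on 6443 (an assumption; check
-    ## your cluster), you can tighten this list to reduce the egress surface:
-    ## kubeAPIServerPorts: [6443]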
-    ## @param server.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    extraIngress: []
-    ## @param server.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param server.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param server.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
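-    ## e.g., an illustrative rule admitting traffic from pods in a namespace
-    ## labelled name=monitoring (the label key/value are assumptions):
-    ## ingressNSMatchLabels:
-    ##   name: monitoring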
-  ## @param server.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param server.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param server.extraArgs Extra arguments to concatenate to the default args
-  ##
-  extraArgs: []
-  ## @param server.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: true
-  ## @param server.hostAliases Argo CD server pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
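-  ## e.g., an illustrative alias (IP and hostname are placeholders):
-  ## hostAliases:
-  ##   - ip: "192.168.1.10"
-  ##     hostnames:
-  ##       - "argocd.internal"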
-  ## @param server.podLabels Extra labels for Argo CD server pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param server.podAnnotations Annotations for Argo CD server pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param server.podAffinityPreset Pod affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param server.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node server.affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param server.nodeAffinityPreset.type Node affinity preset type. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param server.nodeAffinityPreset.key Node label key to match. Ignored if `server.affinity` is set
-    ##
-    key: ""
-    ## @param server.nodeAffinityPreset.values Node label values to match. Ignored if `server.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param server.affinity Affinity for Argo CD server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `server.podAffinityPreset`, `server.podAntiAffinityPreset`, and `server.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param server.nodeSelector Node labels for Argo CD server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param server.tolerations Tolerations for Argo CD server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param server.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param server.shareProcessNamespace Enable shared process namespace in a pod.
-  ## If set to false (default), each container runs in its own process namespace and the server has PID 1.
-  ## If set to true, the /pause container runs as the init process and reaps any zombie PIDs,
-  ## for example those generated by a custom exec probe running longer than the probe's timeoutSeconds.
-  ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param server.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
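-  ## e.g., an illustrative constraint spreading server pods across zones; the
-  ## labelSelector is an assumption and must match the rendered pod labels:
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: topology.kubernetes.io/zone
-  ##     whenUnsatisfiable: ScheduleAnyway
-  ##     labelSelector:
-  ##       matchLabels:
-  ##         app.kubernetes.io/component: server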
-  ## @param server.updateStrategy.type Argo CD server deployment strategy type
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate or Recreate
-    ##
-    type: RollingUpdate
-  ## @param server.priorityClassName Argo CD server pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param server.runtimeClassName Name of the runtime class to be used by pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
-  ##
-  runtimeClassName: ""
-  ## @param server.lifecycleHooks for the Argo CD server container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
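-  ## e.g., an illustrative postStart hook (the command is a placeholder):
-  ## lifecycleHooks:
-  ##   postStart:
-  ##     exec:
-  ##       command: ["/bin/sh", "-c", "echo 'server started' >> /tmp/marker"]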
-  ## @param server.extraEnvVars Array with extra environment variables to add to Argo CD server nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param server.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD server nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param server.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD server nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param server.extraVolumes Optionally specify extra list of additional volumes for the Argo CD server pod(s)
-  ##
-  extraVolumes: []
-  ## @param server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD server container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param server.sidecars Add additional sidecar containers to the Argo CD server pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param server.initContainers Add additional init containers to the Argo CD server pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## ServiceAccount configuration for the Argo CD server
-  ##
-  serviceAccount:
-    ## @param server.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param server.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param server.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
-    ##
-    automountServiceAccountToken: false
-    ## @param server.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## Enable admin clusterrole resources. Allows Argo CD server to have access to multiple namespaces
-  ## @param server.clusterAdminAccess Enable K8s cluster admin access for the server
-  ##
-  clusterAdminAccess: true
-  ## Enable Custom Rules for Argo CD server cluster role
-  ## @param server.clusterRoleRules Use custom rules for server's cluster role
-  ##
-  clusterRoleRules: []
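-  ## e.g., an illustrative read-only rule (apiGroups/resources/verbs are
-  ## assumptions; scope them to your own policy):
-  ## clusterRoleRules:
-  ##   - apiGroups: [""]
-  ##     resources: ["secrets", "configmaps"]
-  ##     verbs: ["get", "list", "watch"]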
-
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param server.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param server.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param server.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `server.pdb.minAvailable` and `server.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-## @section Argo CD repo server Parameters
-
-## Argo CD repository server configuration
-##
-repoServer:
-  ## @param repoServer.replicaCount Number of Argo CD repo server replicas to deploy
-  ##
-  replicaCount: 1
-  ## Configure extra options for Argo CD repo server containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param repoServer.startupProbe.enabled Enable startupProbe on Argo CD repo server nodes
-  ## @param repoServer.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param repoServer.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param repoServer.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param repoServer.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param repoServer.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param repoServer.livenessProbe.enabled Enable livenessProbe on Argo CD repo server nodes
-  ## @param repoServer.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param repoServer.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param repoServer.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param repoServer.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param repoServer.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param repoServer.readinessProbe.enabled Enable readinessProbe on Argo CD repo server nodes
-  ## @param repoServer.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param repoServer.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param repoServer.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param repoServer.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param repoServer.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param repoServer.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param repoServer.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param repoServer.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Argo CD repo server resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param repoServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if repoServer.resources is set (repoServer.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param repoServer.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param repoServer.podSecurityContext.enabled Enabled Argo CD repo server pods' Security Context
-  ## @param repoServer.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param repoServer.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param repoServer.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param repoServer.podSecurityContext.fsGroup Set Argo CD repo server pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param repoServer.containerSecurityContext.enabled Enabled Argo CD repo server containers' Security Context
-  ## @param repoServer.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param repoServer.containerSecurityContext.runAsUser Set Argo CD repo server containers' Security Context runAsUser
-  ## @param repoServer.containerSecurityContext.runAsGroup Set Argo CD repo server containers' Security Context runAsGroup
-  ## @param repoServer.containerSecurityContext.allowPrivilegeEscalation Set Argo CD repo server containers' Security Context allowPrivilegeEscalation
-  ## @param repoServer.containerSecurityContext.capabilities.drop Set Argo CD repo server containers' Security Context capabilities to be dropped
-  ## @param repoServer.containerSecurityContext.readOnlyRootFilesystem Set Argo CD repo server containers' Security Context readOnlyRootFilesystem
-  ## @param repoServer.containerSecurityContext.runAsNonRoot Set Argo CD repo server containers' Security Context runAsNonRoot
-  ## @param repoServer.containerSecurityContext.privileged Set repoServer container's Security Context privileged
-  ## @param repoServer.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Repo server service parameters
-  ##
-  service:
-    ## @param repoServer.service.type Repo server service type
-    ##
-    type: ClusterIP
-    ## @param repoServer.service.ports.repoServer Repo server service port
-    ##
-    ports:
-      repoServer: 8081
-    ## Node ports to expose
-    ## @param repoServer.service.nodePorts.repoServer Node port for the repo server service
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      repoServer: ""
-    ## @param repoServer.service.clusterIP Repo server service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param repoServer.service.loadBalancerIP Repo server service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerIP: ""
-    ## @param repoServer.service.loadBalancerSourceRanges Repo server service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param repoServer.service.externalTrafficPolicy Repo server service external traffic policy
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param repoServer.service.annotations Additional custom annotations for Repo server service
-    ##
-    annotations: {}
-    ## @param repoServer.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param repoServer.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param repoServer.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    sessionAffinityConfig: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param repoServer.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param repoServer.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param repoServer.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param repoServer.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
-    ## @param repoServer.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param repoServer.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param repoServer.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param repoServer.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## Argo CD repo server log format: text|json
-  ## @param repoServer.logFormat Format for the Argo CD repo server logs. Options: [text, json]
-  ##
-  logFormat: text
-  ## Argo CD repo server log level
-  ## @param repoServer.logLevel Log level for the Argo CD repo server
-  ##
-  logLevel: info
-  ## Argo CD repo server container port
-  ## @param repoServer.containerPorts.repoServer Container port for Argo CD repo server
-  ## @param repoServer.containerPorts.metrics Metrics port for Argo CD repo server
-  ##
-  containerPorts:
-    repoServer: 8081
-    metrics: 8084
-  ## Metrics configuration for Argo CD repo server
-  ##
-  metrics:
-    ## Enable metrics for Argo CD repo server
-    ## @param repoServer.metrics.enabled Enable metrics for the Argo CD repo server
-    ##
-    enabled: true
-    service:
-      ## @param repoServer.metrics.service.type Argo CD repo server service type
-      ##
-      type: ClusterIP
-      ## @param repoServer.metrics.service.ports.metrics Argo CD repo server metrics service port
-      ##
-      ports:
-        metrics: 8084
-      ## Node ports to expose
-      ## @param repoServer.metrics.service.nodePorts.metrics Node port for the repo server metrics service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param repoServer.metrics.service.clusterIP Argo CD repo server metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param repoServer.metrics.service.loadBalancerIP Argo CD repo server service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param repoServer.metrics.service.loadBalancerSourceRanges Argo CD repo server service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param repoServer.metrics.service.externalTrafficPolicy Argo CD repo server service external traffic policy
-      ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param repoServer.metrics.service.annotations Additional custom annotations for Argo CD repo server service
-      ##
-      annotations: {}
-      ## @param repoServer.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param repoServer.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      sessionAffinityConfig: {}
-    ## Argo CD repo server metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param repoServer.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param repoServer.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param repoServer.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-      ##
-      jobLabel: ""
-      ## @param repoServer.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param repoServer.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param repoServer.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
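-      ## An illustrative sketch (not a chart default): drop the high-cardinality
-      ## `pod_template_hash` label before samples are scraped.
-      ## relabelings:
-      ##   - action: labeldrop
-      ##     regex: pod_template_hash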
-      ## @param repoServer.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param repoServer.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param repoServer.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-  ## Argo CD repo server deployment autoscaling
-  ## @param repoServer.autoscaling.enabled Enable Argo CD repo server deployment autoscaling
-  ## @param repoServer.autoscaling.minReplicas Argo CD repo server deployment autoscaling minimum number of replicas
-  ## @param repoServer.autoscaling.maxReplicas Argo CD repo server deployment autoscaling maximum number of replicas
-  ## @param repoServer.autoscaling.targetCPU Argo CD repo server deployment autoscaling target CPU percentage
-  ## @param repoServer.autoscaling.targetMemory Argo CD repo server deployment autoscaling target memory percentage
-  ##
-  autoscaling:
-    enabled: false
-    minReplicas: 1
-    maxReplicas: 5
-    targetCPU: 50
-    targetMemory: 50
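-  ## An illustrative sketch (values are examples, not chart defaults): let an HPA
-  ## scale the repo server between 2 and 5 replicas at 60% average CPU.
-  ## autoscaling:
-  ##   enabled: true
-  ##   minReplicas: 2
-  ##   maxReplicas: 5
-  ##   targetCPU: 60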
-  ## ServiceAccount configuration for the Argo CD repo server
-  ##
-  serviceAccount:
-    ## @param repoServer.serviceAccount.create Specifies whether a ServiceAccount for repo server should be created
-    ##
-    create: true
-    ## @param repoServer.serviceAccount.name The name of the ServiceAccount for repo server to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param repoServer.serviceAccount.automountServiceAccountToken Automount service account token for the repo server service account
-    ##
-    automountServiceAccountToken: false
-    ## @param repoServer.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## Enable admin clusterrole resources. Allows Argo CD repo server to have access to multiple namespaces
-  ## @param repoServer.clusterAdminAccess Enable K8s cluster admin access for the repo server
-  ##
-  clusterAdminAccess: false
-  ## Enable Custom Rules for Argo CD repo server cluster role
-  ## @param repoServer.clusterRoleRules Use custom rules for repo server's cluster role
-  ##
-  clusterRoleRules: []
-  ## @param repoServer.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param repoServer.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param repoServer.extraArgs Add extra args to the default repo server args
-  ##
-  extraArgs: []
-  ## @param repoServer.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: true
-  ## @param repoServer.hostAliases Argo CD repo server pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
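-  ## An illustrative sketch (IP and hostname are placeholders): resolve an internal
-  ## Git host without relying on cluster DNS.
-  ## hostAliases:
-  ##   - ip: "10.0.0.10"
-  ##     hostnames:
-  ##       - git.internal.example.com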
-  ## @param repoServer.podLabels Extra labels for Argo CD repo server pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param repoServer.podAnnotations Annotations for Argo CD repo server pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param repoServer.podAffinityPreset Pod affinity preset. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param repoServer.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset for Argo CD repo server pods
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param repoServer.nodeAffinityPreset.type Node affinity preset type. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param repoServer.nodeAffinityPreset.key Node label key to match. Ignored if `repoServer.affinity` is set
-    ##
-    key: ""
-    ## @param repoServer.nodeAffinityPreset.values Node label values to match. Ignored if `repoServer.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param repoServer.affinity Affinity for Argo CD repo server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `repoServer.podAffinityPreset`, `repoServer.podAntiAffinityPreset`, and `repoServer.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
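-  ## An illustrative sketch (label key/values are examples): pin repo server pods
-  ## to amd64 nodes. Remember this disables the presets above.
-  ## affinity:
-  ##   nodeAffinity:
-  ##     requiredDuringSchedulingIgnoredDuringExecution:
-  ##       nodeSelectorTerms:
-  ##         - matchExpressions:
-  ##             - key: kubernetes.io/arch
-  ##               operator: In
-  ##               values:
-  ##                 - amd64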
-  ## @param repoServer.nodeSelector Node labels for Argo CD repo server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param repoServer.tolerations Tolerations for Argo CD repo server pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param repoServer.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param repoServer.shareProcessNamespace Enable shared process namespace in a pod.
-  ## If set to false (default), each container will run in a separate process namespace, and repoServer will have PID=1.
-  ## If set to true, the /pause will run as init process and will reap any zombie PIDs,
-  ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds.
-  ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param repoServer.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
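-  ## An illustrative sketch (the labelSelector is an assumption; match it to the
-  ## pod labels of your release): spread repo server pods across nodes.
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: kubernetes.io/hostname
-  ##     whenUnsatisfiable: ScheduleAnyway
-  ##     labelSelector:
-  ##       matchLabels:
-  ##         app.kubernetes.io/component: repo-server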
-  ## @param repoServer.updateStrategy.type Argo CD repo server deployment strategy type
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate or Recreate
-    ##
-    type: RollingUpdate
-  ## @param repoServer.priorityClassName Argo CD repo server pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param repoServer.runtimeClassName Name of the runtime class to be used by pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
-  ##
-  runtimeClassName: ""
-  ## @param repoServer.lifecycleHooks for the Argo CD repo server container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param repoServer.extraEnvVars Array with extra environment variables to add to Argo CD repo server nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param repoServer.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD repo server nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param repoServer.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD repo server nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param repoServer.extraVolumes Optionally specify extra list of additional volumes for the Argo CD repo server pod(s)
-  ##
-  extraVolumes: []
-  ## @param repoServer.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD repo server container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param repoServer.sidecars Add additional sidecar containers to the Argo CD repo server pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param repoServer.initContainers Add additional init containers to the Argo CD repo server pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param repoServer.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param repoServer.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param repoServer.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `repoServer.pdb.minAvailable` and `repoServer.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-## @section Dex Parameters
-
-## Dex configuration
-##
-dex:
-  ## Bitnami Dex image
-  ## ref: https://hub.docker.com/r/bitnami/dex/tags/
-  ## @param dex.image.registry [default: REGISTRY_NAME] Dex image registry
-  ## @param dex.image.repository [default: REPOSITORY_NAME/dex] Dex image repository
-  ## @skip dex.image.tag Dex image tag (immutable tags are recommended)
-  ## @param dex.image.digest Dex image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param dex.image.pullPolicy Dex image pull policy
-  ## @param dex.image.pullSecrets Dex image pull secrets
-  ## @param dex.image.debug Enable Dex image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/dex
-    tag: 2.41.1-debian-12-r12
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## Enable Dex deployment
-  ## @param dex.enabled Enable the creation of a Dex deployment for SSO
-  ##
-  enabled: false
-  ## @param dex.replicaCount Number of Dex replicas to deploy
-  ##
-  replicaCount: 1
-  ## Configure extra options for Dex containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param dex.startupProbe.enabled Enable startupProbe on Dex nodes
-  ## @param dex.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param dex.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param dex.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param dex.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param dex.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param dex.livenessProbe.enabled Enable livenessProbe on Dex nodes
-  ## @param dex.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param dex.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param dex.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param dex.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param dex.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param dex.readinessProbe.enabled Enable readinessProbe on Dex nodes
-  ## @param dex.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param dex.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param dex.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param dex.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param dex.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## @param dex.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param dex.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param dex.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Dex resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param dex.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dex.resources is set (dex.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param dex.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param dex.podSecurityContext.enabled Enable Dex pods' Security Context
-  ## @param dex.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param dex.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param dex.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param dex.podSecurityContext.fsGroup Set Dex pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param dex.containerSecurityContext.enabled Enable Dex containers' Security Context
-  ## @param dex.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param dex.containerSecurityContext.runAsUser Set Dex containers' Security Context runAsUser
-  ## @param dex.containerSecurityContext.runAsGroup Set Dex containers' Security Context runAsGroup
-  ## @param dex.containerSecurityContext.allowPrivilegeEscalation Set Dex containers' Security Context allowPrivilegeEscalation
-  ## @param dex.containerSecurityContext.readOnlyRootFilesystem Set Dex containers' Security Context readOnlyRootFilesystem
-  ## @param dex.containerSecurityContext.runAsNonRoot Set Dex containers' Security Context runAsNonRoot
-  ## @param dex.containerSecurityContext.capabilities.drop Set Dex containers' Security Context capabilities to be dropped
-  ## @param dex.containerSecurityContext.privileged Set dex container's Security Context privileged
-  ## @param dex.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Dex service parameters
-  ##
-  service:
-    ## @param dex.service.type Dex service type
-    ##
-    type: ClusterIP
-    ## @param dex.service.ports.http Dex HTTP service port
-    ## @param dex.service.ports.grpc Dex grpc service port
-    ##
-    ports:
-      http: 5556
-      grpc: 5557
-    ## Node ports to expose
-    ## @param dex.service.nodePorts.http HTTP node port for the Dex service
-    ## @param dex.service.nodePorts.grpc gRPC node port for the Dex service
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      http: ""
-      grpc: ""
-    ## @param dex.service.clusterIP Dex service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param dex.service.loadBalancerIP Dex service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerIP: ""
-    ## @param dex.service.loadBalancerSourceRanges Dex service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param dex.service.externalTrafficPolicy Dex service external traffic policy
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param dex.service.annotations Additional custom annotations for Dex service
-    ##
-    annotations: {}
-    ## @param dex.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
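-    ## An illustrative sketch (name/ports are placeholders), normally paired with
-    ## a sidecar listening on the extra port:
-    ## extraPorts:
-    ##   - name: extra-http
-    ##     port: 8080
-    ##     targetPort: 8080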
-    ## @param dex.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param dex.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    sessionAffinityConfig: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param dex.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param dex.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param dex.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param dex.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security)
-    ##
-    kubeAPIServerPorts: [443, 6443, 8443]
-    ## @param dex.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param dex.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true)
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param dex.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param dex.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## Dex container ports
-  ## @param dex.containerPorts.http Dex container HTTP port
-  ## @param dex.containerPorts.grpc Dex gRPC port
-  ## @param dex.containerPorts.metrics Dex metrics port
-  ##
-  containerPorts:
-    http: 5556
-    grpc: 5557
-    metrics: 5558
-  ## Metrics configuration for Dex
-  ##
-  metrics:
-    ## Enable metrics for Dex
-    ## @param dex.metrics.enabled Enable metrics service for Dex
-    ##
-    enabled: false
-    service:
-      ## @param dex.metrics.service.type Dex service type
-      ##
-      type: ClusterIP
-      ## @param dex.metrics.service.ports.metrics Dex metrics service port
-      ##
-      ports:
-        metrics: 5558
-      ## Node ports to expose
-      ## @param dex.metrics.service.nodePorts.metrics Node port for the Dex service
-      ## NOTE: choose port between <30000-32767>
-      ##
-      nodePorts:
-        metrics: ""
-      ## @param dex.metrics.service.clusterIP Dex metrics service Cluster IP
-      ## e.g.:
-      ## clusterIP: None
-      ##
-      clusterIP: ""
-      ## @param dex.metrics.service.loadBalancerIP Dex service Load Balancer IP
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-      ##
-      loadBalancerIP: ""
-      ## @param dex.metrics.service.loadBalancerSourceRanges Dex service Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## e.g:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
-      ## @param dex.metrics.service.externalTrafficPolicy Dex service external traffic policy
-      ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-      ##
-      externalTrafficPolicy: Cluster
-      ## @param dex.metrics.service.annotations Additional custom annotations for Dex service
-      ##
-      annotations: {}
-      ## @param dex.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-      ## If "ClientIP", consecutive client requests will be directed to the same Pod
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-      ##
-      sessionAffinity: None
-      ## @param dex.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-      ## sessionAffinityConfig:
-      ##   clientIP:
-      ##     timeoutSeconds: 300
-      sessionAffinityConfig: {}
-    ## Dex metrics service monitor configuration
-    ##
-    serviceMonitor:
-      ## @param dex.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-      ##
-      enabled: false
-      ## @param dex.metrics.serviceMonitor.namespace Namespace which Prometheus is running in
-      ## e.g:
-      ## namespace: monitoring
-      ##
-      namespace: ""
-      ## @param dex.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-      ##
-      jobLabel: ""
-      ## @param dex.metrics.serviceMonitor.interval Interval at which metrics should be scraped
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      interval: 30s
-      ## @param dex.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-      ##
-      scrapeTimeout: 10s
-      ## @param dex.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      ## @param dex.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      ## @param dex.metrics.serviceMonitor.selector ServiceMonitor selector labels
-      ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-      ##
-      ## selector:
-      ##   prometheus: my-prometheus
-      ##
-      selector: {}
-      ## @param dex.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-      ##
-      honorLabels: false
-  ## ServiceAccount configuration for Dex
-  ##
-  serviceAccount:
-    ## @param dex.serviceAccount.create Specifies whether a ServiceAccount should be created for Dex
-    ##
-    create: true
-    ## @param dex.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param dex.serviceAccount.automountServiceAccountToken Automount service account token for the Dex service account
-    ##
-    automountServiceAccountToken: false
-    ## @param dex.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-    ##
-    annotations: {}
-  ## @param dex.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param dex.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param dex.extraArgs Add extra args to the default args for Dex
-  ##
-  extraArgs: []
-  ## @param dex.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: true
-  ## @param dex.hostAliases Dex pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param dex.podLabels Extra labels for Dex pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param dex.podAnnotations Annotations for Dex pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param dex.podAffinityPreset Pod affinity preset. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param dex.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset for Dex pods
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param dex.nodeAffinityPreset.type Node affinity preset type. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param dex.nodeAffinityPreset.key Node label key to match. Ignored if `dex.affinity` is set
-    ##
-    key: ""
-    ## @param dex.nodeAffinityPreset.values Node label values to match. Ignored if `dex.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param dex.affinity Affinity for Dex pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `dex.podAffinityPreset`, `dex.podAntiAffinityPreset`, and `dex.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param dex.nodeSelector Node labels for Dex pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param dex.tolerations Tolerations for Dex pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param dex.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param dex.shareProcessNamespace Enable shared process namespace in a pod.
-  ## If set to false (default), each container will run in a separate process namespace, and dex will have PID=1.
-  ## If set to true, the /pause will run as init process and will reap any zombie PIDs,
-  ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds.
-  ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param dex.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
-  ## @param dex.updateStrategy.type Dex deployment strategy type
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate or Recreate
-    ##
-    type: RollingUpdate
-  ## @param dex.priorityClassName Dex pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param dex.runtimeClassName Name of the runtime class to be used by pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/
-  ##
-  runtimeClassName: ""
-  ## @param dex.lifecycleHooks for the Dex container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param dex.extraEnvVars Array with extra environment variables to add to Dex nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param dex.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Dex nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param dex.extraEnvVarsSecret Name of existing Secret containing extra env vars for Dex nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param dex.extraVolumes Optionally specify extra list of additional volumes for the Dex pod(s)
-  ##
-  extraVolumes: []
-  ## @param dex.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Dex container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param dex.sidecars Add additional sidecar containers to the Dex pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param dex.initContainers Add additional init containers to the Dex pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param dex.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param dex.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param dex.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `dex.pdb.minAvailable` and `dex.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-## @section Shared config for Argo CD components
-config:
-  ## @param config.knownHosts [string] Known hosts to be added to the known hosts list by default. Check the values to see the default value
-  ##
-  knownHosts: |
-    bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
-    github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
-    gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
-    gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
-    gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
-    ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
-    vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
-  ## @param config.extraKnownHosts Add extra known hosts to the known hosts list
-  ## E.g.:
-  ##   extraKnownHosts: |
-  ##     gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
-  ##     gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
-  ##
-  extraKnownHosts: ""
-  ## @param config.createExtraKnownHosts Whether or not to create the extra known hosts configmap
-  ##
-  createExtraKnownHosts: true
-  ## @param config.styles Custom CSS styles
-  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/
-  ## E.g.:
-  ## styles: |
-  ##  .nav-bar {
-  ##    background: linear-gradient(to bottom, #999, #777, #333, #222, #111);
-  ##  }
-  ##
-  styles: ""
-  ## @param config.existingStylesConfigmap Use an existing styles configmap
-  ##
-  existingStylesConfigmap: ""
-  ## @param config.tlsCerts TLS certificates used to verify the authenticity of the repository servers
-  ## Certificates will be generated by default if the values are not set.
-  ## E.g:
-  ## tlsCerts:
-  ##   argocd-1.example.com: |
-  ##     -----BEGIN CERTIFICATE-----
-  ##     (...)
-  ##     -----END CERTIFICATE-----
-  ##   argocd-2.example.com: |
-  ##     -----BEGIN CERTIFICATE-----
-  ##     (...)
-  ##     -----END CERTIFICATE-----
-  ##
-  tlsCerts: {}
-  ## @param config.gpgKeys GnuPG public keys to add to the keyring
-  ## Keys will be generated by default if the values are not set.
-  ## Note: Public keys should be exported with `gpg --export --armor <KEY>`
-  ##
-  gpgKeys: {}
-  # 4AEE18F83AFDEB23: |
-  #   -----BEGIN PGP PUBLIC KEY BLOCK-----
-  #   ...
-  #   -----END PGP PUBLIC KEY BLOCK-----
-
-  ## @param config.rbac Role-based authentication configuration
-  ##
-  rbac: {}
-  # policy.default: role:readonly
-  # policy.csv: |
-  #   # Grant all members of the group 'my-org:team-alpha' the ability to sync apps in 'my-project'
-  #   p, my-org:team-alpha, applications, sync, my-project/*, allow
-  #   # Grant all members of 'my-org:team-beta' the admin role
-  #   g, my-org:team-beta, role:admin
-
-  ## Argo CD general secret configuration
-  ##
-  secret:
-    ## @param config.secret.create Whether or not to create the secret
-    ##
-    create: true
-    ## Annotations to be added to argocd-secret
-    ## @param config.secret.annotations General secret extra annotations
-    ##
-    annotations: {}
-    ## Webhook Configs
-    ## @param config.secret.githubSecret GitHub secret to configure webhooks
-    ## @param config.secret.gitlabSecret GitLab secret to configure webhooks
-    ## @param config.secret.bitbucketServerSecret BitBucket secret to configure webhooks
-    ## @param config.secret.bitbucketUUID BitBucket UUID to configure webhooks
-    ## @param config.secret.gogsSecret Gogs secret to configure webhooks
-    ##
-    githubSecret: ""
-    gitlabSecret: ""
-    bitbucketServerSecret: ""
-    bitbucketUUID: ""
-    gogsSecret: ""
-    ## Extra keys to add to the general config secret. Useful for injecting SSO secrets into environment variables.
-    ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sso
-    ## @param config.secret.extra Extra keys to add to the configuration secret.
-    ## All values must be non-empty.
-    ## E.g:
-    ##  LDAP_PASSWORD: "mypassword"
-    ##
-    extra: {}
-    ## Argo CD TLS Data.
-    ## @param config.secret.argocdServerTlsConfig.key TLS key for the Argo CD config secret
-    ## @param config.secret.argocdServerTlsConfig.crt TLS certificate for the Argo CD config secret
-    ## E.g:
-    ##   key:
-    ##   crt: |
-    ##     -----BEGIN CERTIFICATE-----
-    ##     <cert data>
-    ##     -----END CERTIFICATE-----
-    ##     -----BEGIN CERTIFICATE-----
-    ##     <ca cert data>
-    ##     -----END CERTIFICATE-----
-    ##
-    argocdServerTlsConfig:
-      key: ""
-      crt: ""
-    ## Argo admin password
-    ## @param config.secret.argocdServerAdminPassword Argo CD server admin password. Autogenerated by default.
-    ##
-    argocdServerAdminPassword: {{ .Values.globals.argocd.adminPassword }}
-    ## Password modification time defaults to current time if not set
-    ## @param config.secret.argocdServerAdminPasswordMtime Argo CD server password modification time
-    ## E.g:
-    ## argocdServerAdminPasswordMtime: "2006-01-02T15:04:05Z"
-    ##
-    argocdServerAdminPasswordMtime: ""
-    ## Create a secret with optional repository credentials
-    ## @param config.secret.repositoryCredentials Repository credentials to add to the Argo CD server config secret
-    ## E.g.
-    ## repositoryCredentials:
-    ##   sample-ssh-key: |
-    ##     REDACTED
-    ##     <key content>
-    ##     REDACTED
-    ##
-    repositoryCredentials: {}
-  ## External Cluster Credentials
-  ## Refs:
-  ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters
-  ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials
-  ## @param config.clusterCredentials Configure external cluster credentials
-  ## E.g
-  ##  - name: mycluster
-  ##   server: https://mycluster.com
-  ##   labels: {}
-  ##   annotations: {}
-  ##   config:
-  ##     bearerToken: "<authentication token>"
-  ##     tlsClientConfig:
-  ##       insecure: false
-  ##       caData: "<base64 encoded certificate>"
-  ## - name: mycluster2
-  ##   server: https://mycluster2.com
-  ##   labels: {}
-  ##   annotations: {}
-  ##   namespaces: namespace1,namespace2
-  ##   config:
-  ##     bearerToken: "<authentication token>"
-  ##     tlsClientConfig:
-  ##       insecure: false
-  ##       caData: "<base64 encoded certificate>"
-  ##
-  clusterCredentials: []
-## @section Init Container Parameters
-
-## 'volumePermissions' init container parameters
-## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
-##   based on the *podSecurityContext/*containerSecurityContext parameters
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
-  ##
-  enabled: false
-  ## OS Shell + Utility image
-  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
-  ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest OS Shell + Utility image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
-  ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Init container Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
-  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
-  ##   data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
-  ##   "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
-  ##
-  containerSecurityContext:
-    seLinuxOptions: {}
-    runAsUser: 0
-## @section Other Parameters
-
-## RBAC configuration
-##
-rbac:
-  ## @param rbac.create Specifies whether RBAC resources should be created
-  ##
-  create: true
-## Redis parameters
-##
-redis:
-  ## Bitnami Redis image
-  ## ref: https://hub.docker.com/r/bitnami/redis/tags/
-  ## @param redis.image.registry [default: REGISTRY_NAME] Redis image registry
-  ## @param redis.image.repository [default: REPOSITORY_NAME/redis] Redis image repository
-  ## @skip redis.image.tag Redis image tag (immutable tags are recommended)
-  ## @param redis.image.digest Redis image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param redis.image.pullPolicy Redis image pull policy
-  ## @param redis.image.pullSecrets Redis image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/redis
-    tag: 7.4.2-debian-12-r0
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param redis.enabled Enable Redis dependency
-  ##
-  enabled: false
-  ## @param redis.nameOverride Name override for the Redis dependency
-  ##
-  nameOverride: ""
-  ## @param redis.service.ports.redis Service port for Redis dependency
-  ##
-  service:
-    ports:
-      redis: 6379
-  ## Use password authentication
-  ## @param redis.auth.enabled Enable Redis dependency authentication
-  ## @param redis.auth.existingSecret Existing secret to load redis dependency password
-  ## @param redis.auth.existingSecretPasswordKey Password key name inside the existing secret
-  ##
-  auth:
-    enabled: true
-    ## Name of existing secret object containing the password
-    ##
-    existingSecret: ""
-    ##
-    ## Password key to be retrieved from Redis&reg; secret
-    ##
-    existingSecretPasswordKey: 'redis-password'
-  ## Cluster settings
-  ## @param redis.architecture Redis&reg; architecture. Allowed values: `standalone` or `replication`
-  ## TODO(miguelaeh): We need to test the chart with redis sentinel, it seems to be supported at: https://github.com/argoproj/argo-cd/blob/2a410187565e15633b6f2a8c8d8da22cf02b257d/util/cache/cache.go#L40
-  ##
-  architecture: standalone
-##
-## External Redis&reg;
-##
-externalRedis:
-  ## Can be enabled when redisWait.enabled and redis.enabled are set to false
-  ## @param externalRedis.enabled Enables External Redis
-  ##
-  enabled: true
-  ## Redis&reg; host
-  ## @param externalRedis.host External Redis host
-  ##
-  host: redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local
-  ## Redis&reg; port
-  ## @param externalRedis.port External Redis port
-  ##
-  port: 6379
-  ## Redis&reg; password for authentication
-  ## Ignored if existingSecret is set
-  ## @param externalRedis.password External Redis password
-  ##
-  password: {{ .Values.globals.redis.password }}
-  ## Name of existing secret object containing the password
-  ## @param externalRedis.existingSecret Existing secret for the external redis
-  ##
-  existingSecret: ""
-  ## Password key to be retrieved from Redis&reg; secret
-  ## @param externalRedis.existingSecretPasswordKey Password key for the existing secret containing the external redis password
-  ##
-  existingSecretPasswordKey: 'redis-password'
-  ## Specify a label to use with the label selector
-  ## @param externalRedis.selector External Redis selector labels
-  ##
-  selector: {}
-  #
-  # selector:
-    # app.kubernetes.io/component: master
-    # app.kubernetes.io/instance: redis
-    # app.kubernetes.io/name: redis
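-  ##
-  ## For illustration only: with a hypothetical globals block such as
-  ##   globals:
-  ##     redis:
-  ##       namespace: redis
-  ##       password: changeme
-  ## the gotmpl lookups above would render to:
-  ##   host: redis-master.redis.svc.cluster.local
-  ##   password: changeme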
-## Wait-for-redis init container configuration
-##
-redisWait:
-  ## @param redisWait.enabled Enables waiting for redis
-  ##
-  enabled: false
-  ## @param redisWait.extraArgs Additional arguments for the redis-cli call, such as TLS
-  ##
-  extraArgs: ''
-  ## @param redisWait.containerSecurityContext.enabled Enable the redisWait container's Security Context
-  ## @param redisWait.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param redisWait.containerSecurityContext.runAsUser Set redisWait container's Security Context runAsUser
-  ## @param redisWait.containerSecurityContext.runAsGroup Set redisWait container's Security Context runAsGroup
-  ## @param redisWait.containerSecurityContext.allowPrivilegeEscalation Set redisWait container's Security Context allowPrivilegeEscalation
-  ## @param redisWait.containerSecurityContext.capabilities.drop Set redisWait container's Security Context capabilities to be dropped
-  ## @param redisWait.containerSecurityContext.readOnlyRootFilesystem Set redisWait container's Security Context readOnlyRootFilesystem
-  ## @param redisWait.containerSecurityContext.runAsNonRoot Set redisWait container's Security Context runAsNonRoot
-  ## @param redisWait.containerSecurityContext.privileged Set redisWait container's Security Context privileged
-  ## @param redisWait.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    privileged: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-
diff --git a/k8s/helmfile.d/values/authentik/values.yaml.gotmpl b/k8s/helmfile.d/values/authentik/values.yaml.gotmpl
deleted file mode 100644
index 7846656..0000000
--- a/k8s/helmfile.d/values/authentik/values.yaml.gotmpl
+++ /dev/null
@@ -1,1036 +0,0 @@
----
-# -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible
-nameOverride: ""
-# -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible
-fullnameOverride: ""
-# -- Override the Kubernetes version, which is used to evaluate certain manifests
-kubeVersionOverride: ""
-
-
-## Globally shared configuration for authentik components.
-global:
-  # -- Provide a name in place of `authentik`
-  nameOverride: ""
-  # -- String to fully override `"authentik.fullname"`
-  fullnameOverride: ""
-  # -- A custom namespace to override the default namespace for the deployed resources.
-  namespaceOverride: ""
-  # -- Common labels for all resources.
-  additionalLabels: {}
-    # app: authentik
-
-  # Number of old deployment ReplicaSets to retain. The rest will be garbage collected.
-  revisionHistoryLimit: 3
-
-  # Default image used by all authentik components. For GeoIP configuration, see the geoip values below.
-  image:
-    # -- If defined, a repository applied to all authentik deployments
-    repository: ghcr.io/goauthentik/server
-    # -- Overrides the global authentik image tag, whose default is the chart appVersion
-    tag: ""
-    # -- If defined, an image digest applied to all authentik deployments
-    digest: ""
-    # -- If defined, an imagePullPolicy applied to all authentik deployments
-    pullPolicy: IfNotPresent
-
-  # -- Secrets with credentials to pull images from a private registry
-  imagePullSecrets: []
-
-  # -- Annotations for all deployed Deployments
-  deploymentAnnotations: {}
-
-  # -- Annotations for all deployed pods
-  podAnnotations: {}
-
-  # -- Annotations for all deployed secrets
-  secretAnnotations: {}
-
-  # -- Labels for all deployed pods
-  podLabels: {}
-
-  # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors.
-  addPrometheusAnnotations: false
-
-  # -- Toggle and define pod-level security context.
-  # @default -- `{}` (See [values.yaml])
-  securityContext: {}
-    # runAsUser: 1000
-    # runAsGroup: 1000
-    # fsGroup: 1000
-
-  # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files
-  hostAliases: []
-    # - ip: 10.20.30.40
-    #   hostnames:
-    #     - my.hostname
-
-  # -- Default priority class for all components
-  priorityClassName: ""
-
-  # -- Default node selector for all components
-  nodeSelector: {}
-
-  # -- Default tolerations for all components
-  tolerations: []
-
-  # Default affinity preset for all components
-  affinity:
-    # -- Default pod anti-affinity rules. Either: `none`, `soft` or `hard`
-    podAntiAffinity: soft
-    # Node affinity rules
-    nodeAffinity:
-      # -- Default node affinity rules. Either `none`, `soft` or `hard`
-      type: hard
-      # -- Default match expressions for node affinity
-      matchExpressions: []
-        # - key: topology.kubernetes.io/zone
-        #   operator: In
-        #   values:
-        #     - zonea
-        #     - zoneb
-
-  # -- Default [TopologySpreadConstraints] rules for all components
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-
-  # -- Deployment strategy for all deployed Deployments
-  deploymentStrategy: {}
-    # type: RollingUpdate
-    # rollingUpdate:
-    #   maxSurge: 25%
-    #   maxUnavailable: 25%
-
-  # -- Environment variables to pass to all deployed Deployments. Does not apply to GeoIP
-  # See configuration options at https://goauthentik.io/docs/installation/configuration/
-  # @default -- `[]` (See [values.yaml])
-  env: []
-    # - name: AUTHENTIK_VAR_NAME
-    #   value: VALUE
-    # - name: AUTHENTIK_VAR_OTHER
-    #   valueFrom:
-    #     secretKeyRef:
-    #       name: secret-name
-    #       key: secret-key
-    # - name: AUTHENTIK_VAR_ANOTHER
-    #   valueFrom:
-    #     configMapKeyRef:
-    #       name: config-map-name
-    #       key: config-map-key
-
-  # -- envFrom to pass to all deployed Deployments. Does not apply to GeoIP
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-    # - configMapRef:
-    #     name: config-map-name
-    # - secretRef:
-    #     name: secret-name
-
-  # -- Additional volumeMounts to all deployed Deployments. Does not apply to GeoIP
-  # @default -- `[]` (See [values.yaml])
-  volumeMounts: []
-    # - name: custom
-    #   mountPath: /custom
-
-  # -- Additional volumes to all deployed Deployments.
-  # @default -- `[]` (See [values.yaml])
-  volumes: []
-    # - name: custom
-    #   emptyDir: {}
-
-
-## Authentik configuration
-authentik:
-  # -- Log level for server and worker
-  log_level: info
-  # -- Secret key used for cookie signing and unique user IDs;
-  # don't change this after the first install
-  secret_key: {{ .Values.globals.authentik.secretKey }}
-  events:
-    context_processors:
-      # -- Path for the GeoIP City database. If the file doesn't exist, GeoIP features are disabled.
-      geoip: /geoip/GeoLite2-City.mmdb
-      # -- Path for the GeoIP ASN database. If the file doesn't exist, GeoIP features are disabled.
-      asn: /geoip/GeoLite2-ASN.mmdb
-  email:
-    # -- SMTP Server emails are sent from, fully optional
-    host: ""
-    # -- SMTP server port
-    port: 587
-    # -- SMTP credentials, when left empty, no authentication will be done
-    username: ""
-    # -- SMTP credentials, when left empty, no authentication will be done
-    password: ""
-    # -- Enable either use_tls or use_ssl; they can't both be enabled at the same time.
-    use_tls: false
-    # -- Enable either use_tls or use_ssl; they can't both be enabled at the same time.
-    use_ssl: false
-    # -- Connection timeout
-    timeout: 30
-    # -- Email from address, can either be in the format "foo@bar.baz" or "authentik <foo@bar.baz>"
-    from: ""
-  outposts:
-    # -- Template used for managed outposts. The following placeholders can be used
-    # %(type)s - the type of the outpost
-    # %(version)s - version of your authentik install
-    # %(build_hash)s - only for beta versions, the build hash of the image
-    container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s
-  error_reporting:
-    # -- This sends anonymous usage-data, stack traces on errors and
-    # performance data to sentry.beryju.org, and is fully opt-in
-    enabled: false
-    # -- This is a string that is sent to sentry with your error reports
-    environment: "k8s"
-    # -- Send PII (Personally identifiable information) data to sentry
-    send_pii: false
-  postgresql:
-    # -- set the postgresql hostname to talk to
-    # if unset and .Values.postgresql.enabled == true, will generate the default
-    # @default -- `{{ .Release.Name }}-postgresql`
-    host: "postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local"
-    # -- postgresql Database name
-    # @default -- `authentik`
-    name: "{{ .Values.globals.authentik.postgres.database }}"
-    # -- postgresql Username
-    # @default -- `authentik`
-    user: "{{ .Values.globals.authentik.postgres.username }}"
-    password: "{{ .Values.globals.authentik.postgres.password }}"
-    port: 5432
-  redis:
-    # -- set the redis hostname to talk to
-    # @default -- `{{ .Release.Name }}-redis-master`
-    host: "redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local"
-    password: "{{ .Values.globals.redis.password }}"
-
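-
-# Inferred from the gotmpl lookups above: this values file assumes the
-# helmfile globals expose at least the following keys (names come from this
-# file; the values are deployment-specific):
-#   globals.authentik.secretKey
-#   globals.authentik.postgres.{database,username,password}
-#   globals.postgres.namespace
-#   globals.redis.{namespace,password}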
-
-blueprints:
-  # -- List of config maps to mount blueprints from.
-  # Only keys in the configMap ending with `.yaml` will be discovered and applied.
-  configMaps: []
-  # -- List of secrets to mount blueprints from.
-  # Only keys in the secret ending with `.yaml` will be discovered and applied.
-  secrets: []
-
-
-## authentik server
-server:
-  # -- authentik server name
-  name: server
-
-  # -- The number of server pods to run
-  replicas: 1
-
-  ## authentik server Horizontal Pod Autoscaler
-  autoscaling:
-    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik server
-    enabled: false
-    # -- Minimum number of replicas for the authentik server [HPA]
-    minReplicas: 1
-    # -- Maximum number of replicas for the authentik server [HPA]
-    maxReplicas: 5
-    # -- Average CPU utilization percentage for the authentik server [HPA]
-    targetCPUUtilizationPercentage: 50
-    # -- Average memory utilization percentage for the authentik server [HPA]
-    targetMemoryUtilizationPercentage: ~
-    # -- Configures the scaling behavior of the target in both Up and Down directions.
-    behavior: {}
-      # scaleDown:
-      #   stabilizationWindowSeconds: 300
-      #   policies:
-      #     - type: Pods
-      #       value: 1
-      #       periodSeconds: 180
-      # scaleUp:
-      #   stabilizationWindowSeconds: 300
-      #   policies:
-      #     - type: Pods
-      #       value: 2
-      #       periodSeconds: 60
-    # -- Configures custom HPA metrics for the authentik server
-    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
-    metrics: []
-
-  ## authentik server Pod Disruption Budget
-  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-  pdb:
-    # -- Deploy a [PodDisruptionBudget] for the authentik server
-    enabled: false
-    # -- Labels to be added to the authentik server pdb
-    labels: {}
-    # -- Annotations to be added to the authentik server pdb
-    annotations: {}
-    # -- Number of pods that are available after eviction as a number or percentage (e.g. 50%)
-    # @default -- `""` (defaults to 0 if not specified)
-    minAvailable: ""
-    # -- Number of pods that are unavailable after eviction as a number or percentage (e.g. 50%)
-    ## Has higher precedence over `server.pdb.minAvailable`
-    maxUnavailable: ""
-
-  ## authentik server image
-  ## This should match what is deployed in the worker. Prefer using global.image
-  image:
-    # -- Repository to use to the authentik server
-    # @default -- `""` (defaults to global.image.repository)
-    repository: "" # defaults to global.image.repository
-    # -- Tag to use to the authentik server
-    # @default -- `""` (defaults to global.image.tag)
-    tag: "" # defaults to global.image.tag
-    # -- Digest to use to the authentik server
-    # @default -- `""` (defaults to global.image.digest)
-    digest: "" # defaults to global.image.digest
-    # -- Image pull policy to use to the authentik server
-    # @default -- `""` (defaults to global.image.pullPolicy)
-    pullPolicy: "" # defaults to global.image.pullPolicy
-
-  # -- Secrets with credentials to pull images from a private registry
-  # @default -- `[]` (defaults to global.imagePullSecrets)
-  imagePullSecrets: []
-
-  # -- Environment variables to pass to the authentik server. Does not apply to GeoIP
-  # See configuration options at https://goauthentik.io/docs/installation/configuration/
-  # @default -- `[]` (See [values.yaml])
-  env: []
-    # - name: AUTHENTIK_VAR_NAME
-    #   value: VALUE
-    # - name: AUTHENTIK_VAR_OTHER
-    #   valueFrom:
-    #     secretKeyRef:
-    #       name: secret-name
-    #       key: secret-key
-    # - name: AUTHENTIK_VAR_ANOTHER
-    #   valueFrom:
-    #     configMapKeyRef:
-    #       name: config-map-name
-    #       key: config-map-key
-
-  # -- envFrom to pass to the authentik server. Does not apply to GeoIP
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-    # - configMapRef:
-    #     name: config-map-name
-    # - secretRef:
-    #     name: secret-name
-
-  # -- Specify postStart and preStop lifecycle hooks for your authentik server container
-  lifecycle: {}
-
-  # -- Additional containers to be added to the authentik server pod
-  ## Note: Supports use of custom Helm templates
-  extraContainers: []
-  # - name: my-sidecar
-  #   image: nginx:latest
-
-  # -- Init containers to add to the authentik server pod
-  ## Note: Supports use of custom Helm templates
-  initContainers: []
-  # - name: download-tools
-  #   image: alpine:3
-  #   command: [sh, -c]
-  #   args:
-  #     - echo init
-
-  # -- Additional volumeMounts to the authentik server main container
-  volumeMounts: []
-    # - name: custom
-    #   mountPath: /custom
-
-  # -- Additional volumes to the authentik server pod
-  volumes: []
-    # - name: custom
-    #   emptyDir: {}
-
-  # -- Annotations to be added to the authentik server Deployment
-  deploymentAnnotations: {}
-
-  # -- Annotations to be added to the authentik server pods
-  podAnnotations: {}
-
-  # -- Labels to be added to the authentik server pods
-  podLabels: {}
-
-  # -- Resource limits and requests for the authentik server
-  resources: {}
-    # requests:
-    #   cpu: 100m
-    #   memory: 512Mi
-    # limits:
-    #   memory: 512Mi
-
-  # authentik server container ports
-  containerPorts:
-    # -- http container port
-    http: 9000
-    # -- https container port
-    https: 9443
-    # -- metrics container port
-    metrics: 9300
-
-  # -- Host Network for authentik server pods
-  hostNetwork: false
-
-  # -- [DNS configuration]
-  dnsConfig: {}
-  # -- Alternative DNS policy for authentik server pods
-  dnsPolicy: ""
-
-  # -- serviceAccount to use for authentik server pods
-  serviceAccountName: ~
-
-  # -- authentik server pod-level security context
-  # @default -- `{}` (See [values.yaml])
-  securityContext: {}
-    # runAsUser: 1000
-    # runAsGroup: 1000
-    # fsGroup: 1000
-
-  # -- authentik server container-level security context
-  # @default -- See [values.yaml]
-  containerSecurityContext: {}
-    # Not all of the following have been tested. Use at your own risk.
-    # runAsNonRoot: true
-    # readOnlyRootFilesystem: true
-    # allowPrivilegeEscalation: false
-    # seccompProfile:
-    #   type: RuntimeDefault
-    # capabilities:
-    #   drop:
-    #     - ALL
-
-  ## Liveness, readiness and startup probes for authentik server
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  livenessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/live/
-      port: http
-
-  readinessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/ready/
-      port: http
-
-  startupProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 60
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/live/
-      port: http
-
-  # -- terminationGracePeriodSeconds for container lifecycle hook
-  terminationGracePeriodSeconds: 30
-
-  # -- Priority class for the authentik server pods
-  # @default -- `""` (defaults to global.priorityClassName)
-  priorityClassName: ""
-
-  # -- [Node selector]
-  # @default -- `{}` (defaults to global.nodeSelector)
-  nodeSelector: {}
-
-  # -- [Tolerations] for use with node taints
-  # @default -- `[]` (defaults to global.tolerations)
-  tolerations: []
-
-  # -- Assign custom [affinity] rules to the deployment
-  # @default -- `{}` (defaults to the global.affinity preset)
-  affinity: {}
-
-  # -- Assign custom [TopologySpreadConstraints] rules to the authentik server
-  # @default -- `[]` (defaults to global.topologySpreadConstraints)
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
-  topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-
-  # -- Deployment strategy to be added to the authentik server Deployment
-  # @default -- `{}` (defaults to global.deploymentStrategy)
-  deploymentStrategy: {}
-    # type: RollingUpdate
-    # rollingUpdate:
-    #   maxSurge: 25%
-    #   maxUnavailable: 25%
-
-  ## authentik server service configuration
-  service:
-    # -- authentik server service annotations
-    annotations: {}
-    # -- authentik server service labels
-    labels: {}
-    # -- authentik server service type
-    type: ClusterIP
-    # -- authentik server service http port for NodePort service type (only if `server.service.type` is set to `NodePort`)
-    nodePortHttp: 30080
-    # -- authentik server service https port for NodePort service type (only if `server.service.type` is set to `NodePort`)
-    nodePortHttps: 30443
-    # -- authentik server service http port
-    servicePortHttp: 80
-    # -- authentik server service https port
-    servicePortHttps: 443
-    # -- authentik server service http port name
-    servicePortHttpName: http
-    # -- authentik server service https port name
-    servicePortHttpsName: https
-    # -- authentik server service http port appProtocol
-    # servicePortHttpAppProtocol: HTTP
-    # -- authentik server service https port appProtocol
-    # servicePortHttpsAppProtocol: HTTPS
-    # -- LoadBalancer will get created with the IP specified in this field
-    loadBalancerIP: ""
-    # -- Source IP ranges to allow access to service from
-    loadBalancerSourceRanges: []
-    # -- authentik server service external IPs
-    externalIPs: []
-    # -- Denotes if this service desires to route external traffic to node-local or cluster-wide endpoints
-    externalTrafficPolicy: ""
-    # -- Used to maintain session affinity. Supports `ClientIP` and `None`
-    sessionAffinity: ""
-    # -- Session affinity configuration
-    sessionAffinityConfig: {}
-
-  ## authentik server metrics service configuration
-  metrics:
-    # -- deploy metrics service
-    enabled: true
-    service:
-      # -- metrics service type
-      type: ClusterIP
-      # -- metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
-      clusterIP: ""
-      # -- metrics service annotations
-      annotations: {}
-      # -- metrics service labels
-      labels: {}
-      # -- metrics service port
-      servicePort: 9300
-      # -- metrics service port name
-      portName: metrics
-    serviceMonitor:
-      # -- enable a prometheus ServiceMonitor
-      enabled: false
-      # -- Prometheus ServiceMonitor interval
-      interval: 30s
-      # -- Prometheus ServiceMonitor scrape timeout
-      scrapeTimeout: 3s
-      # -- Prometheus [RelabelConfigs] to apply to samples before scraping
-      relabelings: []
-      # -- Prometheus [MetricsRelabelConfigs] to apply to samples before ingestion
-      metricRelabelings: []
-      # -- Prometheus ServiceMonitor selector
-      selector: {}
-        # prometheus: kube-prometheus
-
-      # -- Prometheus ServiceMonitor scheme
-      scheme: ""
-      # -- Prometheus ServiceMonitor tlsConfig
-      tlsConfig: {}
-      # -- Prometheus ServiceMonitor namespace
-      namespace: ""
-      # -- Prometheus ServiceMonitor labels
-      labels: {}
-      # -- Prometheus ServiceMonitor annotations
-      annotations: {}
-
-  ingress:
-    # -- enable an ingress resource for the authentik server
-    enabled: true
-    # -- additional ingress annotations
-    annotations:
-      kubernetes.io/ingress.class: {{ .Values.globals.authentik.ingressClass }}
-      cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-    # -- additional ingress labels
-    labels: {}
-    # -- defines which ingress controller will implement the resource
-    ingressClassName: "{{ .Values.globals.authentik.ingressClass }}"
-    # -- List of ingress hosts
-    hosts:
-      {{- range .Values.globals.authentik.hostnames }}
-      - {{ . }}
-      {{- end }}
-
-    # -- List of ingress paths
-    paths:
-      - /
-    # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
-    pathType: Prefix
-    # -- additional ingress paths
-    extraPaths: []
-      # - path: /*
-      #   pathType: Prefix
-      #   backend:
-      #     service:
-      #       name: ssl-redirect
-      #       port:
-      #         name: use-annotation
-
-    # -- ingress TLS configuration
-    tls:
-      - secretName: authentik-tls
-        hosts:
-          {{- range .Values.globals.authentik.hostnames }}
-          - {{ . }}
-          {{- end }}
-
-    # -- uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
-    https: false
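-
-    # For illustration only: with hypothetical globals such as
-    #   globals.authentik.hostnames: [auth.example.com]
-    # the range loops above render the ingress hosts and TLS hosts to:
-    #   hosts:
-    #     - auth.example.com
-    #   tls:
-    #     - secretName: authentik-tls
-    #       hosts:
-    #         - auth.example.com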
-
-
-## authentik worker
-worker:
-  # -- authentik worker name
-  name: worker
-
-  # -- The number of worker pods to run
-  replicas: 1
-
-  ## authentik worker Horizontal Pod Autoscaler
-  autoscaling:
-    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik worker
-    enabled: false
-    # -- Minimum number of replicas for the authentik worker [HPA]
-    minReplicas: 1
-    # -- Maximum number of replicas for the authentik worker [HPA]
-    maxReplicas: 5
-    # -- Average CPU utilization percentage for the authentik worker [HPA]
-    targetCPUUtilizationPercentage: 50
-    # -- Average memory utilization percentage for the authentik worker [HPA]
-    targetMemoryUtilizationPercentage: ~
-    # -- Configures the scaling behavior of the target in both Up and Down directions.
-    behavior: {}
-      # scaleDown:
-      #   stabilizationWindowSeconds: 300
-      #   policies:
-      #     - type: Pods
-      #       value: 1
-      #       periodSeconds: 180
-      # scaleUp:
-      #   stabilizationWindowSeconds: 300
-      #   policies:
-      #     - type: Pods
-      #       value: 2
-      #       periodSeconds: 60
-    # -- Configures custom HPA metrics for the authentik worker
-    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
-    metrics: []
-
-  ## authentik worker Pod Disruption Budget
-  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-  pdb:
-    # -- Deploy a [PodDisruptionBudget] for the authentik worker
-    enabled: false
-    # -- Labels to be added to the authentik worker pdb
-    labels: {}
-    # -- Annotations to be added to the authentik worker pdb
-    annotations: {}
-    # -- Number of pods that are available after eviction as a number or percentage (e.g. 50%)
-    # @default -- `""` (defaults to 0 if not specified)
-    minAvailable: ""
-    # -- Number of pods that are unavailable after eviction as a number or percentage (e.g. 50%)
-    ## Has higher precedence over `worker.pdb.minAvailable`
-    maxUnavailable: ""
-
-  ## authentik worker image
-  ## This should match what is deployed in the server. Prefer using global.image
-  image:
-    # -- Repository to use to the authentik worker
-    # @default -- `""` (defaults to global.image.repository)
-    repository: "" # defaults to global.image.repository
-    # -- Tag to use to the authentik worker
-    # @default -- `""` (defaults to global.image.tag)
-    tag: "" # defaults to global.image.tag
-    # -- Digest to use to the authentik worker
-    # @default -- `""` (defaults to global.image.digest)
-    digest: "" # defaults to global.image.digest
-    # -- Image pull policy to use to the authentik worker
-    # @default -- `""` (defaults to global.image.pullPolicy)
-    pullPolicy: "" # defaults to global.image.pullPolicy
-
-  # -- Secrets with credentials to pull images from a private registry
-  # @default -- `[]` (defaults to global.imagePullSecrets)
-  imagePullSecrets: []
-
-  # -- Environment variables to pass to the authentik worker. Does not apply to GeoIP
-  # See configuration options at https://goauthentik.io/docs/installation/configuration/
-  # @default -- `[]` (See [values.yaml])
-  env: []
-    # - name: AUTHENTIK_VAR_NAME
-    #   value: VALUE
-    # - name: AUTHENTIK_VAR_OTHER
-    #   valueFrom:
-    #     secretKeyRef:
-    #       name: secret-name
-    #       key: secret-key
-    # - name: AUTHENTIK_VAR_ANOTHER
-    #   valueFrom:
-    #     configMapKeyRef:
-    #       name: config-map-name
-    #       key: config-map-key
-
-  # -- envFrom to pass to the authentik worker. Does not apply to GeoIP
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-    # - configMapRef:
-    #     name: config-map-name
-    # - secretRef:
-    #     name: secret-name
-
-  # -- Specify postStart and preStop lifecycle hooks for your authentik worker container
-  lifecycle: {}
-
-  # -- Additional containers to be added to the authentik worker pod
-  ## Note: Supports use of custom Helm templates
-  extraContainers: []
-  # - name: my-sidecar
-  #   image: nginx:latest
-
-  # -- Init containers to add to the authentik worker pod
-  ## Note: Supports use of custom Helm templates
-  initContainers: []
-  # - name: download-tools
-  #   image: alpine:3
-  #   command: [sh, -c]
-  #   args:
-  #     - echo init
-
-  # -- Additional volumeMounts to the authentik worker main container
-  volumeMounts: []
-    # - name: custom
-    #   mountPath: /custom
-
-  # -- Additional volumes to the authentik worker pod
-  volumes: []
-    # - name: custom
-    #   emptyDir: {}
-
-  # -- Annotations to be added to the authentik worker Deployment
-  deploymentAnnotations: {}
-
-  # -- Annotations to be added to the authentik worker pods
-  podAnnotations: {}
-
-  # -- Labels to be added to the authentik worker pods
-  podLabels: {}
-
-  # -- Resource limits and requests for the authentik worker
-  resources: {}
-    # requests:
-    #   cpu: 100m
-    #   memory: 512Mi
-    # limits:
-    #   memory: 512Mi
-
-  # -- Host Network for authentik worker pods
-  hostNetwork: false
-
-  # -- [DNS configuration]
-  dnsConfig: {}
-  # -- Alternative DNS policy for authentik worker pods
-  dnsPolicy: ""
-
-  # -- serviceAccount to use for authentik worker pods. If set, overrides the value used when serviceAccount.create is true
-  serviceAccountName: ~
-
-  # -- authentik worker pod-level security context
-  # @default -- `{}` (See [values.yaml])
-  securityContext: {}
-    # runAsUser: 1000
-    # runAsGroup: 1000
-    # fsGroup: 1000
-
-  # -- authentik worker container-level security context
-  # @default -- See [values.yaml]
-  containerSecurityContext: {}
-    # Not all of the following have been tested. Use at your own risk.
-    # runAsNonRoot: true
-    # readOnlyRootFilesystem: true
-    # allowPrivilegeEscalation: false
-    # seccompProfile:
-    #   type: RuntimeDefault
-    # capabilities:
-    #   drop:
-    #     - ALL
-
-  livenessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  readinessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  startupProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 60
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 30
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  # -- terminationGracePeriodSeconds for container lifecycle hook
-  terminationGracePeriodSeconds: 30
-
-  # -- Priority class for the authentik worker pods
-  # @default -- `""` (defaults to global.priorityClassName)
-  priorityClassName: ""
-
-  # -- [Node selector]
-  # @default -- `{}` (defaults to global.nodeSelector)
-  nodeSelector: {}
-
-  # -- [Tolerations] for use with node taints
-  # @default -- `[]` (defaults to global.tolerations)
-  tolerations: []
-
-  # -- Assign custom [affinity] rules to the deployment
-  # @default -- `{}` (defaults to the global.affinity preset)
-  affinity: {}
-
-  # -- Assign custom [TopologySpreadConstraints] rules to the authentik worker
-  # @default -- `[]` (defaults to global.topologySpreadConstraints)
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
-  topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-
-  # -- Deployment strategy to be added to the authentik worker Deployment
-  # @default -- `{}` (defaults to global.deploymentStrategy)
-  deploymentStrategy: {}
-    # type: RollingUpdate
-    # rollingUpdate:
-    #   maxSurge: 25%
-    #   maxUnavailable: 25%
-
-
-serviceAccount:
-  # -- Create service account. Needed for managed outposts
-  create: true
-  # -- additional service account annotations
-  annotations: {}
-  serviceAccountSecret:
-    # Because we use the authentik-remote-cluster chart as a subchart, and
-    # that chart creates a service account secret by default that we don't
-    # need here, disable its creation
-    enabled: false
-  fullnameOverride: authentik
-
-
-geoip:
-  # -- enable GeoIP sidecars for the authentik server and worker pods
-  enabled: false
-
-  editionIds: "GeoLite2-City GeoLite2-ASN"
-  # -- GeoIP update frequency, in hours
-  updateInterval: 8
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  accountId: ""
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  licenseKey: ""
-  ## use existing secret instead of values above
-  existingSecret:
-    # -- name of an existing secret to use instead of values above
-    secretName: ""
-    # -- key in the secret containing the account ID
-    accountId: "account_id"
-    # -- key in the secret containing the license key
-    licenseKey: "license_key"
-
-  image:
-    # -- If defined, a repository for GeoIP images
-    repository: ghcr.io/maxmind/geoipupdate
-    # -- If defined, a tag for GeoIP images
-    tag: v7.1.0
-    # -- If defined, an image digest for GeoIP images
-    digest: ""
-    # -- If defined, an imagePullPolicy for GeoIP images
-    pullPolicy: IfNotPresent
-
-  # -- Environment variables to pass to the GeoIP containers
-  # @default -- `[]` (See [values.yaml])
-  env: []
-    # - name: GEOIPUPDATE_VAR_NAME
-    #   value: VALUE
-    # - name: GEOIPUPDATE_VAR_OTHER
-    #   valueFrom:
-    #     secretKeyRef:
-    #       name: secret-name
-    #       key: secret-key
-    # - name: GEOIPUPDATE_VAR_ANOTHER
-    #   valueFrom:
-    #     configMapKeyRef:
-    #       name: config-map-name
-    #       key: config-map-key
-
-  # -- envFrom to pass to the GeoIP containers
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-    # - configMapRef:
-    #     name: config-map-name
-    # - secretRef:
-    #     name: secret-name
-
-  # -- Additional volumeMounts to the GeoIP containers. Make sure the volumes exists for the server and the worker.
-  volumeMounts: []
-    # - name: custom
-    #   mountPath: /custom
-
-  # -- Resource limits and requests for GeoIP containers
-  resources: {}
-    # requests:
-    #   cpu: 100m
-    #   memory: 128Mi
-    # limits:
-    #   memory: 128Mi
-
-  # -- GeoIP container-level security context
-  # @default -- See [values.yaml]
-  containerSecurityContext: {}
-    # Not all of the following have been tested. Use at your own risk.
-    # runAsNonRoot: true
-    # readOnlyRootFilesystem: true
-    # allowPrivilegeEscalation: false
-    # seccompProfile:
-    #   type: RuntimeDefault
-    # capabilities:
-    #   drop:
-    #     - ALL
-
-
-prometheus:
-  rules:
-    enabled: false
-    # -- PrometheusRule namespace
-    namespace: ""
-    # -- PrometheusRule selector
-    selector: {}
-      # prometheus: kube-prometheus
-
-    # -- PrometheusRule labels
-    labels: {}
-    # -- PrometheusRule annotations
-    annotations: {}
-
-
-postgresql:
-  # -- enable the Bitnami PostgreSQL chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/postgresql/ for possible values.
-  enabled: false
-  image:
-    repository: bitnami/postgresql
-    tag: 15.8.0-debian-12-r18
-  auth:
-    username: authentik
-    database: authentik
-    # password: ""
-  primary:
-    extendedConfiguration: |
-      max_connections = 500
-    resourcesPreset: "none"
-    # persistence:
-    #   enabled: true
-    #   storageClass:
-    #   accessModes:
-    #     - ReadWriteOnce
-  readReplicas:
-    resourcesPreset: "none"
-  backup:
-    resourcesPreset: "none"
-  passwordUpdateJob:
-    resourcesPreset: "none"
-  volumePermissions:
-    resourcesPreset: "none"
-  metrics:
-    resourcesPreset: "none"
-
-
-redis:
-  # -- enable the Bitnami Redis chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/redis/ for possible values.
-  enabled: false
-  architecture: standalone
-  auth:
-    enabled: false
-  master:
-    resourcesPreset: "none"
-  replica:
-    resourcesPreset: "none"
-  sentinel:
-    resourcesPreset: "none"
-  metrics:
-    resourcesPreset: "none"
-  volumePermissions:
-    resourcesPreset: "none"
-  sysctl:
-    resourcesPreset: "none"
-
-
-# -- additional resources to deploy. Those objects are templated.
-additionalObjects: []
-
diff --git a/k8s/helmfile.d/values/cert-manager/values.yml b/k8s/helmfile.d/values/cert-manager/values.yml
deleted file mode 100644
index fe409db..0000000
--- a/k8s/helmfile.d/values/cert-manager/values.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-crds:
-  enabled: true
-replicaCount: 3
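-# The flags below pin cert-manager's DNS01 self-check to public resolvers:
-# --dns01-recursive-nameservers sets which resolvers are queried, and
-# --dns01-recursive-nameservers-only stops cert-manager from falling back to
-# the pod's default (cluster) DNS, which matters when internal DNS serves
-# split-horizon records for the zones being validated. podDnsPolicy and
-# podDnsConfig apply the same idea to the pod itself.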
-extraArgs:
-  - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53
-  - --dns01-recursive-nameservers-only
-podDnsPolicy: None
-podDnsConfig:
-  nameservers:
-    - 1.1.1.1
-    - 9.9.9.9
diff --git a/k8s/helmfile.d/values/certs/values.yaml.gotmpl b/k8s/helmfile.d/values/certs/values.yaml.gotmpl
deleted file mode 100644
index a85c518..0000000
--- a/k8s/helmfile.d/values/certs/values.yaml.gotmpl
+++ /dev/null
@@ -1,28 +0,0 @@
-acmeEmail: {{ .Values.globals.certs.acmeEmail }}
-cloudflareEmail: {{ .Values.globals.certs.cloudflareEmail }}
-
-# staging or production
-issuerMode: {{ .Values.globals.certs.certIssuerMode }}
-
-issuerName: {{ .Values.globals.certs.issuerName }}
-privateKeySecretRef: {{ .Values.globals.certs.privateKeySecretRef }}
-
-certManagerNamespace: {{ .Values.globals.certManager.namespace }}
-
-cloudflareSecretToken: {{ .Values.globals.certs.cloudflareSecretToken }}
-cloudflareTokenSecretName: {{ .Values.globals.certs.cloudflareTokenSecretName }}
-
-dnsZones:
-  {{- range .Values.globals.certs.hlMnkeOrg.dnsZones }}
-  - {{ . | quote }}
-  {{- end}}
-
-certificateName: {{ .Values.globals.certs.hlMnkeOrg.certificateName }}
-certificateSecretName: {{ .Values.globals.certs.hlMnkeOrg.certificateSecretName }}
-certificateNamespace: {{ .Values.globals.certs.hlMnkeOrg.certificateNamespace }}
-
-commonName: {{ .Values.globals.certs.hlMnkeOrg.commonName }}
-dnsNames:
-  {{- range .Values.globals.certs.hlMnkeOrg.dnsNames }}
-  - {{ . | quote }}
-  {{- end}}
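-
-# For illustration only -- with hypothetical globals such as
-#   globals.certs.hlMnkeOrg.dnsNames: ["example.com", "*.example.com"]
-# the `range ... | quote` loop above renders to:
-#   dnsNames:
-#     - "example.com"
-#     - "*.example.com"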
diff --git a/k8s/helmfile.d/values/ghost/values.yaml.gotmpl b/k8s/helmfile.d/values/ghost/values.yaml.gotmpl
deleted file mode 100644
index 940b09b..0000000
--- a/k8s/helmfile.d/values/ghost/values.yaml.gotmpl
+++ /dev/null
@@ -1,876 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.ghost.storageClass }}
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-
-## @param kubeVersion Override Kubernetes version
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname
-##
-fullnameOverride: ""
-## @param commonLabels Labels to add to all deployed objects
-##
-commonLabels: {}
-## @param commonAnnotations Annotations to add to all deployed objects
-##
-commonAnnotations: {}
-## @param clusterDomain Kubernetes cluster domain name
-##
-clusterDomain: cluster.local
-## @param extraDeploy Array of extra objects to deploy with the release
-##
-extraDeploy: []
-## Enable diagnostic mode in the deployment
-##
-diagnosticMode:
-  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
-  ##
-  enabled: false
-  ## @param diagnosticMode.command Command to override all containers in the deployment
-  ##
-  command:
-    - sleep
-  ## @param diagnosticMode.args Args to override all containers in the deployment
-  ##
-  args:
-    - infinity
-## @section Ghost Image parameters
-
-## Bitnami Ghost image
-## ref: https://hub.docker.com/r/bitnami/ghost/tags/
-## @param image.registry [default: REGISTRY_NAME] Ghost image registry
-## @param image.repository [default: REPOSITORY_NAME/ghost] Ghost image repository
-## @skip image.tag Ghost image tag (immutable tags are recommended)
-## @param image.digest Ghost image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy Ghost image pull policy
-## @param image.pullSecrets Ghost image pull secrets
-## @param image.debug Enable image debug mode
-##
-image:
-  registry: docker.io
-  repository: bitnami/ghost
-  tag: 5.108.1-debian-12-r0
-  digest: ""
-  ## Specify an imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## e.g:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Enable debug mode
-  ##
-  debug: true
-## @section Ghost Configuration parameters
-## Ghost settings based on environment variables
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost#configuration
-
-## @param ghostUsername Ghost user name
-##
-ghostUsername: user
-## @param ghostPassword Ghost user password
-## Defaults to a random 10-character alphanumeric string if not set
-##
-ghostPassword: "{{ .Values.globals.ghost.ghostPassword }}"
-## @param existingSecret Name of existing secret containing Ghost credentials
-## NOTE: Must contain key `ghost-password`
-## NOTE: When it's set, the `ghostPassword` parameter is ignored
-##
-existingSecret: ""
-## @param ghostEmail Ghost user email
-##
-ghostEmail: {{ .Values.globals.ghost.ghostEmail }}
-## @param ghostBlogTitle Ghost Blog title
-##
-ghostBlogTitle: User's Blog
-## @param ghostHost Ghost host to create application URLs
-##
-ghostHost: {{ .Values.globals.ghost.primaryHost }}
-## @param ghostPath URL sub path where to serve the Ghost application
-##
-ghostPath: /
-## @param ghostEnableHttps Configure Ghost to build application URLs using https
-##
-## This controls whether URLs like the home page links use the HTTPS scheme.
-## Turning it on breaks things: the reverse proxy (or any client) gets a 301
-## redirect to https from Ghost, but Ghost doesn't actually serve TLS, so the
-## reverse proxy ends up failing with 5xx errors. We _do_ want HTTPS URLs in
-## Ghost, so we need this behavior, and for whatever reason, turning it on
-## (thereby breaking every request) and then off again (restoring normal
-## traffic) leaves Ghost generating HTTPS URLs anyway, which is the state we
-## rely on.
-##
-## - Tony
-ghostEnableHttps: false
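-##
-## A note on the usual pattern (an assumption, untested here): when TLS
-## terminates at the ingress/reverse proxy, the proxy should send
-## X-Forwarded-Proto: https so Ghost can emit https URLs without forcing its
-## own http->https redirect. Verify against the Bitnami image docs before
-## relying on it.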
-## SMTP mail delivery configuration
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost/#smtp-configuration
-## @param smtpHost SMTP server host
-## @param smtpPort SMTP server port
-## @param smtpUser SMTP username
-## @param smtpPassword SMTP user password
-## @param smtpService SMTP service
-## @param smtpProtocol SMTP protocol (ssl or tls)
-##
-smtpHost: ""
-smtpPort: ""
-smtpUser: ""
-smtpPassword: ""
-smtpService: ""
-smtpProtocol: ""
-## @param smtpExistingSecret The name of an existing secret with SMTP credentials
-## NOTE: Must contain key `smtp-password`
-## NOTE: When it's set, the `smtpPassword` parameter is ignored
-##
-smtpExistingSecret: ""
-## @param allowEmptyPassword Allow the container to be started with blank passwords
-##
-allowEmptyPassword: false
-## @param ghostSkipInstall Skip performing the initial bootstrapping for Ghost
-##
-ghostSkipInstall: false
-## @param command Override default container command (useful when using custom images)
-##
-command: []
-## @param args Override default container args (useful when using custom images)
-##
-args: []
-## @param extraEnvVars Array with extra environment variables to add to the Ghost container
-## e.g:
-## extraEnvVars:
-##   - name: FOO
-##     value: "bar"
-##
-extraEnvVars: []
-## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars
-##
-extraEnvVarsCM: ""
-## @param extraEnvVarsSecret Name of existing Secret containing extra env vars
-##
-extraEnvVarsSecret: ""
-## @section Ghost deployment parameters
-
-## @param replicaCount Number of Ghost replicas to deploy
-## NOTE: ReadWriteMany PVC(s) are required if replicaCount > 1
-##
-replicaCount: 1
-## @param updateStrategy.type Ghost deployment strategy type
-## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
-## NOTE: Set it to `Recreate` if you use a PV that cannot be mounted on multiple pods
-## e.g:
-## updateStrategy:
-##  type: RollingUpdate
-##  rollingUpdate:
-##    maxSurge: 25%
-##    maxUnavailable: 25%
-##
-updateStrategy:
-  type: RollingUpdate
-## @param priorityClassName Ghost pod priority class name
-##
-priorityClassName: ""
-## @param schedulerName Name of the k8s scheduler (other than default)
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-schedulerName: ""
-## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
-## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-## The value is evaluated as a template
-##
-topologySpreadConstraints: []
-## @param automountServiceAccountToken Mount Service Account token in pod
-##
-automountServiceAccountToken: false
-## @param hostAliases Ghost pod host aliases
-## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-##
-hostAliases: []
-## @param extraVolumes Optionally specify extra list of additional volumes for Ghost pods
-##
-extraVolumes: []
-## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Ghost container(s)
-##
-extraVolumeMounts: []
-## @param sidecars Add additional sidecar containers to the Ghost pod
-## e.g:
-## sidecars:
-##   - name: your-image-name
-##     image: your-image
-##     imagePullPolicy: Always
-##     ports:
-##       - name: portname
-##         containerPort: 1234
-##
-sidecars: []
-## @param initContainers Add additional init containers to the Ghost pods
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-## e.g:
-## initContainers:
-##  - name: your-image-name
-##    image: your-image
-##    imagePullPolicy: Always
-##    ports:
-##      - name: portname
-##        containerPort: 1234
-##
-initContainers: []
-## Pod Disruption Budget configuration
-## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-## @param pdb.create Enable/disable a Pod Disruption Budget creation
-## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
-##
-pdb:
-  create: true
-  minAvailable: ""
-  maxUnavailable: ""
-## @param lifecycleHooks Add lifecycle hooks to the Ghost deployment
-##
-lifecycleHooks: {}
-## @param podLabels Extra labels for Ghost pods
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-##
-podLabels: {}
-## @param podAnnotations Annotations for Ghost pods
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-##
-podAnnotations: {}
-## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAffinityPreset: ""
-## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAntiAffinityPreset: soft
-## Node affinity preset
-## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-##
-nodeAffinityPreset:
-  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-  ##
-  type: ""
-  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
-  ##
-  key: ""
-  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
-  ## E.g.
-  ## values:
-  ##   - e2e-az1
-  ##   - e2e-az2
-  ##
-  values: []
-## @param affinity Affinity for pod assignment
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-##
-affinity: {}
-## @param nodeSelector Node labels for pod assignment
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-##
-nodeSelector: {}
-## @param tolerations Tolerations for pod assignment
-## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
-tolerations: []
-## Ghost containers' resource requests and limits
-## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-##
-resourcesPreset: "medium"
-## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-## Example:
-## resources:
-##   requests:
-##     cpu: 2
-##     memory: 512Mi
-##   limits:
-##     cpu: 3
-##     memory: 1024Mi
-##
-resources: {}
-## Container ports
-## @param containerPorts.http Ghost HTTP container port
-## @param containerPorts.https Ghost HTTPS container port
-##
-containerPorts:
-  http: 2368
-  https: 2368
-## @param extraContainerPorts Optionally specify extra list of additional ports for Ghost container(s)
-## e.g:
-## extraContainerPorts:
-##   - name: myservice
-##     containerPort: 9090
-##
-extraContainerPorts: []
-## Configure Pods Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-## @param podSecurityContext.enabled Enable Ghost pods' Security Context
-## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
-## @param podSecurityContext.supplementalGroups Set filesystem extra groups
-## @param podSecurityContext.fsGroup Set Ghost pod's Security Context fsGroup
-##
-podSecurityContext:
-  enabled: true
-  fsGroupChangePolicy: Always
-  sysctls: []
-  supplementalGroups: []
-  fsGroup: 1001
-## Configure Container Security Context (only main container)
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-## @param containerSecurityContext.enabled Enable containers' Security Context
-## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-## @param containerSecurityContext.privileged Set container's Security Context privileged
-## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
-## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-containerSecurityContext:
-  enabled: true
-  seLinuxOptions: {}
-  runAsUser: 1001
-  runAsGroup: 1001
-  runAsNonRoot: true
-  privileged: false
-  readOnlyRootFilesystem: true
-  allowPrivilegeEscalation: false
-  # capabilities:
-    # drop: ["ALL"]
-  seccompProfile:
-    type: "RuntimeDefault"
-## Configure extra options for Ghost containers' liveness, readiness and startup probes
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-## @param startupProbe.enabled Enable startupProbe
-## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-## @param startupProbe.periodSeconds Period seconds for startupProbe
-## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
-## @param startupProbe.failureThreshold Failure threshold for startupProbe
-## @param startupProbe.successThreshold Success threshold for startupProbe
-##
-startupProbe:
-  enabled: false
-  initialDelaySeconds: 120
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-## @param livenessProbe.enabled Enable livenessProbe
-## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-## @param livenessProbe.periodSeconds Period seconds for livenessProbe
-## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
-## @param livenessProbe.successThreshold Success threshold for livenessProbe
-##
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 120
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 6
-  successThreshold: 1
-## @param readinessProbe.enabled Enable readinessProbe
-## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-## @param readinessProbe.periodSeconds Period seconds for readinessProbe
-## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
-## @param readinessProbe.successThreshold Success threshold for readinessProbe
-##
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 30
-  periodSeconds: 5
-  timeoutSeconds: 3
-  failureThreshold: 6
-  successThreshold: 1
-## @param customLivenessProbe Custom livenessProbe that overrides the default one
-##
-customLivenessProbe: {}
-## @param customReadinessProbe Custom readinessProbe that overrides the default one
-#
-# The default httpGet probe fails, and I think it's because of this:
-# - We enabled ghostEnableHttps
-# - The httpGet probe probes for http://xyz:2368, which then redirects to
-#   https://xyz:2368
-# - However, Ghost itself does not provide TLS. That option just makes HTTP
-#   redirect to HTTPS
-# - The probe is now expecting TLS, but Ghost is still sending regular HTTP
-#   and the probe thus fails
-#
-# So we simply stub the probe out with an exec check that always succeeds.
-# A TCP port check (left commented below) or curl'ing and expecting a 301
-# would also work, but neither tells us much more than this simple stub.
-customReadinessProbe:
-  exec:
-    command:
-      - "true"
-  # tcpSocket:
-  #   port: 2368
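-#
-# A sketch of the curl-based alternative mentioned above, for illustration
-# only (it assumes curl is present in the Ghost image, which may not hold):
-# customReadinessProbe:
-#   exec:
-#     command:
-#       - sh
-#       - -c
-#       - test "$(curl -s -o /dev/null -w '%{http_code}' http://localhost:2368/)" = 301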
-
-## @section Traffic Exposure Parameters
-
-## Ghost service parameters
-##
-service:
-  ## @param service.type Ghost service type
-  ##
-  type: ClusterIP
-  ## @param service.ports.http Ghost service HTTP port
-  ## @param service.ports.https Ghost service HTTPS port
-  ##
-  ports:
-    http: 80
-    https: 443
-  ## Node ports to expose
-  ## @param service.nodePorts.http Node port for HTTP
-  ## @param service.nodePorts.https Node port for HTTPS
-  ## NOTE: choose port between <30000-32767>
-  ##
-  nodePorts:
-    http: ""
-    https: ""
-  ## @param service.clusterIP Ghost service Cluster IP
-  ## e.g.:
-  ## clusterIP: None
-  ##
-  clusterIP: ""
-  ## @param service.loadBalancerIP Ghost service Load Balancer IP
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-  ##
-  loadBalancerIP: ""
-  ## @param service.loadBalancerSourceRanges Ghost service Load Balancer sources
-  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ## e.g:
-  ## loadBalancerSourceRanges:
-  ##   - 10.10.10.0/24
-  ##
-  loadBalancerSourceRanges: []
-  ## @param service.externalTrafficPolicy Ghost service external traffic policy
-  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-  ##
-  externalTrafficPolicy: Cluster
-  ## @param service.annotations Additional custom annotations for Ghost service
-  ##
-  annotations: {}
-  ## @param service.extraPorts Extra port to expose on Ghost service
-  ##
-  extraPorts: []
-  ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-  ## If "ClientIP", consecutive client requests will be directed to the same Pod
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-  ##
-  sessionAffinity: None
-  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
-  ## sessionAffinityConfig:
-  ##   clientIP:
-  ##     timeoutSeconds: 300
-  sessionAffinityConfig: {}
-## Configure the ingress resource that allows you to access the Ghost installation
-## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-##
-ingress:
-  ## @param ingress.enabled Enable ingress record generation for Ghost
-  ##
-  enabled: true
-  ## @param ingress.pathType Ingress path type
-  ##
-  pathType: ImplementationSpecific
-  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
-  ##
-  apiVersion: ""
-  ## @param ingress.hostname Default host for the ingress record
-  ##
-  hostname: {{ .Values.globals.ghost.primaryHost }}
-  ## @param ingress.path Default path for the ingress record
-  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
-  ##
-  path: /
-  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
-  ## For a full list of possible ingress annotations, please see
-  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-  ## Use this parameter to set the required annotations for cert-manager, see
-  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-  ##
-  ## e.g:
-  ## annotations:
-  ##   kubernetes.io/ingress.class: nginx
-  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-  ##
-  annotations:
-    kubernetes.io/ingress.class: {{ .Values.globals.ghost.ingressClass }}
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
-  ## TLS certificates will be retrieved from a TLS secret with name: `\{\{- printf "%s-tls" .Values.ingress.hostname \}\}`
-  ## You can:
-  ##   - Use the `ingress.secrets` parameter to create this TLS secret
-  ##   - Rely on cert-manager to create it by setting the corresponding annotations
-  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
-  ##
-  tls: true
-  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-  ##
-  selfSigned: false
-  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
-  ## e.g:
-  ## extraHosts:
-  ##   - name: ghost.local
-  ##     path: /
-  ##
-  extraHosts: []
-  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
-  ## e.g:
-  ## extraPaths:
-  ## - path: /*
-  ##   backend:
-  ##     serviceName: ssl-redirect
-  ##     servicePort: use-annotation
-  ##
-  extraPaths: []
-  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-  ## e.g:
-  ## extraTls:
-  ## - hosts:
-  ##     - ghost.local
-  ##   secretName: ghost.local-tls
-  ##
-  extraTls: []
-  ## @param ingress.secrets Custom TLS certificates as secrets
-  ## NOTE: 'key' and 'certificate' are expected in PEM format
-  ## NOTE: 'name' should line up with a 'secretName' set further up
-  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
-  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  ## e.g:
-  ## secrets:
-  ##   - name: ghost.local-tls
-  ##     key: |-
-  ##       REDACTED
-  ##       ...
-  ##       REDACTED
-  ##     certificate: |-
-  ##       -----BEGIN CERTIFICATE-----
-  ##       ...
-  ##       -----END CERTIFICATE-----
-  ##
-  secrets: []
-  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-  ##
-  ingressClassName: {{ .Values.globals.ghost.ingressClass }}
-  ## @param ingress.extraRules Additional rules to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-  ## e.g:
-  ## extraRules:
-  ## - host: example.local
-  ##     http:
-  ##       path: /
-  ##       backend:
-  ##         service:
-  ##           name: example-svc
-  ##           port:
-  ##             name: http
-  ##
-  extraRules: []
-## @section Persistence Parameters
-
-## Persistence Parameters
-## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-##
-persistence:
-  ## @param persistence.enabled Enable persistence using Persistent Volume Claims
-  ##
-  enabled: true
-  ## @param persistence.storageClass Persistent Volume storage class
-  ## If defined, storageClassName: <storageClass>
-  ## If set to "-", storageClassName: "", which disables dynamic provisioning
-  ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
-  ##
-  storageClass: ""
-  ## @param persistence.annotations Additional custom annotations for the PVC
-  ##
-  annotations: {}
-  ## @param persistence.accessModes [array] Persistent Volume access modes
-  ##
-  accessModes:
-    - ReadWriteMany
-  ## @param persistence.size Persistent Volume size
-  ##
-  size: 8Gi
-  ## @param persistence.existingClaim The name of an existing PVC to use for persistence
-  ##
-  existingClaim: ""
-  ## @param persistence.subPath The name of a volume's sub path to mount for persistence
-  ##
-  subPath: ""
-## 'volumePermissions' init container parameters
-## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
-##   based on the podSecurityContext/containerSecurityContext parameters
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
-  ##
-  enabled: false
-  ## OS Shell + Utility image
-  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
-  ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
-  ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "none"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Init container Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param volumePermissions.securityContext.runAsUser Set init container's Security Context runAsUser
-  ## NOTE: when runAsUser is set to the special value "auto", the init container will try to chown the
-  ##   data folder to an auto-determined user and group (via `id -u` and `id -G | cut -d" " -f2`)
-  ##   "auto" is especially useful for OpenShift, which has SCCs with dynamic user IDs (and 0 is not allowed)
-  ##
-  securityContext:
-    seLinuxOptions: {}
-    runAsUser: 0
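-  ## e.g., a sketch for clusters with dynamic UIDs such as OpenShift (only
-  ## relevant when volumePermissions.enabled is true):
-  ## securityContext:
-  ##   runAsUser: "auto"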
-## @section Database Parameters
-
-## MySQL chart configuration
-## ref: https://github.com/bitnami/charts/blob/main/bitnami/mysql/values.yaml
-##
-mysql:
-  ## @param mysql.enabled Deploy a MySQL server to satisfy the application's database requirements
-  ## To use an external database set this to false and configure the `externalDatabase` parameters
-  ##
-  enabled: false
-  ## @param mysql.architecture MySQL architecture. Allowed values: `standalone` or `replication`
-  ##
-  architecture: standalone
-  ## MySQL Authentication parameters
-  ## @param mysql.auth.rootPassword MySQL root password
-  ## @param mysql.auth.database MySQL custom database
-  ## @param mysql.auth.username MySQL custom user name
-  ## @param mysql.auth.password MySQL custom user password
-  ## @param mysql.auth.existingSecret Existing secret with MySQL credentials
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run
-  ##      https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-on-first-run
-  ##      https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-user-on-first-run
-  auth:
-    rootPassword: "password"
-    database: bitnami_ghost
-    username: bn_ghost
-    password: "password"
-    existingSecret: ""
-  ## MySQL Primary configuration
-  ##
-  primary:
-    ## MySQL Primary Persistence parameters
-    ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-    ## @param mysql.primary.persistence.enabled Enable persistence on MySQL using PVC(s)
-    ## @param mysql.primary.persistence.storageClass Persistent Volume storage class
-    ## @param mysql.primary.persistence.accessModes [array] Persistent Volume access modes
-    ## @param mysql.primary.persistence.size Persistent Volume size
-    ##
-    persistence:
-      enabled: true
-      storageClass: ""
-      accessModes:
-        - ReadWriteOnce
-      size: 8Gi
-    ## MySQL primary container's resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## We usually recommend not to specify default resources and to leave this as a conscious
-    ## choice for the user. This also increases chances charts run on environments with little
-    ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-    ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-    ## @param mysql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "small"
-    ## @param mysql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-    ## Example:
-    ## resources:
-    ##   requests:
-    ##     cpu: 2
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 3
-    ##     memory: 1024Mi
-    ##
-    resources: {}
-## External Database Configuration
-## All of these values are only used if `mysql.enabled=false`
-##
-externalDatabase:
-  ## @param externalDatabase.host External Database server host
-  ##
-  host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local
-  ## @param externalDatabase.port External Database server port
-  ##
-  port: 3306
-  ## @param externalDatabase.user External Database username
-  ##
-  user: {{ .Values.globals.ghost.mysql.username }}
-  ## @param externalDatabase.password External Database user password
-  ##
-  password: {{ .Values.globals.ghost.mysql.password }}
-  ## @param externalDatabase.database External Database database name
-  ##
-  database: {{ .Values.globals.ghost.mysql.database }}
-  ## @param externalDatabase.existingSecret The name of an existing secret with database credentials
-  ## NOTE: Must contain key `mysql-password`
-  ## NOTE: When it's set, the `externalDatabase.password` parameter is ignored
-  ##
-  existingSecret: ""
-  ## @param externalDatabase.ssl External Database ssl
-  ##
-  ssl: false
-  ## @param externalDatabase.sslCaFile External Database ssl CA filepath
-  ##
-  sslCaFile: ""
-## @section NetworkPolicy parameters
-
-## Network Policy configuration
-## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-##
-networkPolicy:
-  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-  ##
-  enabled: true
-  ## @param networkPolicy.allowExternal Don't require server label for connections
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## server label will have network access to the ports server is listening
-  ## on. When true, server will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           matchLabels:
-  ##             role: frontend
-  ##       - podSelector:
-  ##           matchExpressions:
-  ##             - key: role
-  ##               operator: In
-  ##               values:
-  ##                 - frontend
-  extraIngress: []
-  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           matchLabels:
-  ##             role: frontend
-  ##       - podSelector:
-  ##           matchExpressions:
-  ##             - key: role
-  ##               operator: In
-  ##               values:
-  ##                 - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
-
-## Pods Service Account
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-## @param serviceAccount.create Specifies whether a ServiceAccount should be created
-## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
-## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
-## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-##
-serviceAccount:
-  create: true
-  name: ""
-  automountServiceAccountToken: false
-  annotations: {}
-
diff --git a/k8s/helmfile.d/values/gitea/values.yaml.gotmpl b/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
deleted file mode 100644
index c688966..0000000
--- a/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
+++ /dev/null
@@ -1,782 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-##
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: ""
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-##
-
-## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
-##
-kubeVersion: ""
-## @param nameOverride String to partially override gitea.fullname template (will maintain the release name)
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override gitea.fullname template
-##
-fullnameOverride: ""
-## @param namespaceOverride String to fully override common.names.namespace
-##
-namespaceOverride: ""
-## @param commonAnnotations Common annotations to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
-##
-commonAnnotations: {}
-## @param commonLabels Common labels to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
-##
-commonLabels: {}
-## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template).
-##
-extraDeploy: []
-## @section Gitea parameters
-##
-
-## Bitnami Gitea image version
-## ref: https://hub.docker.com/r/bitnami/gitea/tags/
-## @param image.registry [default: REGISTRY_NAME] Gitea image registry
-## @param image.repository [default: REPOSITORY_NAME/gitea] Gitea Image name
-## @skip image.tag Gitea Image tag
-## @param image.digest Gitea image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy Gitea image pull policy
-## @param image.pullSecrets Specify docker-registry secret names as an array
-## @param image.debug Specify if debug logs should be enabled
-##
-image:
-  registry: docker.io
-  repository: bitnami/gitea
-  tag: 1.23.1-debian-12-r3
-  digest: ""
-  ## Specify an imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## e.g:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Set to true if you would like to see extra information on logs
-  ##
-  debug: false
-## @param adminUsername User of the application
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
-##
-adminUsername: bn_user
-## @param adminPassword Application password
-## Defaults to a random 10-character alphanumeric string if not set
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
-##
-adminPassword: ""
-## @param adminEmail Admin email
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
-##
-adminEmail: user@example.com
-## @param appName Gitea application name
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
-##
-appName: example
-## @param runMode Gitea application run mode
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
-##
-runMode: prod
-## @param exposeSSH Make the SSH server accessible
-##
-exposeSSH: true
-## @param rootURL UI Root URL (for link generation)
-##
-rootURL: ""
-## @param command Override default container command (useful when using custom images)
-##
-command: []
-## @param args Override default container args (useful when using custom images)
-##
-args: []
-## @param updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached
-## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-##
-updateStrategy:
-  type: RollingUpdate
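-## e.g., with a single replica attached to a ReadWriteOnce volume, switching
-## to Recreate avoids the stuck rollout described above (a suggestion, not
-## the chart default):
-## updateStrategy:
-##   type: Recreate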
-## @param priorityClassName Gitea pods' priorityClassName
-##
-priorityClassName: ""
-## @param schedulerName Name of the k8s scheduler (other than default)
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-schedulerName: ""
-## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
-## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-## The value is evaluated as a template
-##
-topologySpreadConstraints: []
-## @param automountServiceAccountToken Mount Service Account token in pod
-##
-automountServiceAccountToken: false
-## @param hostAliases [array] Add deployment host aliases
-## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-##
-hostAliases: []
-## @param extraEnvVars Extra environment variables
-## For example:
-##
-extraEnvVars: []
-#  - name: BEARER_AUTH
-#    value: "true"
-## @param extraEnvVarsCM ConfigMap containing extra env vars
-##
-extraEnvVarsCM: ""
-## @param extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data)
-##
-extraEnvVarsSecret: ""
-## @param extraVolumes Array of extra volumes to be added to the deployment (evaluated as template). Requires setting `extraVolumeMounts`
-##
-extraVolumes: []
-## @param extraVolumeMounts Array of extra volume mounts to be added to the container (evaluated as template). Normally used with `extraVolumes`.
-##
-extraVolumeMounts: []
-## @param initContainers Add additional init containers to the pod (evaluated as a template)
-##
-initContainers: []
-## Pod Disruption Budget configuration
-## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-## @param pdb.create Enable/disable a Pod Disruption Budget creation
-## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
-##
-pdb:
-  create: true
-  minAvailable: ""
-  maxUnavailable: ""
-## @param sidecars Attach additional containers to the pod (evaluated as a template)
-##
-sidecars: []
-## @param tolerations Tolerations for pod assignment
-## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
-tolerations: []
-## @param existingSecret Name of a secret with the application password
-##
-existingSecret: ""
-## @param existingSecretKey Key inside the existing secret containing the password
-##
-existingSecretKey: "admin-password"
-## SMTP mail delivery configuration
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea/#smtp-configuration
-## @param smtpHost SMTP host
-## @param smtpPort SMTP port
-## @param smtpUser SMTP user
-## @param smtpPassword SMTP password
-##
-smtpHost: ""
-smtpPort: ""
-smtpUser: ""
-smtpPassword: ""
-## @param smtpExistingSecret The name of an existing secret with SMTP credentials
-## NOTE: Must contain key `smtp-password`
-## NOTE: When it's set, the `smtpPassword` parameter is ignored
-##
-smtpExistingSecret: ""
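-## If `smtpExistingSecret` is used, the Secret must carry the `smtp-password`
-## key noted above. A minimal sketch (the name `gitea-smtp` is illustrative):
-## apiVersion: v1
-## kind: Secret
-## metadata:
-##   name: gitea-smtp
-## type: Opaque
-## stringData:
-##   smtp-password: <password>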
-## @param containerPorts [object] Container ports
-##
-containerPorts:
-  http: 3000
-  ssh: 2222
-## @param extraContainerPorts Optionally specify extra list of additional ports for Gitea container(s)
-## e.g:
-## extraContainerPorts:
-##   - name: myservice
-##     containerPort: 9090
-##
-extraContainerPorts: []
-## Enable OpenID Configurations
-## @param openid.enableSignIn Enable sign in with OpenID
-## @param openid.enableSignUp Enable sign up with OpenID
-openid:
-  enableSignIn: false
-  enableSignUp: false
-## Enable persistence using Persistent Volume Claims
-## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-##
-persistence:
-  ## @param persistence.enabled Enable persistence using PVC
-  ##
-  enabled: true
-  ## @param persistence.storageClass PVC Storage Class for Gitea volume
-  ## If defined, storageClassName: <storageClass>
-  ## If set to "-", storageClassName: "", which disables dynamic provisioning
-  ## If undefined (the default) or set to null, no storageClassName spec is
-  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-  ##   GKE, AWS & OpenStack)
-  ##
-  storageClass: ""
-  ## @param persistence.accessModes PVC Access Mode for Gitea volume
-  ## Requires persistence.enabled: true
-  ## If defined, PVC must be created manually before volume will be bound
-  ##
-  accessModes:
-    - ReadWriteOnce
-  ## @param persistence.size PVC Storage Request for Gitea volume
-  ##
-  size: 8Gi
-  ## @param persistence.dataSource Custom PVC data source
-  ##
-  dataSource: {}
-  ## @param persistence.existingClaim A manually managed Persistent Volume Claim
-  ## Requires persistence.enabled: true
-  ## If defined, PVC must be created manually before volume will be bound
-  ##
-  existingClaim: ""
-  ## @param persistence.hostPath If defined, the gitea-data volume will mount to the specified hostPath.
-  ## Requires persistence.enabled: true
-  ## Requires persistence.existingClaim: nil|false
-  ## Default: nil.
-  ##
-  hostPath: ""
-  ## @param persistence.annotations Persistent Volume Claim annotations
-  ##
-  annotations: {}
-  ## @param persistence.selector Selector to match an existing Persistent Volume for Gitea data PVC
-  ## If set, the PVC can't have a PV dynamically provisioned for it
-  ## E.g.
-  ## selector:
-  ##   matchLabels:
-  ##     app: my-app
-  ##
-  selector: {}
-## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAffinityPreset: ""
-## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAntiAffinityPreset: soft
-## Node affinity preset
-## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set.
-## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
-##
-nodeAffinityPreset:
-  type: ""
-  ## E.g.
-  ## key: "kubernetes.io/e2e-az-name"
-  ##
-  key: ""
-  ## E.g.
-  ## values:
-  ##   - e2e-az1
-  ##   - e2e-az2
-  ##
-  values: []
-## @param affinity Affinity for pod assignment
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-##
-affinity: {}
-## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-##
-nodeSelector: {}
-## Gitea container's resource requests and limits
-## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-##
-resourcesPreset: "micro"
-## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-## Example:
-## resources:
-##   requests:
-##     cpu: 2
-##     memory: 512Mi
-##   limits:
-##     cpu: 3
-##     memory: 1024Mi
-##
-resources: {}
-## Configure Pods Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-## @param podSecurityContext.enabled Enable Gitea pods' Security Context
-## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
-## @param podSecurityContext.supplementalGroups Set filesystem extra groups
-## @param podSecurityContext.fsGroup Gitea pods' group ID
-##
-podSecurityContext:
-  enabled: true
-  fsGroupChangePolicy: Always
-  sysctls: []
-  supplementalGroups: []
-  fsGroup: 1001
-## Configure Container Security Context (only main container)
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-## @param containerSecurityContext.enabled Enable containers' Security Context
-## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-## @param containerSecurityContext.privileged Set container's Security Context privileged
-## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
-## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-##
-containerSecurityContext:
-  enabled: true
-  seLinuxOptions: {}
-  runAsUser: 1001
-  runAsGroup: 1001
-  runAsNonRoot: true
-  privileged: false
-  readOnlyRootFilesystem: true
-  allowPrivilegeEscalation: false
-  capabilities:
-    drop: ["ALL"]
-  seccompProfile:
-    type: "RuntimeDefault"
-## Configure extra options for startup probe
-## Gitea core exposes / to unauthenticated requests, making it a good
-## default startup and readiness path. However, that may not always be the
-## case: for example, when the image value is overridden to an image containing
-## a module that alters that route, or to an image that does not auto-install Gitea.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-## @param startupProbe.enabled Enable startupProbe
-## @param startupProbe.path Request path for startupProbe
-## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-## @param startupProbe.periodSeconds Period seconds for startupProbe
-## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
-## @param startupProbe.failureThreshold Failure threshold for startupProbe
-## @param startupProbe.successThreshold Success threshold for startupProbe
-##
-startupProbe:
-  enabled: false
-  path: /
-  initialDelaySeconds: 600
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 5
-  successThreshold: 1
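-## e.g., if a custom image serves its health endpoint elsewhere, only the
-## path needs overriding (the endpoint below is hypothetical):
-## startupProbe:
-##   enabled: true
-##   path: /api/healthz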
-## Configure extra options for liveness probe
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-## @param livenessProbe.enabled Enable livenessProbe
-## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-## @param livenessProbe.periodSeconds Period seconds for livenessProbe
-## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
-## @param livenessProbe.successThreshold Success threshold for livenessProbe
-##
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 600
-  periodSeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 5
-  successThreshold: 1
-## Configure extra options for readiness probe
-## Gitea core exposes / to unauthenticated requests, making it a good
-## default startup and readiness path. However, that may not always be the
-## case: for example, when the image value is overridden to an image containing
-## a module that alters that route, or to an image that does not auto-install Gitea.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-## @param readinessProbe.enabled Enable readinessProbe
-## @param readinessProbe.path Request path for readinessProbe
-## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-## @param readinessProbe.periodSeconds Period seconds for readinessProbe
-## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
-## @param readinessProbe.successThreshold Success threshold for readinessProbe
-##
-readinessProbe:
-  enabled: true
-  path: /
-  initialDelaySeconds: 30
-  periodSeconds: 5
-  timeoutSeconds: 1
-  failureThreshold: 5
-  successThreshold: 1
-## @param customStartupProbe Override default startup probe
-##
-customStartupProbe: {}
-## @param customLivenessProbe Override default liveness probe
-##
-customLivenessProbe: {}
-## @param customReadinessProbe Override default readiness probe
-##
-customReadinessProbe: {}
-## @param lifecycleHooks LifecycleHooks to set additional configuration at startup. Evaluated as a template
-##
-lifecycleHooks: {}
-## @param podAnnotations Pod annotations
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-##
-podAnnotations: {}
-## @param podLabels Add additional labels to the pod (evaluated as a template)
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-##
-podLabels: {}
-## @section Traffic Exposure Parameters
-##
-
-## Kubernetes configuration. For minikube, set this to NodePort; elsewhere, use LoadBalancer
-##
-service:
-  ## @param service.type Kubernetes Service type
-  ##
-  type: LoadBalancer
-  ## @param service.ports.http Service HTTP port
-  ## @param service.ports.ssh Service SSH port
-  ##
-  ports:
-    http: 80
-    ssh: 22
-  ## @param service.loadBalancerSourceRanges Restricts access for LoadBalancer (only with `service.type: LoadBalancer`)
-  ## e.g:
-  ## loadBalancerSourceRanges:
-  ##   - 0.0.0.0/0
-  ##
-  loadBalancerSourceRanges: []
-  ## @param service.loadBalancerIP loadBalancerIP for the Gitea Service (optional, cloud specific)
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-  ##
-  loadBalancerIP: ""
-  ## @param service.nodePorts [object] Kubernetes node port
-  ## nodePorts:
-  ##   http: <to set explicitly, choose port between 30000-32767>
-  ##   https: <to set explicitly, choose port between 30000-32767>
-  ##
-  nodePorts:
-    http: ""
-    ssh: ""
-  ## @param service.externalTrafficPolicy Enable client source IP preservation
-  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-  ##
-  externalTrafficPolicy: Cluster
-  ## @param service.clusterIP Gitea service Cluster IP
-  ## e.g.:
-  ## clusterIP: None
-  ##
-  clusterIP: ""
-  ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-  ##
-  extraPorts: []
-  ## @param service.annotations Additional custom annotations for Gitea service
-  ##
-  annotations: {}
-  ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-  ## If "ClientIP", consecutive client requests will be directed to the same Pod
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-  ##
-  sessionAffinity: None
-  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
-  ## sessionAffinityConfig:
-  ##   clientIP:
-  ##     timeoutSeconds: 300
-  ##
-  sessionAffinityConfig: {}
-
-## Network Policy configuration
-## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-##
-networkPolicy:
-  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-  ##
-  enabled: true
-  ## @param networkPolicy.allowExternal Don't require server label for connections
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## server label will have network access to the ports server is listening
-  ## on. When true, server will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           matchLabels:
-  ##             role: frontend
-  ##       - podSelector:
-  ##           matchExpressions:
-  ##             - key: role
-  ##               operator: In
-  ##               values:
-  ##                 - frontend
-  extraIngress: []
-  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           matchLabels:
-  ##             role: frontend
-  ##       - podSelector:
-  ##           matchExpressions:
-  ##             - key: role
-  ##               operator: In
-  ##               values:
-  ##                 - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
-
-## Configure the ingress resource that allows you to access the
-## Gitea installation. Set up the URL
-## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-##
-ingress:
-  ## @param ingress.enabled Enable ingress controller resource
-  ##
-  enabled: false
-  ## @param ingress.pathType Ingress Path type
-  ##
-  pathType: ImplementationSpecific
-  ## @param ingress.apiVersion Override API Version (automatically detected if not set)
-  ##
-  apiVersion: ""
-  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-  ##
-  ingressClassName: ""
-  ## @param ingress.hostname Default host for the ingress resource
-  ##
-  hostname: "gitea.local"
-  ## @param ingress.path The Path to Gitea. You may need to set this to '/*' in order to use this
-  ## with ALB ingress controllers.
-  ##
-  path: /
-  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
-  ## For a full list of possible ingress annotations, please see
-  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-  ## Use this parameter to set the required annotations for cert-manager, see
-  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-  ##
-  ## e.g:
-  ## annotations:
-  ##   kubernetes.io/ingress.class: nginx
-  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-  ##
-  annotations: {}
-  ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
-  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
-  ##
-  tls: false
-  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-  ##
-  selfSigned: false
-  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
-  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-  ## extraHosts:
-  ## - name: gitea.local
-  ##   path: /
-  ##
-  extraHosts: []
-  ## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
-  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
-  ## extraPaths:
-  ## - path: /*
-  ##   backend:
-  ##     serviceName: ssl-redirect
-  ##     servicePort: use-annotation
-  ##
-  extraPaths: []
-  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
-  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-  ## extraTls:
-  ## - hosts:
-  ##     - gitea.local
-  ##   secretName: gitea.local-tls
-  ##
-  extraTls: []
-  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
-  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
-  ## REDACTED
-  ##
-  ## name should line up with a tlsSecret set further up
-  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
-  ##
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  ## Example:
-  ## - name: gitea.local-tls
-  ##   key:
-  ##   certificate:
-  ##
-  secrets: []
-  ## @param ingress.extraRules Additional rules to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-  ## e.g:
-  ## extraRules:
-  ## - host: example.local
-  ##     http:
-  ##       path: /
-  ##       backend:
-  ##         service:
-  ##           name: example-svc
-  ##           port:
-  ##             name: http
-  ##
-  extraRules: []
-## @section Other Parameters
-##
-
-## Service account for Gitea to use.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  ## @param serviceAccount.create Enable creation of ServiceAccount for Gitea pod
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the ServiceAccount to use.
-  ## If not set and create is true, a name is generated using the common.names.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
-  ##
-  automountServiceAccountToken: false
-  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
-  ##
-  annotations: {}
-## @section Database parameters
-##
-
-## PostgreSQL chart configuration
-## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
-## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart
-## @param postgresql.auth.username Name for a custom user to create
-## @param postgresql.auth.password Password for the custom user to create
-## @param postgresql.auth.database Name for a custom database to create
-## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials
-## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`)
-## @param postgresql.service.ports.postgresql PostgreSQL service port
-##
-postgresql:
-  enabled: false
-  auth:
-    username: bn_gitea
-    password: ""
-    database: bitnami_gitea
-    existingSecret: ""
-  architecture: standalone
-  service:
-    ports:
-      postgresql: 5432
-## External PostgreSQL configuration
-## All of these values are only used when postgresql.enabled is set to false
-## @param externalDatabase.host Database host
-## @param externalDatabase.port Database port number
-## @param externalDatabase.user Non-root username for Gitea
-## @param externalDatabase.password Password for the non-root username for Gitea
-## @param externalDatabase.database Gitea database name
-## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials
-## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials
-##
-externalDatabase:
-  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
-  port: 5432
-  user: {{ .Values.globals.gitea.postgres.username }}
-  database: {{ .Values.globals.gitea.postgres.database }}
-  password: {{ .Values.globals.gitea.postgres.password }}
-  existingSecret: ""
-  existingSecretPasswordKey: "db-password"
-## @section Volume Permissions parameters
-##
-
-## Init containers parameters:
-## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
-  ##
-  enabled: false
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name
-  ## @skip volumePermissions.image.tag Init container volume-permissions image tag
-  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
-  ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init containers' resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## We usually recommend not to specify default resources and to leave this as a conscious
-  ## choice for the user. This also increases chances charts run on environments with little
-  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-
diff --git a/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl b/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl
deleted file mode 100644
index dbcda86..0000000
--- a/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl
+++ /dev/null
@@ -1,1398 +0,0 @@
-## NOTICE
-#
-# Due to the scope and complexity of this chart, all possible values are
-# not documented in this file. Extensive documentation is available.
-#
-# Please read the docs: https://docs.gitlab.com/charts/
-#
-# Because properties are regularly added, updated, or relocated, it is
-# _strongly suggested_ not to "copy and paste" this YAML. Please provide
-# Helm only those properties you need, and allow the defaults to be
-# provided by the version of this chart at the time of deployment.
-
-## Advanced Configuration
-## https://docs.gitlab.com/charts/advanced
-#
-# Documentation for advanced configuration, such as
-# - External PostgreSQL
-# - External Gitaly
-# - External Redis
-# - External NGINX
-# - External Object Storage providers
-# - PersistentVolume configuration
-
-## The global properties are used to configure multiple charts at once.
-## https://docs.gitlab.com/charts/charts/globals
-global:
-  common:
-    labels: {}
-
-  image: {}
-    # Registry value override is only available for the following Charts:
-    # - Spamcheck
-    # - Mailroom
-    # If specifying a value here, be sure to also configure
-    # `gitlab.<subchart>.image.repository` to a value that does not
-    # include the default registry domain `registry.gitlab.com`.
-    # Support for other charts is coming as an iterative rollout.
-    # See https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2859
-    # for more information.
-    # registry:
-
-    # pullPolicy: IfNotPresent
-    # pullSecrets: []
-    # tagSuffix: ""
-
-  ## Supplemental Pod labels. Will not be used for selectors.
-  pod:
-    labels: {}
-
-  ## https://docs.gitlab.com/charts/installation/deployment#deploy-the-community-edition
-  edition: ee
-
-  ## https://docs.gitlab.com/charts/charts/globals#gitlab-version
-  gitlabVersion: "17.8.1"
-
-  ## https://docs.gitlab.com/charts/charts/globals#application-resource
-  application:
-    create: false
-    links: []
-    allowClusterRoles: true
-  ## https://docs.gitlab.com/charts/charts/globals#configure-host-settings
-  hosts:
-    domain: example.com
-    hostSuffix:
-    https: true
-    externalIP:
-    ssh:
-    gitlab: {}
-    minio: {}
-    registry: {}
-    tls: {}
-    smartcard: {}
-    kas: {}
-    pages: {}
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-ingress-settings
-  ingress:
-    apiVersion: ""
-    configureCertmanager: true
-    useNewIngressForCerts: false
-    provider: nginx
-    # class:
-    annotations: {}
-    enabled: true
-    tls: {}
-    #   enabled: true
-    #   secretName:
-    path: /
-    pathType: Prefix
-
-  # Override the API version to use for HorizontalPodAutoscaler
-  hpa:
-    apiVersion: ""
-
-  # Enable KEDA globally (https://keda.sh/)
-  keda:
-    enabled: false
-
-  # Override the API version to use for PodDisruptionBudget
-  pdb:
-    apiVersion: ""
-
-  # Override the API version to use for CronJob
-  batch:
-    cronJob:
-      apiVersion: ""
-
-  # Override enablement of ServiceMonitor and PodMonitor objects.
-  monitoring:
-    enabled: false
-
-  gitlab:
-    ## Enterprise license for this GitLab installation
-    ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#initial-enterprise-license
-    ## If allowing shared-secrets generation, this is OPTIONAL.
-    license: {}
-      # secret: RELEASE-gitlab-license
-      # key: license
-
-  ## Initial root password for this GitLab installation
-  ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#initial-root-password
-  ## If allowing shared-secrets generation, this is OPTIONAL.
-  initialRootPassword: {}
-    # secret: RELEASE-gitlab-initial-root-password
-    # key: password
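-    # A hedged sketch of creating this secret out-of-band ("RELEASE" is the
-    # Helm release name; see the secrets doc linked above):
-    #   kubectl create secret generic RELEASE-gitlab-initial-root-password \
-    #     --from-literal=password=<strong-password>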
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-postgresql-settings
-  psql:
-    connectTimeout:
-    keepalives:
-    keepalivesIdle:
-    keepalivesInterval:
-    keepalivesCount:
-    tcpUserTimeout:
-    password: {}
-      # useSecret:
-      # secret:
-      # key:
-      # file:
-    # host: postgresql.hostedsomewhere.else
-    # port: 123
-    # username: gitlab
-    # database: gitlabhq_production
-    # applicationName:
-    # preparedStatements: false
-    # databaseTasks: true
-    main: {}
-      # host: postgresql.hostedsomewhere.else
-      # port: 123
-      # username: gitlab
-      # database: gitlabhq_production
-      # applicationName:
-      # preparedStatements: false
-      # databaseTasks: true
-    ci: {}
-      # host: postgresql.hostedsomewhere.else
-      # port: 123
-      # username: gitlab
-      # database: gitlabhq_production_ci
-      # applicationName:
-      # preparedStatements: false
-      # databaseTasks: false
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-redis-settings
-  redis:
-    auth:
-      enabled: true
-      # secret:
-      # key:
-    # connectTimeout: 1
-    # readTimeout: 1
-    # writeTimeout: 1
-    # host: redis.hostedsomewhere.else
-    # port: 6379
-    # database: 0
-    # user: webservice
-    # sentinels:
-    #   - host:
-    #     port:
-    sentinelAuth:
-      enabled: false
-      # secret:
-      # key:
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-gitaly-settings
-  gitaly:
-    enabled: true
-    authToken: {}
-      # secret:
-      # key:
-    # serviceName:
-    internal:
-      names: [default]
-    external: []
-    service:
-      name: gitaly
-      type: ClusterIP
-      externalPort: 8075
-      internalPort: 8075
-      tls:
-        externalPort: 8076
-        internalPort: 8076
-    tls:
-      enabled: false
-      # secretName:
-
-  praefect:
-    enabled: false
-    ntpHost: pool.ntp.org
-    replaceInternalGitaly: true
-    authToken: {}
-    autoMigrate: true
-    dbSecret: {}
-    virtualStorages:
-      - name: default
-        gitalyReplicas: 3
-        maxUnavailable: 1
-    psql:
-      sslMode: disable
-    # serviceName:
-    service:
-      name: praefect
-      type: ClusterIP
-      externalPort: 8075
-      internalPort: 8075
-      tls:
-        externalPort: 8076
-        internalPort: 8076
-    tls:
-      enabled: false
-      # secretName:
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-minio-settings
-  minio:
-    enabled: true
-    credentials: {}
-      # secret:
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-appconfig-settings
-  ## Rails based portions of this chart share many settings
-  appConfig:
-    ## https://docs.gitlab.com/charts/charts/globals#general-application-settings
-    # cdnHost:
-    enableUsagePing: true
-    enableSeatLink: true
-    enableImpersonation:
-    applicationSettingsCacheSeconds: 60
-    usernameChangingEnabled: true
-    issueClosingPattern:
-    defaultTheme:
-    defaultColorMode:
-    defaultSyntaxHighlightingTheme:
-    defaultProjectsFeatures:
-      issues: true
-      mergeRequests: true
-      wiki: true
-      snippets: true
-      builds: true
-    graphQlTimeout:
-    webhookTimeout:
-    maxRequestDurationSeconds:
-
-    ## https://docs.gitlab.com/charts/charts/globals#cron-jobs-related-settings
-    cron_jobs: {}
-      ## Flag stuck CI builds as failed
-      # stuck_ci_jobs_worker:
-      #   cron: "0 * * * *"
-      ## Schedule pipelines in the near future
-      # pipeline_schedule_worker:
-      #   cron: "19 * * * *"
-      ## Remove expired build artifacts
-      # expire_build_artifacts_worker:
-      #   cron: "*/7 * * * *"
-      ## Periodically run 'git fsck' on all repositories.
-      # repository_check_worker:
-      #   cron: "20 * * * *"
-      ## Send admin emails once a week
-      # admin_email_worker:
-      #   cron: "0 0 * * 0"
-      ## Remove outdated repository archives
-      # repository_archive_cache_worker:
-      #   cron: "0 * * * *"
-      ## Verify custom GitLab Pages domains
-      # pages_domain_verification_cron_worker:
-      #   cron: "*/15 * * * *"
-      # schedule_migrate_external_diffs_worker:
-      #   cron: "15 * * * *"
-      ## Prune stale group runners on opted-in namespaces
-      # ci_runners_stale_group_runners_prune_worker_cron:
-      #   cron: "30 * * * *"
-      ## Periodically update ci_runner_versions table with up-to-date versions and status
-      # ci_runner_versions_reconciliation_worker:
-      #   cron: "@daily"
-      ## Periodically clean up stale ci_runner_machines records
-      # ci_runners_stale_machines_cleanup_worker:
-      #   cron: "36 * * * *"
-      # ci_click_house_finished_pipelines_sync_worker:
-      #   cron: "*/4 * * * *"
-      #   args: [0, 1]
-      ### GitLab Geo
-      # Geo Primary only!
-      # geo_prune_event_log_worker:
-      #   cron: "*/5 * * * *"
-      ## GitLab Geo repository sync worker
-      # geo_repository_sync_worker:
-      #   cron: "*/5 * * * *"
-      ## GitLab Geo file download dispatch worker
-      # geo_file_download_dispatch_worker:
-      #  cron: "*/10 * * * *"
-      ## GitLab Geo repository verification primary batch worker
-      # geo_repository_verification_primary_batch_worker:
-      #   cron: "*/5 * * * *"
-      ## GitLab Geo repository verification secondary scheduler worker
-      # geo_repository_verification_secondary_scheduler_worker:
-      #   cron: "*/5 * * * *"
-      ## GitLab Geo migrated local files clean up worker
-      # geo_migrated_local_files_clean_up_worker:
-      #   cron: "15 */6 * * *"
-      ### LDAP
-      # ldap_sync_worker:
-      #   cron: "30 1 * * *"
-      # ldap_group_sync_worker:
-      #   cron: "0 * * * *"
-      ### Snapshot active user statistics
-      # historical_data_worker:
-      #   cron: "0 12 * * *"
-      # loose_foreign_keys_cleanup_worker_cron:
-      #   cron: "*/5 * * * *"
-
-    ## https://docs.gitlab.com/charts/charts/globals#content-security-policy
-    contentSecurityPolicy:
-      enabled: false
-      report_only: true
-      # directives: {}
-
-    ## https://docs.gitlab.com/charts/charts/globals#gravatarlibravatar-settings
-    gravatar:
-      plainUrl:
-      sslUrl:
-
-    ## https://docs.gitlab.com/charts/charts/globals#hooking-analytics-services-to-the-gitlab-instance
-    extra:
-      googleAnalyticsId:
-      matomoUrl:
-      matomoSiteId:
-      matomoDisableCookies:
-      oneTrustId:
-      googleTagManagerNonceId:
-      bizible:
-
-    ## https://docs.gitlab.com/charts/charts/globals#lfs-artifacts-uploads-packages-external-mr-diffs-and-dependency-proxy
-    object_store:
-      enabled: false
-      proxy_download: true
-      storage_options: {}
-        # server_side_encryption:
-        # server_side_encryption_kms_key_id
-      connection: {}
-        # secret:
-        # key:
-    lfs:
-      enabled: true
-      proxy_download: true
-      bucket: git-lfs
-      connection: {}
-        # secret:
-        # key:
-    artifacts:
-      enabled: true
-      proxy_download: true
-      bucket: gitlab-artifacts
-      connection: {}
-        # secret:
-        # key:
-    uploads:
-      enabled: true
-      proxy_download: true
-      bucket: gitlab-uploads
-      connection: {}
-        # secret:
-        # key:
-    packages:
-      enabled: true
-      proxy_download: true
-      bucket: gitlab-packages
-      connection: {}
-    externalDiffs:
-      enabled: false
-      when:
-      proxy_download: true
-      bucket: gitlab-mr-diffs
-      connection: {}
-    terraformState:
-      enabled: false
-      bucket: gitlab-terraform-state
-      connection: {}
-    ciSecureFiles:
-      enabled: false
-      bucket: gitlab-ci-secure-files
-      connection: {}
-    dependencyProxy:
-      enabled: false
-      proxy_download: true
-      bucket: gitlab-dependency-proxy
-      connection: {}
-
-    backups:
-      bucket: gitlab-backups
-      tmpBucket: tmp
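-    ## The commented `connection` secrets above each hold a small YAML document
-    ## of provider credentials. A hedged sketch for S3-compatible storage
-    ## (field names follow the GitLab object-storage docs; values are
-    ## placeholders):
-    ##   provider: AWS
-    ##   region: us-east-1
-    ##   aws_access_key_id: <access-key>
-    ##   aws_secret_access_key: <secret-key>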
-
-    ## https://docs.gitlab.com/charts/charts/globals#outgoing-email
-    ## Microsoft Graph Mailer settings
-    microsoft_graph_mailer:
-      enabled: false
-      user_id: ""
-      tenant: ""
-      client_id: ""
-      client_secret:
-        secret: ""
-        key: secret
-      azure_ad_endpoint: "https://login.microsoftonline.com"
-      graph_endpoint: "https://graph.microsoft.com"
-
-    ## https://docs.gitlab.com/charts/installation/command-line-options.html#incoming-email-configuration
-    ## https://docs.gitlab.com/charts/charts/gitlab/mailroom/index.html#incoming-email
-    incomingEmail:
-      enabled: false
-      address: ""
-      host: "imap.gmail.com"
-      port: 993
-      ssl: true
-      startTls: false
-      user: ""
-      password:
-        secret: ""
-        key: password
-      deleteAfterDelivery: true
-      expungeDeleted: false
-      logger:
-        logPath: "/dev/stdout"
-      mailbox: inbox
-      idleTimeout: 60
-      inboxMethod: "imap"
-      clientSecret:
-        key: secret
-      pollInterval: 60
-      deliveryMethod: webhook
-      authToken: {}
-        # secret:
-        # key:
-
-    ## https://docs.gitlab.com/charts/charts/gitlab/mailroom/index.html#service-desk-email
-    serviceDeskEmail:
-      enabled: false
-      address: ""
-      host: "imap.gmail.com"
-      port: 993
-      ssl: true
-      startTls: false
-      user: ""
-      password:
-        secret: ""
-        key: password
-      deleteAfterDelivery: true
-      expungeDeleted: false
-      logger:
-        logPath: "/dev/stdout"
-      mailbox: inbox
-      idleTimeout: 60
-      inboxMethod: "imap"
-      clientSecret:
-        key: secret
-      pollInterval: 60
-      deliveryMethod: webhook
-      authToken: {}
-        # secret:
-        # key:
-
-    ## https://docs.gitlab.com/charts/charts/globals#ldap
-    ldap:
-      # prevent the use of LDAP for sign-in via web.
-      preventSignin: false
-      servers: {}
-      ## See documentation for complete example of a configured LDAP server
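-      ## A hedged sketch of one configured server (names and values are
-      ## illustrative; see the LDAP docs above for the full option set):
-      # servers:
-      #   main:
-      #     label: 'LDAP'
-      #     host: 'ldap.example.com'
-      #     port: 636
-      #     uid: 'sAMAccountName'
-      #     encryption: 'simple_tls'
-      #     base: 'dc=example,dc=com'
-      #     bind_dn: 'cn=gitlab,ou=services,dc=example,dc=com'
-      #     password:
-      #       secret: gitlab-ldap-main-password
-      #       key: password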
-
-    duoAuth:
-      enabled: false
-      # hostname:
-      # integrationKey:
-      # secretKey:
-      #   secret:
-      #   key:
-
-    ## https://docs.gitlab.com/charts/charts/globals#kas-settings
-    gitlab_kas: {}
-      # secret:
-      # key:
-      # enabled:
-      # externalUrl:
-      # internalUrl:
-
-    ## https://docs.gitlab.com/charts/charts/globals#suggested-reviewers-settings
-    suggested_reviewers: {}
-      # secret:
-      # key:
-
-    ## https://docs.gitlab.com/charts/charts/globals#omniauth
-    omniauth:
-      enabled: false
-      autoSignInWithProvider:
-      syncProfileFromProvider: []
-      syncProfileAttributes: [email]
-      allowSingleSignOn: [saml]
-      blockAutoCreatedUsers: true
-      autoLinkLdapUser: false
-      autoLinkSamlUser: false
-      autoLinkUser: []
-      externalProviders: []
-      allowBypassTwoFactor: []
-      providers: []
-      # - secret: gitlab-google-oauth2
-      #   key: provider
-
-    ## https://docs.gitlab.com/charts/charts/globals#kerberos
-    kerberos:
-      enabled: false
-      keytab:
-        # secret:
-        key: keytab
-      servicePrincipalName: ""
-      krb5Config: ""
-      dedicatedPort:
-        enabled: false
-        port: 8443
-        https: true
-      simpleLdapLinkingAllowedRealms: []
-
-    ## https://docs.gitlab.com/charts/charts/globals#configure-appconfig-settings
-    sentry:
-      enabled: false
-      dsn:
-      clientside_dsn:
-      environment:
-
-    gitlab_docs:
-      enabled: false
-      host: ""
-
-    smartcard:
-      enabled: false
-      CASecret:
-      clientCertificateRequiredHost:
-      sanExtensions: false
-      requiredForGitAccess: false
-
-    sidekiq:
-      routingRules: []
-
-    # Config that only applies to the defaults on initial install
-    initialDefaults: {}
-      # signupEnabled:
-  ## End of global.appConfig
-
-  oauth:
-    gitlab-pages: {}
-      # secret:
-      # appIdKey:
-      # appSecretKey:
-      # redirectUri:
-      # authScope:
-
-  ## https://docs.gitlab.com/charts/advanced/geo/
-  geo:
-    enabled: false
-    # Valid values: primary, secondary
-    role: primary
-    ## Geo Secondary only
-    # nodeName allows multiple instances behind a load balancer.
-    nodeName: # defaults to `gitlab.gitlab.host`
-    # ingressClass:
-    # PostgreSQL connection details only needed for `secondary`
-    psql:
-      password: {}
-      #   secret:
-      #   key:
-      # host: postgresql.hostedsomewhere.else
-      # port: 123
-      # username: gitlab_replicator
-      # database: gitlabhq_geo_production
-      # ssl:
-      #   secret:
-      #   clientKey:
-      #   clientCertificate:
-      #   serverCA:
-    registry:
-      replication:
-        enabled: false
-        primaryApiUrl:
-        ## Consumes global.registry.notificationSecret
-
-  ## https://docs.gitlab.com/charts/charts/gitlab/kas/
-  kas:
-    enabled: true
-    service:
-      apiExternalPort: 8153 # port for connections from the GitLab backend
-    tls:
-      enabled: false
-      verify: true
-      # secretName:
-      # caSecretName:
-
-  ## https://docs.gitlab.com/charts/charts/gitlab/spamcheck/
-  spamcheck:
-    enabled: false
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-gitlab-shell
-  shell:
-    authToken: {}
-    # secret:
-    # key:
-    hostKeys: {}
-      # secret:
-    ## https://docs.gitlab.com/charts/charts/globals#tcp-proxy-protocol
-    tcp:
-      proxyProtocol: false
-
-  ## Rails application secrets
-  ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#gitlab-rails-secret
-  ## If allowing shared-secrets generation, this is OPTIONAL.
-  railsSecrets: {}
-    # secret:
-
-  ## Rails generic setting, applicable to all Rails-based containers
-  rails:
-    bootsnap: # Enable / disable Shopify/Bootsnap cache
-      enabled: true
-    sessionStore:
-      sessionCookieTokenPrefix: ""
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-registry-settings
-  registry:
-    bucket: registry
-
-    certificate: {}
-      # secret:
-    httpSecret: {}
-      # secret:
-      # key:
-    notificationSecret: {}
-      # secret:
-      # key:
-    tls:
-      enabled: false
-      # secretName:
-    redis:
-      cache:
-        password: {}
-      rateLimiting:
-        password: {}
-    # https://docs.docker.com/registry/notifications/#configuration
-    notifications: {}
-      # endpoints:
-      #   - name: FooListener
-      #     url: https://foolistener.com/event
-      #     timeout: 500ms
-      #     threshold: 10 # DEPRECATED: use maxretries instead https://gitlab.com/gitlab-org/container-registry/-/issues/1243.
-      #     maxretries: 5
-      #     backoff: 1s
-      #     headers:
-      #       FooBar: ['1', '2']
-      #       Authorization:
-      #         secret: gitlab-registry-authorization-header
-      #       SpecificPassword:
-      #         secret: gitlab-registry-specific-password
-      #         key: password
-      # events: {}
-
-    # Settings utilized by other services referencing registry:
-    enabled: true
-    host:
-    # port: 443
-    api:
-      protocol: http
-      serviceName: registry
-      port: 5000
-    tokenIssuer: gitlab-issuer
-
-  pages:
-    enabled: false
-    accessControl: false
-    path:
-    host:
-    port:
-    https: # default true
-    externalHttp: []
-    externalHttps: []
-    artifactsServer: true
-    localStore:
-      enabled: false
-      # path: /srv/gitlab/shared/pages
-    objectStore:
-      enabled: true
-      bucket: gitlab-pages
-      # proxy_download: true
-      connection: {}
-        # secret:
-        # key:
-    apiSecret: {}
-      # secret:
-      # key:
-    authSecret: {}
-      # secret:
-      # key:
-    namespaceInPath: false
-
-  ## GitLab Runner
-  ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#gitlab-runner-secret
-  ## If allowing shared-secrets generation, this is OPTIONAL.
-  runner:
-    registrationToken: {}
-      # secret:
-
-  ## https://docs.gitlab.com/charts/charts/globals#outgoing-email
-  ## Outgoing email server settings
-  smtp:
-    enabled: false
-    address: smtp.mailgun.org
-    port: 2525
-    user_name: ""
-    ## https://docs.gitlab.com/charts/installation/secrets#smtp-password
-    password:
-      secret: ""
-      key: password
-    # domain:
-    authentication: "plain"
-    starttls_auto: false
-    openssl_verify_mode: "peer"
-    open_timeout: 30
-    read_timeout: 60
-    pool: false
-
-  ## https://docs.gitlab.com/charts/charts/globals#outgoing-email
-  ## Email persona used in email sent by GitLab
-  email:
-    from: ""
-    display_name: GitLab
-    reply_to: ""
-    subject_suffix: ""
-    smime:
-      enabled: false
-      secretName: ""
-      keyName: "tls.key"
-      certName: "tls.crt"
-
-  ## Timezone for containers.
-  time_zone: UTC
-
-  ## Global Service Annotations and Labels
-  service:
-    labels: {}
-    annotations: {}
-
-  ## Global Deployment Annotations
-  deployment:
-    annotations: {}
-
-
-  # Setting a global nodeAffinity only applies to the registry chart for now.
-  # See issue https://gitlab.com/gitlab-com/gl-infra/production-engineering/-/issues/25403 for more information
-
-  nodeAffinity:
-
-  antiAffinity: soft
-  affinity:
-    podAntiAffinity:
-      topologyKey: "kubernetes.io/hostname"
-    nodeAffinity:
-      key: topology.kubernetes.io/zone
-      values: []
-
-  # Priority class assigned to pods, may be overridden for individual components
-  # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
-  priorityClassName: ""
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-workhorse-settings
-  ## Global settings related to Workhorse
-  workhorse:
-    serviceName: webservice-default
-    # scheme:
-    # host:
-    # port:
-    ## https://docs.gitlab.com/charts/installation/secrets#gitlab-workhorse-secret
-    # secret:
-    # key:
-    tls:
-      enabled: false
-
-  ## https://docs.gitlab.com/charts/charts/globals#configure-webservice
-  webservice:
-    workerTimeout: 60
-
-  ## https://docs.gitlab.com/charts/charts/globals#custom-certificate-authorities
-  # configuration of certificates container & custom CA injection
-  certificates:
-    image:
-      repository: registry.gitlab.com/gitlab-org/build/cng/certificates
-      # Default tag is `global.gitlabVersion` or `master` if the former one is undefined.
-      # tag: master
-      # pullPolicy: IfNotPresent
-      # pullSecrets: []
-    customCAs: []
-    # - secret: custom-CA
-    # - secret: more-custom-CAs
-    #   keys:
-    #     - custom-ca-1.crt
-    # - configMap: custom-CA-cm
-    # - configMap: more-custom-CAs-cm
-    #   keys:
-    #     - custom-ca-2.crt
-    #     - custom-ca-3.crt
-
-  ## kubectl image used by hooks to carry out specific jobs
-  kubectl:
-    image:
-      repository: registry.gitlab.com/gitlab-org/build/cng/kubectl
-      # Default tag is `global.gitlabVersion` or `master` if the former one is undefined.
-      # tag: master
-      # pullPolicy: IfNotPresent
-      # pullSecrets: []
-    securityContext:
-      # in most base images, this is `nobody:nogroup`
-      runAsUser: 65534
-      fsGroup: 65534
-      seccompProfile:
-        type: "RuntimeDefault"
-  gitlabBase:
-    image:
-      repository: registry.gitlab.com/gitlab-org/build/cng/gitlab-base
-      # Default tag is `global.gitlabVersion` or `master` if the former one is undefined.
-      # Charts using this image as init container support further overrides with `init.image.tag`.
-      # tag: master
-      # pullPolicy: IfNotPresent
-      # pullSecrets: []
-
-  ## https://docs.gitlab.com/charts/charts/globals#service-accounts
-  serviceAccount:
-    enabled: false
-    create: true
-    annotations: {}
-    automountServiceAccountToken: false
-    ## Name to be used for serviceAccount, otherwise defaults to chart fullname
-    # name:
-
-  ## https://docs.gitlab.com/charts/charts/globals/#tracing
-  tracing:
-    connection:
-      string: ""
-    urlTemplate: ""
-
-  zoekt:
-    gateway:
-      basicAuth: {}
-    indexer:
-      internalApi: {}
-
-  ## https://docs.gitlab.com/charts/charts/globals
-  extraEnv: {}
-  #   SOME_KEY: some_value
-  #   SOME_OTHER_KEY: some_other_value
-
-  ## https://docs.gitlab.com/charts/charts/globals
-  extraEnvFrom: {}
-  #   MY_NODE_NAME:
-  #     fieldRef:
-  #       fieldPath: spec.nodeName
-  #   MY_CPU_REQUEST:
-  #     resourceFieldRef:
-  #       containerName: test-container
-  #       resource: requests.cpu
-  #   SECRET_THING:
-  #     secretKeyRef:
-  #       name: special-secret
-  #       key: special_token
-  #       # optional: boolean
-  #   CONFIG_STRING:
-  #     configMapKeyRef:
-  #       name: useful-config
-  #       key: some-string
-  #       # optional: boolean
-
-  ## https://docs.gitlab.com/charts/charts/globals/#jobs
-  job:
-    nameSuffixOverride:
-
-  traefik:
-    apiVersion: "" # newer apiVersion: "traefik.io/v1alpha1"
-
-## End of global
-
-upgradeCheck:
-  enabled: true
-  image: {}
-    # repository:
-    # tag:
-    # pullPolicy: IfNotPresent
-    # pullSecrets: []
-  securityContext:
-    # in alpine/debian/busybox based images, this is `nobody:nogroup`
-    runAsUser: 65534
-    fsGroup: 65534
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Allow to overwrite the specific security context under which the container is running.
-  containerSecurityContext:
-    runAsUser: 65534
-    allowPrivilegeEscalation: false
-    runAsNonRoot: true
-    capabilities:
-      drop: [ "ALL" ]
-  tolerations: []
-  annotations: {}
-  configMapAnnotations: {}
-  resources:
-    requests:
-      cpu: 50m
-  priorityClassName: ""
-
-## Settings for the Let's Encrypt ACME Issuer
-# certmanager-issuer:
-#   # The email address to register certificates requested from Let's Encrypt.
-#   # Required if using Let's Encrypt.
-#   email: email@example.com
-
-## Installation & configuration of jetstack/cert-manager
-## See requirements.yaml for current version
-certmanager:
-  installCRDs: true
-  nameOverride: certmanager
-  # Install cert-manager chart. Set to false if you already have cert-manager
-  # installed or if you are not using cert-manager.
-  install: true
-  # Other cert-manager configurations from upstream
-  # See https://github.com/jetstack/cert-manager/blob/master/deploy/charts/cert-manager/README#configuration
-  rbac:
-    create: true
-
-## https://docs.gitlab.com/charts/charts/nginx/
-## https://docs.gitlab.com/charts/architecture/decisions#nginx-ingress
-## Installation & configuration of charts/ingress-nginx:
-nginx-ingress: &nginx-ingress
-  enabled: true
-  tcpExternalConfig: "true"
-  controller: &nginx-ingress-controller
-    addHeaders:
-      Referrer-Policy: strict-origin-when-cross-origin
-    config: &nginx-ingress-controller-config
-      annotation-value-word-blocklist: "load_module,lua_package,_by_lua,location,root,proxy_pass,serviceaccount,{,},',\""
-      hsts: "true"
-      hsts-include-subdomains: "false"
-      hsts-max-age: "63072000"
-      server-name-hash-bucket-size: "256"
-      use-http2: "true"
-      ssl-ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4"
-      ssl-protocols: "TLSv1.3 TLSv1.2"
-      server-tokens: "false"
-      # Configure smaller defaults for upstream-keepalive-*, see https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration
-      upstream-keepalive-connections: 100 # Limit of 100 held-open connections
-      upstream-keepalive-time:        30s # 30 second limit for connection reuse
-      upstream-keepalive-timeout:       5 # 5 second timeout to hold open idle connections
-      upstream-keepalive-requests:   1000 # 1000 requests per connection, before recycling
-
-    service:
-      externalTrafficPolicy: "Local"
-    ingressClassByName: false
-    ingressClassResource:
-      name: '{{ include "ingress.class.name" $ | quote }}'
-    resources:
-      requests:
-        cpu: 100m
-        memory: 100Mi
-    publishService:
-      enabled: true
-    replicaCount: 2
-    minAvailable: 1
-    scope:
-      enabled: true
-    metrics:
-      enabled: true
-      service:
-        annotations:
-          gitlab.com/prometheus_scrape: "true"
-          gitlab.com/prometheus_port: "10254"
-          prometheus.io/scrape: "true"
-          prometheus.io/port: "10254"
-    admissionWebhooks:
-      enabled: false
-  defaultBackend:
-    resources:
-      requests:
-        cpu: 5m
-        memory: 5Mi
-  rbac:
-    create: true
-    # Needed for k8s 1.20 and 1.21
-    # https://github.com/kubernetes/ingress-nginx/issues/7510
-    # https://github.com/kubernetes/ingress-nginx/issues/7519
-    scope: false
-  serviceAccount:
-    create: true
-
-# Ingress controller to handle requests forwarded from other Geo sites.
-# Configuration differences compared to the main nginx ingress:
-#   - Pass X-Forwarded-For headers as is
-#   - Use a different IngressClass name
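-# The `<<: *nginx-ingress` and `<<: *nginx-ingress-controller` merge keys below
-# inherit the anchored blocks defined above; only the keys repeated here are
-# overridden.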
-nginx-ingress-geo:
-  <<: *nginx-ingress
-  enabled: false
-  controller:
-    <<: *nginx-ingress-controller
-    config:
-      <<: *nginx-ingress-controller-config
-      # Pass incoming X-Forwarded-* headers to upstream. Required to handle requests
-      # from other Geo sites.
-      # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#use-forwarded-headers
-      use-forwarded-headers: true
-    electionID: ingress-controller-leader-geo
-    ingressClassResource:
-      name: '{{ include "gitlab.geo.ingress.class.name" $ | quote }}'
-      controllerValue: 'k8s.io/nginx-ingress-geo'
-  # A pre-defined/static external IP can be configured with global.hosts.externalGeoIP.
-  externalIpTpl: '{{ .Values.global.hosts.externalGeoIP }}'
-
-haproxy:
-  install: false
-  controller:
-    service:
-      type: LoadBalancer
-      tcpPorts:
-        - name: ssh
-          port: 22
-          targetPort: 22
-    extraArgs:
-      - --configmap-tcp-services=$(POD_NAMESPACE)/$(POD_NAMESPACE)-haproxy-tcp
-
-## Installation & configuration of stable/prometheus
-## See requirements.yaml for current version
-prometheus:
-  install: true
-  rbac:
-    create: true
-  alertmanager:
-    enabled: false
-  alertmanagerFiles:
-    alertmanager.yml: {}
-  kubeStateMetrics:
-    enabled: false
-  nodeExporter:
-    enabled: false
-  pushgateway:
-    enabled: false
-  server:
-    retention: 15d
-    strategy:
-      type: Recreate
-    image:
-      tag: v2.38.0
-    containerSecurityContext:
-      runAsUser: 1000
-      allowPrivilegeEscalation: false
-      runAsNonRoot: true
-      capabilities:
-        drop: [ "ALL" ]
-      seccompProfile:
-        type: "RuntimeDefault"
-  podSecurityPolicy:
-    enabled: false
-  configmapReload:
-    prometheus:
-      containerSecurityContext:
-        runAsUser: 1000
-        allowPrivilegeEscalation: false
-        runAsNonRoot: true
-        capabilities:
-          drop: [ "ALL" ]
-        seccompProfile:
-          type: "RuntimeDefault"
-  serverFiles:
-    prometheus.yml:
-      scrape_configs:
-        - job_name: prometheus
-          static_configs:
-            - targets:
-                - localhost:9090
-        - job_name: kubernetes-apiservers
-          kubernetes_sd_configs:
-            - role: endpoints
-          scheme: https
-          tls_config:
-            ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-            insecure_skip_verify: true
-          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-          relabel_configs:
-            - source_labels:
-                [
-                  __meta_kubernetes_namespace,
-                  __meta_kubernetes_service_name,
-                  __meta_kubernetes_endpoint_port_name,
-                ]
-              action: keep
-              regex: default;kubernetes;https
-        - job_name: kubernetes-pods
-          kubernetes_sd_configs:
-            - role: pod
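-          # Annotation-driven discovery: keep only pods annotated with
-          # gitlab.com/prometheus_scrape=true, then let the scheme/path/port
-          # annotations override the scrape defaults.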
-          relabel_configs:
-            - source_labels:
-                [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_scrape]
-              action: keep
-              regex: true
-            - source_labels:
-                [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_scheme]
-              action: replace
-              regex: (https?)
-              target_label: __scheme__
-            - source_labels:
-                [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_path]
-              action: replace
-              target_label: __metrics_path__
-              regex: (.+)
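-            # Joins the pod IP with the annotated port: __address__ is rewritten
-            # from <ip>(:port)? to <ip>:<annotated-port>.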
-            - source_labels:
-                [
-                  __address__,
-                  __meta_kubernetes_pod_annotation_gitlab_com_prometheus_port,
-                ]
-              action: replace
-              regex: ([^:]+)(?::\d+)?;(\d+)
-              replacement: $1:$2
-              target_label: __address__
-            - action: labelmap
-              regex: __meta_kubernetes_pod_label_(.+)
-            - source_labels: [__meta_kubernetes_namespace]
-              action: replace
-              target_label: kubernetes_namespace
-            - source_labels: [__meta_kubernetes_pod_name]
-              action: replace
-              target_label: kubernetes_pod_name
-        - job_name: kubernetes-service-endpoints
-          kubernetes_sd_configs:
-            - role: endpoints
-          relabel_configs:
-            - action: keep
-              regex: true
-              source_labels:
-                - __meta_kubernetes_service_annotation_gitlab_com_prometheus_scrape
-            - action: replace
-              regex: (https?)
-              source_labels:
-                - __meta_kubernetes_service_annotation_gitlab_com_prometheus_scheme
-              target_label: __scheme__
-            - action: replace
-              regex: (.+)
-              source_labels:
-                - __meta_kubernetes_service_annotation_gitlab_com_prometheus_path
-              target_label: __metrics_path__
-            - action: replace
-              regex: ([^:]+)(?::\d+)?;(\d+)
-              replacement: $1:$2
-              source_labels:
-                - __address__
-                - __meta_kubernetes_service_annotation_gitlab_com_prometheus_port
-              target_label: __address__
-            - action: labelmap
-              regex: __meta_kubernetes_service_label_(.+)
-            - action: replace
-              source_labels:
-                - __meta_kubernetes_namespace
-              target_label: kubernetes_namespace
-            - action: replace
-              source_labels:
-                - __meta_kubernetes_service_name
-              target_label: kubernetes_name
-            - action: replace
-              source_labels:
-                - __meta_kubernetes_pod_node_name
-              target_label: kubernetes_node
-        - job_name: kubernetes-services
-          metrics_path: /probe
-          params:
-            module: [http_2xx]
-          kubernetes_sd_configs:
-            - role: service
-          relabel_configs:
-            - source_labels:
-                [
-                  __meta_kubernetes_service_annotation_gitlab_com_prometheus_probe,
-                ]
-              action: keep
-              regex: true
-            - source_labels: [__address__]
-              target_label: __param_target
-            - target_label: __address__
-              replacement: blackbox
-            - source_labels: [__param_target]
-              target_label: instance
-            - action: labelmap
-              regex: __meta_kubernetes_service_label_(.+)
-            - source_labels: [__meta_kubernetes_namespace]
-              target_label: kubernetes_namespace
-            - source_labels: [__meta_kubernetes_service_name]
-              target_label: kubernetes_name
-
-## Configuration of Redis
-## https://docs.gitlab.com/charts/architecture/decisions#redis
-## https://docs.gitlab.com/charts/installation/deployment.html#redis
-redis:
-  install: true
-  image:
-    tag: "6.2.16-debian-12-r1"
-  auth:
-    existingSecret: gitlab-redis-secret
-    existingSecretKey: redis-password
-    usePasswordFiles: true
-  architecture: standalone
-  cluster:
-    enabled: false
-  metrics:
-    enabled: true
-    image:
-      tag: "1.46.0-debian-11-r8"
-
-## Installation & configuration of stable/postgresql
-## See requirements.yaml for current version
-postgresql:
-  install: true
-  auth:
-    ## These need to be set, for the sake of bitnami/postgresql upgrade patterns.
-    ## They are overridden by use of `existingSecret`
-    password: bogus-satisfy-upgrade
-    postgresPassword: bogus-satisfy-upgrade
-    ##
-    usePasswordFiles: false
-    existingSecret: '{{ include "gitlab.psql.password.secret" . }}'
-    secretKeys:
-      adminPasswordKey: postgresql-postgres-password
-      userPasswordKey: '{{ include "gitlab.psql.password.key" $ }}'
-  image:
-    tag: 14.8.0
-  primary:
-    initdb:
-      scriptsConfigMap: '{{ include "gitlab.psql.initdbscripts" $}}'
-    extraVolumeMounts:
-      - name: custom-init-scripts
-        mountPath: /docker-entrypoint-preinitdb.d/init_revision.sh
-        subPath: init_revision.sh
-    podAnnotations:
-      postgresql.gitlab/init-revision: "1"
-  metrics:
-    enabled: true
-    service:
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "9187"
-        gitlab.com/prometheus_scrape: "true"
-        gitlab.com/prometheus_port: "9187"
-
-    ## Optionally define additional custom metrics
-    ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
-
-## Installation & configuration charts/registry
-## https://docs.gitlab.com/charts/architecture/decisions#registry
-## https://docs.gitlab.com/charts/charts/registry/
-# registry:
-#   enabled: false
-
-## Automatic shared secret generation
-## https://docs.gitlab.com/charts/installation/secrets
-## https://docs.gitlab.com/charts/charts/shared-secrets.html
-shared-secrets:
-  enabled: true
-  rbac:
-    create: true
-  selfsign:
-    image:
-      # pullPolicy: IfNotPresent
-      # pullSecrets: []
-      repository: registry.gitlab.com/gitlab-org/build/cng/cfssl-self-sign
-      # Default tag is `master`, overridable by `global.gitlabVersion`.
-      # tag: master
-    keyAlgorithm: "rsa"
-    keySize: "4096"
-    expiry: "3650d"
-    caSubject: "GitLab Helm Chart"
-  env: production
-  serviceAccount:
-    enabled: true
-    create: true
-    name: # Specify a pre-existing ServiceAccount name
-  resources:
-    requests:
-      cpu: 50m
-  securityContext:
-    # in debian/alpine based images, this is `nobody:nogroup`
-    runAsUser: 65534
-    fsGroup: 65534
-    seccompProfile:
-      type: "RuntimeDefault"
-  containerSecurityContext:
-    allowPrivilegeEscalation: false
-    runAsNonRoot: true
-    capabilities:
-      drop: [ "ALL" ]
-  tolerations: []
-  podLabels: {}
-  annotations: {}
-
-## Installation & configuration of gitlab/gitlab-runner
-## See requirements.yaml for current version
-gitlab-runner:
-  install: true
-  rbac:
-    create: true
-  runners:
-    locked: false
-    # Set secret to an arbitrary value because the runner chart renders the gitlab-runner.secret template only if it is not empty.
-    # The parent/GitLab chart overrides the template to render the actual secret name.
-    secret: "nonempty"
-    config: |
-      [[runners]]
-        [runners.kubernetes]
-        image = "ubuntu:22.04"
-        {{- if .Values.global.minio.enabled }}
-        [runners.cache]
-          Type = "s3"
-          Path = "gitlab-runner"
-          Shared = true
-          [runners.cache.s3]
-            ServerAddress = {{ include "gitlab-runner.cache-tpl.s3ServerAddress" . }}
-            BucketName = "runner-cache"
-            BucketLocation = "us-east-1"
-            Insecure = false
-        {{ end }}
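-        # When the bundled MinIO is enabled, the cache block above points the
-        # runner at its S3 endpoint via the parent chart's
-        # gitlab-runner.cache-tpl.s3ServerAddress helper.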
-  podAnnotations:
-    gitlab.com/prometheus_scrape: "true"
-    gitlab.com/prometheus_port: 9252
-  podSecurityContext:
-    seccompProfile:
-      type: "RuntimeDefault"
-
-traefik:
-  install: false
-  ports:
-    gitlab-shell:
-      expose: true
-      port: 2222
-      exposedPort: 22
-
-## Settings for individual sub-charts under GitLab
-## Note: Many of these settings are configurable via globals
-gitlab:
-  ## https://docs.gitlab.com/charts/charts/gitlab/toolbox
-  toolbox:
-    replicas: 1
-    antiAffinityLabels:
-      matchLabels:
-        app: gitaly
-  ## https://docs.gitlab.com/charts/charts/gitlab/migrations
-  #   migrations:
-  #     enabled: false
-  ## https://docs.gitlab.com/charts/charts/gitlab/webservice
-  #   webservice:
-  #     enabled: false
-  ## https://docs.gitlab.com/charts/charts/gitlab/sidekiq
-  #   sidekiq:
-  #     enabled: false
-  ## https://docs.gitlab.com/charts/charts/gitlab/gitaly
-  #   gitaly:
-  ## https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell
-  #   gitlab-shell:
-  #     enabled: false
-  ## https://docs.gitlab.com/charts/charts/gitlab/gitlab-pages
-  #   gitlab-pages:
-  ## https://docs.gitlab.com/charts/charts/gitlab/kas
-  #   kas:
-  ## https://docs.gitlab.com/charts/charts/gitlab/praefect
-  #   praefect:
-
-## Installation & configuration of gitlab/gitlab-zoekt
-gitlab-zoekt:
-  install: false
-  gateway:
-    basicAuth:
-      enabled: true
-      secretName: '{{ include "gitlab.zoekt.gateway.basicAuth.secretName" $ }}'
-  indexer:
-    internalApi:
-      enabled: true
-      secretName: '{{ include "gitlab.zoekt.indexer.internalApi.secretName" $ }}'
-      secretKey: '{{ include "gitlab.zoekt.indexer.internalApi.secretKey" $ }}'
-      gitlabUrl: '{{ include "gitlab.zoekt.indexer.internalApi.gitlabUrl" $ }}'
diff --git a/k8s/helmfile.d/values/globals/.gitignore b/k8s/helmfile.d/values/globals/.gitignore
deleted file mode 100644
index 03cdeec..0000000
--- a/k8s/helmfile.d/values/globals/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-secrets.yaml
diff --git a/k8s/helmfile.d/values/globals/staging.yaml.gotmpl b/k8s/helmfile.d/values/globals/staging.yaml.gotmpl
deleted file mode 100644
index 7d7df2c..0000000
--- a/k8s/helmfile.d/values/globals/staging.yaml.gotmpl
+++ /dev/null
@@ -1,216 +0,0 @@
-{{ $email := "tonydu121@hotmail.com" }}
-{{ $domain := "mnke.org" }}
-{{ $subdomain := "dolo" }}
-{{ $appDomain := print $subdomain "." $domain }}
-# This should be an IP in the MetalLB range
-{{ $primaryLoadBalancerIP := "10.0.185.128" }}
-{{ $environment := "staging" }}
-{{ $ingressClass := "traefik" }}
-{{ $nfsStorageClass := "nfs-client" }}
-{{ $longhornStorageClass := "longhorn" }}
-
-{{
-    $ghostDatabase := dict
-    "database" "ghost"
-    "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/username" )
-    "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/password" )
-}}
-{{
-  $mysqlDatabases := list
-  $ghostDatabase
-}}
-
-{{
-  $authentikDatabase := dict
-  "database" "authentik"
-  "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/username" )
-  "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/password" )
-}}
-{{
-  $harborDatabase := dict
-  "database" "harborcore"
-  "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/username" )
-  "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/password" )
-}}
-{{
-  $giteaDatabase := dict
-  "database" "gitea"
-  "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/username" )
-  "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/password" )
-}}
-{{
-  $postgresDatabases := list
-  $authentikDatabase
-  $harborDatabase
-  $giteaDatabase
-}}
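-
-# The ref+file URIs above are resolved (via helmfile's vals integration)
-# against the gitignored values/globals/secrets.yaml. A minimal sketch of its
-# shape, inferred from the refs used in this file (values are placeholders):
-#
-#   ghost:
-#     mysql:
-#       username: <user>
-#       password: <pass>
-#   cloudflare:
-#     secretToken: <api-token>
-#   rancher:
-#     bootstrapPassword: <password>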
-
-globals:
-  email: {{ $email }}
-  environment: {{ $environment }}
-
-  certManager:
-    namespace: cert-manager
-
-  traefik:
-    namespace: traefik
-    ingressClass: {{ $ingressClass }}
-    loadBalancerIP: {{ $primaryLoadBalancerIP }}
-
-  certs:
-    acmeEmail: {{ $email }}
-    cloudflareEmail: {{ $email }}
-    certIssuerMode: {{ $environment }}
-
-    cloudflareSecretToken: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#cloudflare/secretToken" }}
-    cloudflareTokenSecretName: cloudflare-token-secret
-
-    issuerName: letsencrypt
-    privateKeySecretRef: letsencrypt
-
-    hlMnkeOrg:
-      certificateName: {{ $subdomain }}.{{ $domain }}
-      certificateSecretName: {{ $subdomain }}.{{ $domain }}-tls
-      certificateNamespace: default
-      commonName: "{{ $appDomain }}"
-      dnsZones:
-        - "{{ $domain }}"
-      dnsNames:
-        - "{{ $appDomain }}"
-        - "*.{{ $appDomain }}"
-
-  longhorn:
-    namespace: longhorn-system
-    storageClass: {{ $longhornStorageClass }}
-
-  nfsSubdirExternalProvisioner:
-    namespace: nfs-subdir-external-provisioner
-    replicaCount: 1
-    nfs:
-      server: truenas.local
-      path: /mnt/emc14t9/k8s-pv
-    storageClass: {{ $nfsStorageClass }}
-    accessModes: ReadWriteMany
-
-  rancher:
-    namespace: cattle-system
-    ingressClass: {{ $ingressClass }}
-    hostname: rancher.{{ $appDomain }}
-    replicas: 3
-    bootstrapPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#rancher/bootstrapPassword" }}
-
-  uptimeKuma:
-    namespace: uptime-kuma
-    ingressClass: {{ $ingressClass }}
-    hosts:
-      - uptime.{{ $appDomain }}
-    storageClass: {{ $longhornStorageClass }}
-
-  mysql:
-    namespace: db
-    storageClass: {{ $longhornStorageClass }}
-    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/username" }}
-    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/password" }}
-    rootPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/rootPassword" }}
-    databases:
-      {{ $mysqlDatabases | toYaml | nindent 4 }}
-
-  postgres:
-    namespace: db
-    storageClass: {{ $longhornStorageClass }}
-    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/username" }}
-    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/password" }}
-    postgresPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/postgresPassword" }}
-    databases:
-      {{ $postgresDatabases | toYaml | nindent 4 }}
-
-  phpmyadmin:
-    namespace: db
-    hostname: pma.{{ $appDomain }}
-    storageClass: {{ $longhornStorageClass }}
-    ingressClass: {{ $ingressClass }}
-
-  pgadmin4:
-    namespace: db
-    hostname: pg.{{ $appDomain }}
-    ingressClass: {{ $ingressClass }}
-    storageClass: {{ $longhornStorageClass }}
-    storageSize: 2Gi
-    accessMode: ReadWriteOnce
-    # can be email or nickname
-    email: tony@mnke.org
-    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#pgadmin4/password" }}
-
-  redis:
-    namespace: redis
-    storageClass: {{ $longhornStorageClass }}
-    storageSize: 8Gi
-    accessMode: ReadWriteMany
-    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#redis/password" }}
-
-  ghost:
-    namespace: ghost
-    primaryHost: blog.mnke.org
-    storageClass: {{ $longhornStorageClass }}
-    ingressClass: {{ $ingressClass }}
-    ghostEmail: {{ $email }}
-    ghostPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/password" }}
-    mysql:
-      {{ $ghostDatabase | toYaml | nindent 6 }}
-
-  authentik:
-    namespace: authentik
-    storageClass: {{ $longhornStorageClass }}
-    ingressClass: {{ $ingressClass }}
-    secretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/secretKey" }}
-    hostnames:
-      - auth.{{ $appDomain }}
-      - auth.{{ $domain }}
-    postgres:
-      {{ $authentikDatabase | toYaml | nindent 6 }}
-
-  harbor:
-    namespace: harbor
-    hostname: harbor.{{ $appDomain }}
-    ingressClass: {{ $ingressClass }}
-    storageClass: {{ $nfsStorageClass }}
-    username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/username" }}
-    password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/password" }}
-    htpasswd: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/htpasswd" }}
-    registrySecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/registrySecret" }}
-    jobserviceSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/jobserviceSecret" }}
-    coreSecretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecretKey" }}
-    coreSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecret" }}
-    coreCsrfKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreCsrfKey" }}
-    coreTlsKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsKey" | quote }}
-    coreTlsCert: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsCert" | quote }}
-
-    postgres:
-      {{ $harborDatabase | toYaml | nindent 6 }}
-
-  kubePrometheusStack:
-    namespace: kube-prometheus-stack
-    ingressClass: {{ $ingressClass }}
-    storageClass: {{ $nfsStorageClass }}
-    thanosRuler:
-      storageSize: 4Gi
-    prometheus:
-      storageSize: 4Gi
-    grafana:
-      storageSize: 4Gi
-      adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#kubePrometheusStack/grafana/adminPassword" }}
-      hosts:
-        - gf.{{ $appDomain }}
-
-  argocd:
-    namespace: argo-cd
-    ingressClass: {{ $ingressClass }}
-    storageClass: {{ $nfsStorageClass }}
-    hostname: argocd.{{ $appDomain }}
-    adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#argocd/adminPassword" }}
-
-  gitea:
-    namespace: gitea
-    ingressClass: {{ $ingressClass }}
-    postgres:
-      {{ $giteaDatabase | toYaml | nindent 6 }}
diff --git a/k8s/helmfile.d/values/harbor/values.yaml.gotmpl b/k8s/helmfile.d/values/harbor/values.yaml.gotmpl
deleted file mode 100644
index a0e3988..0000000
--- a/k8s/helmfile.d/values/harbor/values.yaml.gotmpl
+++ /dev/null
@@ -1,3815 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-##
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.harbor.storageClass }}
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common Parameters
-##
-
-## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname template with a string
-##
-fullnameOverride: ""
-## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
-##
-kubeVersion: ""
-## @param clusterDomain Kubernetes Cluster Domain
-##
-clusterDomain: cluster.local
-## @param commonAnnotations Annotations to add to all deployed objects
-##
-commonAnnotations: {}
-## @param commonLabels Labels to add to all deployed objects
-##
-commonLabels: {}
-## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template).
-##
-extraDeploy: []
-## Enable diagnostic mode in the deployment(s)/statefulset(s)
-##
-diagnosticMode:
-  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
-  ##
-  enabled: false
-  ## @param diagnosticMode.command Command to override all containers in the deployment(s)/statefulset(s)
-  ##
-  command:
-    - sleep
-  ## @param diagnosticMode.args Args to override all containers in the deployment(s)/statefulset(s)
-  ##
-  args:
-    - infinity
-## @section Harbor common parameters
-##
-
-## @param adminPassword The initial password of Harbor admin. Change it from portal after launching Harbor
-##
-adminPassword: {{ .Values.globals.harbor.password }}
-## @param externalURL The external URL for Harbor Core service
-## It is used to
-## 1) populate the docker/helm commands showed on portal
-##
-## Format: protocol://domain[:port]. Usually:
-## 1) if "exposureType" is "ingress", the "domain" should be
-## the value of "ingress.hostname"
-## 2) if "exposureType" is "proxy" and "service.type" is "ClusterIP",
-## the "domain" should be the value of "service.clusterIP"
-## 3) if "exposureType" is "proxy" and "service.type" is "NodePort",
-## the "domain" should be the IP address of k8s node
-## 4) if "exposureType" is "proxy" and "service.type" is "LoadBalancer",
-## the "domain" should be the LoadBalancer IP
-##
-externalURL: https://{{ .Values.globals.harbor.hostname }}
-## Note: If Harbor is exposed via Ingress, the NGINX server will not be used
-## @param proxy.httpProxy The URL of the HTTP proxy server
-## @param proxy.httpsProxy The URL of the HTTPS proxy server
-## @param proxy.noProxy The URLs that the proxy settings not apply to
-## @param proxy.components The component list that the proxy settings apply to
-##
-proxy:
-  httpProxy: ""
-  httpsProxy: ""
-  noProxy: 127.0.0.1,localhost,.local,.internal
-  components:
-    - core
-    - jobservice
-    - trivy
-## @param logLevel The log level used for Harbor services. Allowed values are [ fatal \| error \| warn \| info \| debug \| trace ]
-##
-logLevel: debug
-## TLS settings
-## Note: TLS cert files need to be provided to each component in advance.
-##
-internalTLS:
-  ## @param internalTLS.enabled Use TLS in all the supported containers: core, jobservice, portal, registry and trivy
-  ##
-  enabled: false
-  ## @param internalTLS.caBundleSecret Name of an existing secret with a custom CA that will be injected into the trust store for core, jobservice, registry, trivy components
-  ## The secret must contain the key "ca.crt"
-  ##
-  caBundleSecret: ""
-## IP family parameters
-##
-ipFamily:
-  ## @param ipFamily.ipv6.enabled Enable listening on IPv6 ([::]) for NGINX-based components (NGINX,portal)
-  ## Note: enabling IPv6 will cause NGINX to crash on start on systems with IPv6 disabled (`ipv6.disable` kernel flag)
-  ##
-  ipv6:
-    enabled: false
-  ## @param ipFamily.ipv4.enabled Enable listening on IPv4 for NGINX-based components (NGINX,portal)
-  ##
-  ipv4:
-    enabled: true
-## @section Traffic Exposure Parameters
-##
-
-## @param exposureType The way to expose Harbor. Allowed values are [ ingress \| proxy ]
-## Use "proxy" to use a deploy NGINX proxy in front of Harbor services
-## Use "ingress" to use an Ingress Controller as proxy
-##
-exposureType: ingress
-## Service parameters
-##
-service:
-  ## @param service.type NGINX proxy service type
-  ##
-  type: ClusterIP
-  ## @param service.ports.http NGINX proxy service HTTP port
-  ## @param service.ports.https NGINX proxy service HTTPS port
-  ##
-  ports:
-    http: 80
-    https: 443
-  ## Node ports to expose
-  ## @param service.nodePorts.http Node port for HTTP
-  ## @param service.nodePorts.https Node port for HTTPS
-  ## NOTE: choose port between <30000-32767>
-  ##
-  nodePorts:
-    http: ""
-    https: ""
-  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
-  ## Values: ClientIP or None
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
-  ##
-  sessionAffinity: None
-  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
-  ## sessionAffinityConfig:
-  ##   clientIP:
-  ##     timeoutSeconds: 300
-  ##
-  sessionAffinityConfig: {}
-  ## @param service.clusterIP NGINX proxy service Cluster IP
-  ## e.g.:
-  ## clusterIP: None
-  ##
-  clusterIP: ""
-  ## @param service.loadBalancerIP NGINX proxy service Load Balancer IP
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-  ##
-  loadBalancerIP: ""
-  ## @param service.loadBalancerSourceRanges NGINX proxy service Load Balancer sources
-  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ## e.g:
-  ## loadBalancerSourceRanges:
-  ##   - 10.10.10.0/24
-  ##
-  loadBalancerSourceRanges: []
-  ## @param service.externalTrafficPolicy NGINX proxy service external traffic policy
-  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-  ##
-  externalTrafficPolicy: Cluster
-  ## @param service.annotations Additional custom annotations for NGINX proxy service
-  ##
-  annotations: {}
-  ## @param service.extraPorts Extra port to expose on NGINX proxy service
-  ##
-  extraPorts: []
-ingress:
-  ## Configure the ingress resource that allows you to access Harbor Core
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-  ##
-  core:
-    ## @param ingress.core.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-    ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-    ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-    ##
-    ingressClassName: {{ .Values.globals.harbor.ingressClass }}
-    ## @param ingress.core.pathType Ingress path type
-    ##
-    pathType: ImplementationSpecific
-    ## @param ingress.core.apiVersion Force Ingress API version (automatically detected if not set)
-    ##
-    apiVersion: ""
-    ## @param ingress.core.controller The ingress controller type. Currently supports `default`, `gce` and `ncp`
-    ## leave as `default` for most ingress controllers.
-    ## set to `gce` if using the GCE ingress controller
-    ## set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
-    ##
-    controller: default
-    ## @param ingress.core.hostname Default host for the ingress record
-    ##
-    hostname: {{ .Values.globals.harbor.hostname }}
-    ## @param ingress.core.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
-    ## Use this parameter to set the required annotations for cert-manager, see
-    ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-    ## e.g:
-    ## annotations:
-    ##   kubernetes.io/ingress.class: nginx
-    ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-    ##
-    annotations:
-      cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-      kubernetes.io/ingress.class: {{ .Values.globals.harbor.ingressClass }}
-    ## @param ingress.core.tls Enable TLS configuration for the host defined at `ingress.core.hostname` parameter
-    ## TLS certificates will be retrieved from a TLS secret.
-    ## You can:
-    ##   - Use the `ingress.core.secrets` parameter to create this TLS secret
-    ##   - Rely on cert-manager to create it by setting the corresponding annotations
-    ##   - Rely on Helm to create self-signed certificates by setting `ingress.core.selfSigned=true`
-    ##
-    tls: true
-    ## @param ingress.core.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-    ##
-    selfSigned: false
-    ## @param ingress.core.extraHosts An array with additional hostname(s) to be covered with the ingress record
-    ## e.g:
-    ## extraHosts:
-    ##   - name: core.harbor.domain
-    ##     path: /
-    ##
-    extraHosts: []
-    ## @param ingress.core.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
-    ## e.g:
-    ## extraPaths:
-    ## - path: /*
-    ##   backend:
-    ##     serviceName: ssl-redirect
-    ##     servicePort: use-annotation
-    ##
-    extraPaths: []
-    ## @param ingress.core.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-    ## e.g:
-    ## extraTls:
-    ## - hosts:
-    ##     - core.harbor.domain
-    ##   secretName: core.harbor.domain-tls
-    ##
-    extraTls: []
-    ## @param ingress.core.secrets Custom TLS certificates as secrets
-    ## NOTE: 'key' and 'certificate' are expected in PEM format
-    ## NOTE: 'name' should line up with a 'secretName' set further up
-    ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
-    ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
-    ## It is also possible to create and manage the certificates outside of this helm chart
-    ## Please see README.md for more information
-    ## e.g:
-    ## secrets:
-    ##   - name: core.harbor.domain-tls
-    ##     key: |-
-    ##       REDACTED
-    ##       ...
-    ##       REDACTED
-    ##     certificate: |-
-    ##       -----BEGIN CERTIFICATE-----
-    ##       ...
-    ##       -----END CERTIFICATE-----
-    ##
-    secrets: []
-    ## @param ingress.core.extraRules Additional rules to be covered with this ingress record
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-    ## e.g:
-    ## extraRules:
-    ##   - host: example.local
-    ##     http:
-    ##       paths:
-    ##         - path: /
-    ##           pathType: ImplementationSpecific
-    ##           backend:
-    ##             service:
-    ##               name: example-svc
-    ##               port:
-    ##                 name: http
-    ##
-    extraRules: []
-    ##
-## @section Persistence Parameters
-##
-
-## Persistence is enabled by default and a default StorageClass
-## is needed in the k8s cluster to provision volumes dynamically.
-## Specify another StorageClass in the "storageClass" or set "existingClaim"
-## if you have already existing persistent volumes to use
-##
-## For storing images and charts, you can also use "azure", "gcs", "s3",
-## "swift" or "oss". Set it in the "imageChartStorage" section
-##
-persistence:
-  ## @param persistence.enabled Enable the data persistence or not
-  ##
-  enabled: true
-  ## Resource Policy
-  ## @param persistence.resourcePolicy Set it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted
-  ##
-  resourcePolicy: "keep"
-  persistentVolumeClaim:
-    ## @param persistence.persistentVolumeClaim.registry.existingClaim Name of an existing PVC to use
-    ## @param persistence.persistentVolumeClaim.registry.storageClass PVC Storage Class for Harbor Registry data volume
-    ## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
-    ## @param persistence.persistentVolumeClaim.registry.subPath The sub path used in the volume
-    ## @param persistence.persistentVolumeClaim.registry.accessModes The access mode of the volume
-    ## @param persistence.persistentVolumeClaim.registry.size The size of the volume
-    ## @param persistence.persistentVolumeClaim.registry.annotations Annotations for the PVC
-    ## @param persistence.persistentVolumeClaim.registry.selector Selector to match an existing Persistent Volume
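-    ## e.g. to reuse a pre-provisioned claim instead of dynamic provisioning (the claim name is hypothetical):
-    ## registry:
-    ##   existingClaim: "harbor-registry-data"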
-    ##
-    registry:
-      existingClaim: ""
-      storageClass: ""
-      subPath: ""
-      accessModes:
-        - ReadWriteOnce
-      size: 5Gi
-      annotations: {}
-      selector: {}
-    ## @param persistence.persistentVolumeClaim.jobservice.existingClaim Name of an existing PVC to use
-    ## @param persistence.persistentVolumeClaim.jobservice.storageClass PVC Storage Class for Harbor Jobservice data volume
-    ## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
-    ## @param persistence.persistentVolumeClaim.jobservice.subPath The sub path used in the volume
-    ## @param persistence.persistentVolumeClaim.jobservice.accessModes The access mode of the volume
-    ## @param persistence.persistentVolumeClaim.jobservice.size The size of the volume
-    ## @param persistence.persistentVolumeClaim.jobservice.annotations Annotations for the PVC
-    ## @param persistence.persistentVolumeClaim.jobservice.selector Selector to match an existing Persistent Volume
-    ##
-    jobservice:
-      existingClaim: ""
-      storageClass: ""
-      subPath: ""
-      accessModes:
-        - ReadWriteOnce
-      size: 1Gi
-      annotations: {}
-      selector: {}
-    ## @param persistence.persistentVolumeClaim.trivy.storageClass PVC Storage Class for Trivy data volume
-    ## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning
-    ## @param persistence.persistentVolumeClaim.trivy.accessModes The access mode of the volume
-    ## @param persistence.persistentVolumeClaim.trivy.size The size of the volume
-    ## @param persistence.persistentVolumeClaim.trivy.annotations Annotations for the PVC
-    ## @param persistence.persistentVolumeClaim.trivy.selector Selector to match an existing Persistent Volume
-    ##
-    trivy:
-      storageClass: ""
-      accessModes:
-        - ReadWriteOnce
-      size: 5Gi
-      annotations: {}
-      selector: {}
-  ## Define which storage backend is used for registry to store
-  ## images and charts.
-  ## ref: https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
-  ##
-  imageChartStorage:
-    ## @param persistence.imageChartStorage.caBundleSecret Specify the `caBundleSecret` if the storage service uses a self-signed certificate. The secret must contain a key named `ca.crt` which will be injected into the trust store of registry's containers.
-    ##
-    caBundleSecret: ""
-    ## @param persistence.imageChartStorage.disableredirect The configuration for managing redirects from content backends. For backends which do not support it (such as using MinIO&reg; for `s3` storage type), please set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more information
-    ##
-    disableredirect: false
-    ## @param persistence.imageChartStorage.type The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more information
-    ##
-    type: filesystem
-    ## Images/charts storage parameters when type is "filesystem"
-    ## @param persistence.imageChartStorage.filesystem.rootdirectory Filesystem storage type setting: Storage root directory
-    ## @param persistence.imageChartStorage.filesystem.maxthreads Filesystem storage type setting: Maximum threads
-    ##
-    filesystem:
-      rootdirectory: /storage
-      maxthreads: ""
-    ## Images/charts storage parameters when type is "azure"
-    ## @param persistence.imageChartStorage.azure.accountname Azure storage type setting: Name of the Azure account
-    ## @param persistence.imageChartStorage.azure.accountkey Azure storage type setting: Key of the Azure account
-    ## @param persistence.imageChartStorage.azure.container Azure storage type setting: Container
-    ## @param persistence.imageChartStorage.azure.storagePrefix Azure storage type setting: Storage prefix
-    ## @param persistence.imageChartStorage.azure.realm Azure storage type setting: Realm of the Azure account
-    ##
-    azure:
-      accountname: accountname
-      accountkey: base64encodedaccountkey
-      container: containername
-      storagePrefix: /azure/harbor/charts
-      ## Example realm
-      ## realm: core.windows.net
-      ##
-      realm: ""
-    ## Images/charts storage parameters when type is "gcs"
-    ## @param persistence.imageChartStorage.gcs.bucket GCS storage type setting: Bucket name
-    ## @param persistence.imageChartStorage.gcs.encodedkey GCS storage type setting: Base64 encoded key
-    ## @param persistence.imageChartStorage.gcs.rootdirectory GCS storage type setting: Root directory name
-    ## @param persistence.imageChartStorage.gcs.chunksize GCS storage type setting: Chunk size
-    ##
-    gcs:
-      bucket: bucketname
-      ## The base64 encoded json file which contains the gcs key (file's content)
-      ##
-      encodedkey: ""
-      rootdirectory: ""
-      chunksize: ""
-    ## Images/charts storage parameters when type is "s3"
-    ## ref: https://docs.docker.com/registry/storage-drivers/s3/
-    ## @param persistence.imageChartStorage.s3.region S3 storage type setting: Region
-    ## @param persistence.imageChartStorage.s3.bucket S3 storage type setting: Bucket name
-    ## @param persistence.imageChartStorage.s3.accesskey S3 storage type setting: Access key name
-    ## @param persistence.imageChartStorage.s3.secretkey S3 storage type setting: Secret Key name
-    ## @param persistence.imageChartStorage.s3.regionendpoint S3 storage type setting: Region Endpoint
-    ## @param persistence.imageChartStorage.s3.encrypt S3 storage type setting: Encrypt
-    ## @param persistence.imageChartStorage.s3.keyid S3 storage type setting: Key ID
-    ## @param persistence.imageChartStorage.s3.secure S3 storage type setting: Secure
-    ## @param persistence.imageChartStorage.s3.skipverify S3 storage type setting: TLS skip verification
-    ## @param persistence.imageChartStorage.s3.v4auth S3 storage type setting: V4 authorization
-    ## @param persistence.imageChartStorage.s3.chunksize S3 storage type setting: Chunk size
-    ## @param persistence.imageChartStorage.s3.rootdirectory S3 storage type setting: Root directory name
-    ## @param persistence.imageChartStorage.s3.storageClass S3 storage type setting: Storage class
-    ## @param persistence.imageChartStorage.s3.sse S3 storage type setting: SSE name
-    ## @param persistence.imageChartStorage.s3.multipartcopythresholdsize S3 storage type setting: Threshold size for multipart copy
-    ##
-    s3:
-      region: us-west-1
-      bucket: bucketname
-      accesskey: ""
-      secretkey: ""
-      regionendpoint: ""
-      encrypt: ""
-      keyid: ""
-      secure: ""
-      skipverify: ""
-      v4auth: ""
-      chunksize: ""
-      rootdirectory: ""
-      storageClass: ""
-      sse: ""
-      multipartcopythresholdsize: ""
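-    ## e.g. an S3-compatible MinIO backend (the endpoint and bucket names are hypothetical; per the
-    ## `disableredirect` note above, MinIO also needs `disableredirect: true`):
-    ## type: s3
-    ## s3:
-    ##   region: us-east-1
-    ##   bucket: harbor-registry
-    ##   regionendpoint: http://minio.example.svc.cluster.local:9000
-    ##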
-    ## Images/charts storage parameters when type is "swift"
-    ## @param persistence.imageChartStorage.swift.authurl Swift storage type setting: Authentication url
-    ## @param persistence.imageChartStorage.swift.username Swift storage type setting: Username
-    ## @param persistence.imageChartStorage.swift.password Swift storage type setting: Password
-    ## @param persistence.imageChartStorage.swift.container Swift storage type setting: Container
-    ## @param persistence.imageChartStorage.swift.region Swift storage type setting: Region
-    ## @param persistence.imageChartStorage.swift.tenant Swift storage type setting: Tenant
-    ## @param persistence.imageChartStorage.swift.tenantid Swift storage type setting: TenantID
-    ## @param persistence.imageChartStorage.swift.domain Swift storage type setting: Domain
-    ## @param persistence.imageChartStorage.swift.domainid Swift storage type setting: DomainID
-    ## @param persistence.imageChartStorage.swift.trustid Swift storage type setting: TrustID
-    ## @param persistence.imageChartStorage.swift.insecureskipverify Swift storage type setting: Verification
-    ## @param persistence.imageChartStorage.swift.chunksize Swift storage type setting: Chunk
-    ## @param persistence.imageChartStorage.swift.prefix Swift storage type setting: Prefix
-    ## @param persistence.imageChartStorage.swift.secretkey Swift storage type setting: Secret key
-    ## @param persistence.imageChartStorage.swift.accesskey Swift storage type setting: Access Key
-    ## @param persistence.imageChartStorage.swift.authversion Swift storage type setting: Auth
-    ## @param persistence.imageChartStorage.swift.endpointtype Swift storage type setting: Endpoint
-    ## @param persistence.imageChartStorage.swift.tempurlcontainerkey Swift storage type setting: Temp URL container key
-    ## @param persistence.imageChartStorage.swift.tempurlmethods Swift storage type setting: Temp URL methods
-    ##
-    swift:
-      authurl: https://storage.myprovider.com/v3/auth
-      username: ""
-      password: ""
-      container: ""
-      region: ""
-      tenant: ""
-      tenantid: ""
-      domain: ""
-      domainid: ""
-      trustid: ""
-      insecureskipverify: ""
-      chunksize: ""
-      prefix: ""
-      secretkey: ""
-      accesskey: ""
-      authversion: ""
-      endpointtype: ""
-      tempurlcontainerkey: ""
-      tempurlmethods: ""
-    ## Images/charts storage parameters when type is "oss"
-    ## @param persistence.imageChartStorage.oss.accesskeyid OSS storage type setting: Access key ID
-    ## @param persistence.imageChartStorage.oss.accesskeysecret OSS storage type setting: Access key secret name containing the token
-    ## @param persistence.imageChartStorage.oss.region OSS storage type setting: Region name
-    ## @param persistence.imageChartStorage.oss.bucket OSS storage type setting: Bucket name
-    ## @param persistence.imageChartStorage.oss.endpoint OSS storage type setting: Endpoint
-    ## @param persistence.imageChartStorage.oss.internal OSS storage type setting: Internal
-    ## @param persistence.imageChartStorage.oss.encrypt OSS storage type setting: Encrypt
-    ## @param persistence.imageChartStorage.oss.secure OSS storage type setting: Secure
-    ## @param persistence.imageChartStorage.oss.chunksize OSS storage type setting: Chunk
-    ## @param persistence.imageChartStorage.oss.rootdirectory OSS storage type setting: Directory
-    ## @param persistence.imageChartStorage.oss.secretkey OSS storage type setting: Secret key
-    ##
-    oss:
-      accesskeyid: ""
-      accesskeysecret: ""
-      region: ""
-      bucket: ""
-      endpoint: ""
-      internal: ""
-      encrypt: ""
-      secure: ""
-      chunksize: ""
-      rootdirectory: ""
-      secretkey: ""
-## @section Tracing parameters
-##
-
-## Tracing parameters:
-## tracing: Configure tracing for Harbor; only one of tracing.jaeger.enabled and tracing.otel.enabled should be set
-##
-tracing:
-  ## @param tracing.enabled Enable tracing
-  ##
-  enabled: false
-  ## @param tracing.sampleRate Tracing sample rate from 0 to 1
-  ##
-  sampleRate: 1
-  ## @param tracing.namespace Used to differentiate traces between different harbor services
-  ##
-  namespace: ""
-  ## @param tracing.attributes A key value dict containing user defined attributes used to initialize the trace provider
-  ## e.g:
-  ## attributes:
-  ##   application: harbor
-  ##
-  attributes: {}
-  ## @extra tracing.jaeger Configuration for exporting to jaeger. If using jaeger collector mode, use endpoint, username and password. If using jaeger agent mode, use agentHost and agentPort.
-  ## e.g:
-  ## jaeger:
-  ##   enabled: true
-  ##   endpoint: http://hostname:14268/api/traces
-  ##   username: "jaeger-username"
-  ##   password: "jaeger-password"
-  ## @param tracing.jaeger.enabled Enable jaeger export
-  ## @param tracing.jaeger.endpoint Jaeger endpoint
-  ## @param tracing.jaeger.username Jaeger username
-  ## @param tracing.jaeger.password Jaeger password
-  ## @param tracing.jaeger.agentHost Jaeger agent hostname
-  ## @param tracing.jaeger.agentPort Jaeger agent port
-  ##
-  jaeger:
-    enabled: false
-    endpoint: ""
-    username: ""
-    password: ""
-    agentHost: ""
-    agentPort: ""
-  ## @extra tracing.otel Configuration for exporting to an otel endpoint
-  ## @param tracing.otel.enabled Enable otel export
-  ## @param tracing.otel.endpoint The hostname and port for an otel compatible backend
-  ## @param tracing.otel.urlpath Url path of otel endpoint
-  ## @param tracing.otel.compression Enable data compression
-  ## @param tracing.otel.timeout The timeout for data transfer
-  ## @param tracing.otel.insecure Ignore cert verification for otel backend
-  ##
-  otel:
-    enabled: false
-    endpoint: "hostname:4318"
-    urlpath: "/v1/traces"
-    compression: false
-    timeout: 10s
-    insecure: true
-## @section Volume Permissions parameters
-##
-
-## Init containers parameters:
-## certificateVolume: Copy /etc/ssl/certs to a volume so that they can be updated when a read-only volume is in use.
-##
-certificateVolume:
-  ## Init container resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param certificateVolume.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if certificateVolume.resources is set (certificateVolume.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param certificateVolume.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
-  ##
-  enabled: false
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
-  ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
-  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## Example:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Init containers' Security Context
-  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
-  ## and not the below volumePermissions.containerSecurityContext.runAsUser
-  ## @param volumePermissions.containerSecurityContext.enabled Enable init container Security Context
-  ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 0
-## @section NGINX Parameters
-##
-nginx:
-  ## Bitnami NGINX image
-  ## ref: https://hub.docker.com/r/bitnami/nginx/tags/
-  ## @param nginx.image.registry [default: REGISTRY_NAME] NGINX image registry
-  ## @param nginx.image.repository [default: REPOSITORY_NAME/nginx] NGINX image repository
-  ## @skip nginx.image.tag NGINX image tag (immutable tags are recommended)
-  ## @param nginx.image.digest NGINX image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param nginx.image.pullPolicy NGINX image pull policy
-  ## @param nginx.image.pullSecrets NGINX image pull secrets
-  ## @param nginx.image.debug Enable NGINX image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/nginx
-    tag: 1.27.3-debian-12-r5
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## TLS parameters
-  ##
-  tls:
-    ## @param nginx.tls.enabled Enable TLS termination
-    ##
-    enabled: true
-    ## @param nginx.tls.existingSecret Existing secret name containing your own TLS certificates.
-    ## The secret must contain the keys:
-    ## `tls.crt` - the certificate (required),
-    ## `tls.key` - the private key (required),
-    ## `ca.crt` - CA certificate (optional)
-    ## Self-signed TLS certificates will be used otherwise.
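-    ## e.g. such a secret could be created with (a sketch; the secret name and file paths are placeholders):
-    ##   kubectl create secret tls harbor-nginx-tls --cert=./tls.crt --key=./tls.key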
-    ##
-    existingSecret: ""
-    ## @param nginx.tls.commonName The common name used to generate the self-signed TLS certificates
-    ##
-    commonName: core.harbor.domain
-  ## @param nginx.behindReverseProxy If NGINX is behind another reverse proxy, set to true
-  ## if the reverse proxy already provides the 'X-Forwarded-Proto' header field.
-  ## This is, for example, the case for the OpenShift HAProxy router.
-  ##
-  behindReverseProxy: false
-  ## @param nginx.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param nginx.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param nginx.extraEnvVars Array with extra environment variables to add NGINX pods
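-  ## e.g. (a sketch; the variable shown is purely illustrative):
-  ## extraEnvVars:
-  ##   - name: MY_CUSTOM_FLAG
-  ##     value: "enabled"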
-  ##
-  extraEnvVars: []
-  ## @param nginx.extraEnvVarsCM ConfigMap containing extra environment variables for NGINX pods
-  ##
-  extraEnvVarsCM: ""
-  ## @param nginx.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for NGINX pods
-  ##
-  extraEnvVarsSecret: ""
-  ## @param nginx.containerPorts.http NGINX HTTP container port
-  ## @param nginx.containerPorts.https NGINX HTTPS container port
-  ##
-  containerPorts:
-    http: 8080
-    https: 8443
-  ## @param nginx.replicaCount Number of NGINX replicas
-  ##
-  replicaCount: 1
-  ## Configure extra options for NGINX containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param nginx.livenessProbe.enabled Enable livenessProbe on NGINX containers
-  ## @param nginx.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param nginx.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param nginx.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param nginx.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param nginx.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param nginx.readinessProbe.enabled Enable readinessProbe on NGINX containers
-  ## @param nginx.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param nginx.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param nginx.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param nginx.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param nginx.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param nginx.startupProbe.enabled Enable startupProbe on NGINX containers
-  ## @param nginx.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param nginx.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param nginx.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param nginx.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param nginx.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param nginx.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param nginx.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param nginx.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## NGINX resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if nginx.resources is set (nginx.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param nginx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure NGINX pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param nginx.podSecurityContext.enabled Enable NGINX pods' Security Context
-  ## @param nginx.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param nginx.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param nginx.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param nginx.podSecurityContext.fsGroup Set NGINX pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure NGINX containers (only main one) Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param nginx.containerSecurityContext.enabled Enable containers' Security Context
-  ## @param nginx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param nginx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param nginx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param nginx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param nginx.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param nginx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param nginx.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param nginx.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param nginx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param nginx.updateStrategy.type NGINX deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
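-  ## e.g. for a single replica attached to a RWO volume (per the note above):
-  ## updateStrategy:
-  ##   type: Recreate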
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param nginx.lifecycleHooks LifecycleHook for the NGINX container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param nginx.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Nginx ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param nginx.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param nginx.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param nginx.serviceAccount.automountServiceAccountToken Allows automounting the ServiceAccount token on the created ServiceAccount
-    ##
-    automountServiceAccountToken: false
-    ## @param nginx.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## @param nginx.hostAliases NGINX pods host aliases
-  ##
-  hostAliases: []
-  ## @param nginx.podLabels Add additional labels to the NGINX pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param nginx.podAnnotations Annotations to add to the NGINX pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param nginx.podAffinityPreset NGINX Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param nginx.podAntiAffinityPreset NGINX Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ## Allowed values: soft, hard
-  ##
-  nodeAffinityPreset:
-    ## @param nginx.nodeAffinityPreset.type NGINX Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param nginx.nodeAffinityPreset.key NGINX Node label key to match. Ignored if `affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param nginx.nodeAffinityPreset.values NGINX Node label values to match. Ignored if `affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param nginx.affinity NGINX Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param nginx.nodeSelector NGINX Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param nginx.tolerations NGINX Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param nginx.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
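-  ## e.g. spreading replicas across nodes (a sketch; the labels under matchLabels are assumptions and
-  ## must match the labels the chart actually renders on the NGINX pods):
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: kubernetes.io/hostname
-  ##     whenUnsatisfiable: ScheduleAnyway
-  ##     labelSelector:
-  ##       matchLabels:
-  ##         app.kubernetes.io/component: nginx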
-  ##
-  topologySpreadConstraints: []
-  ## @param nginx.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param nginx.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param nginx.sidecars Add additional sidecar containers to the NGINX pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param nginx.initContainers Add additional init containers to the NGINX pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param nginx.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param nginx.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param nginx.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `nginx.pdb.minAvailable` and `nginx.pdb.maxUnavailable` are empty.
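-  ## e.g. to keep at least half of the replicas available during voluntary disruptions (illustrative):
-  ## pdb:
-  ##   create: true
-  ##   minAvailable: "50%"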
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param nginx.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the NGINX pods
-  ##
-  extraVolumeMounts: []
-  ## @param nginx.extraVolumes Optionally specify extra list of additional volumes for the NGINX pods
-  ##
-  extraVolumes: []
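-  ## e.g. mounting a custom CA bundle into the pods (all names and paths are hypothetical):
-  ## extraVolumes:
-  ##   - name: custom-ca
-  ##     secret:
-  ##       secretName: custom-ca
-  ## extraVolumeMounts:
-  ##   - name: custom-ca
-  ##     mountPath: /etc/ssl/custom
-  ##     readOnly: true
-  ##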
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param nginx.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param nginx.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports the server is listening
-    ## on. When true, the server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param nginx.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param nginx.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraIngress: []
-    ## @param nginx.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param nginx.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param nginx.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
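-    ## e.g. to admit traffic from pods in a labelled namespace (both selectors are illustrative):
-    ## ingressNSMatchLabels:
-    ##   kubernetes.io/metadata.name: ingress-nginx
-    ## ingressNSPodMatchLabels:
-    ##   app.kubernetes.io/name: ingress-nginx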
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-## @section Harbor Portal Parameters
-##
-portal:
-  ## Bitnami Harbor Portal image
-  ## ref: https://hub.docker.com/r/bitnami/harbor-portal/tags/
-  ## @param portal.image.registry [default: REGISTRY_NAME] Harbor Portal image registry
-  ## @param portal.image.repository [default: REPOSITORY_NAME/harbor-portal] Harbor Portal image repository
-  ## @skip portal.image.tag Harbor Portal image tag (immutable tags are recommended)
-  ## @param portal.image.digest Harbor Portal image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param portal.image.pullPolicy Harbor Portal image pull policy
-  ## @param portal.image.pullSecrets Harbor Portal image pull secrets
-  ## @param portal.image.debug Enable Harbor Portal image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/harbor-portal
-    tag: 2.12.2-debian-12-r0
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## Use TLS in the container
-  ##
-  tls:
-    ## @param portal.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
-    ## Requires `internalTLS.enabled` to be set to `true`
-    ## Self-signed TLS certificates will be used otherwise
-    ##
-    existingSecret: ""
-  ## @param portal.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param portal.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param portal.extraEnvVars Array with extra environment variables to add Harbor Portal pods
-  ##
-  extraEnvVars: []
-  ## @param portal.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Portal pods
-  ##
-  extraEnvVarsCM: ""
-  ## @param portal.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Portal pods
-  ##
-  extraEnvVarsSecret: ""
-  ## @param portal.containerPorts.http Harbor Portal HTTP container port
-  ## @param portal.containerPorts.https Harbor Portal HTTPS container port
-  ##
-  containerPorts:
-    http: 8080
-    https: 8443
-  ## @param portal.replicaCount Number of Harbor Portal replicas
-  ##
-  replicaCount: 1
-  ## Configure extra options for Harbor Portal containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param portal.livenessProbe.enabled Enable livenessProbe on Harbor Portal containers
-  ## @param portal.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param portal.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param portal.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param portal.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param portal.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param portal.readinessProbe.enabled Enable readinessProbe on Harbor Portal containers
-  ## @param portal.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param portal.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param portal.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param portal.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param portal.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param portal.startupProbe.enabled Enable startupProbe on Harbor Portal containers
-  ## @param portal.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param portal.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param portal.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param portal.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param portal.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param portal.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param portal.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param portal.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## Harbor Portal resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param portal.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if portal.resources is set (portal.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param portal.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Harbor Portal pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param portal.podSecurityContext.enabled Enable Harbor Portal pods' Security Context
-  ## @param portal.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param portal.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param portal.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param portal.podSecurityContext.fsGroup Set Harbor Portal pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Harbor Portal containers (only main one) Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param portal.containerSecurityContext.enabled Enable containers' Security Context
-  ## @param portal.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param portal.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param portal.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param portal.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param portal.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param portal.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param portal.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param portal.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param portal.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param portal.updateStrategy.type Harbor Portal deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param portal.lifecycleHooks LifecycleHook for the Harbor Portal container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param portal.hostAliases Harbor Portal pods host aliases
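-  ## e.g. (the IP is a placeholder; the hostname reuses the chart's default domain):
-  ## hostAliases:
-  ##   - ip: "10.0.0.10"
-  ##     hostnames:
-  ##       - core.harbor.domain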
-  ##
-  hostAliases: []
-  ## @param portal.podLabels Add additional labels to the Harbor Portal pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param portal.podAnnotations Annotations to add to the Harbor Portal pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param portal.podAffinityPreset Harbor Portal Pod affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param portal.podAntiAffinityPreset Harbor Portal Pod anti-affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param portal.nodeAffinityPreset.type Harbor Portal Node affinity preset type. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param portal.nodeAffinityPreset.key Harbor Portal Node label key to match. Ignored if `portal.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param portal.nodeAffinityPreset.values Harbor Portal Node label values to match. Ignored if `portal.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param portal.affinity Harbor Portal Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: portal.podAffinityPreset, portal.podAntiAffinityPreset, and portal.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param portal.nodeSelector Harbor Portal Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param portal.tolerations Harbor Portal Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param portal.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param portal.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param portal.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param portal.sidecars Add additional sidecar containers to the Harbor Portal pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param portal.initContainers Add additional init containers to the Harbor Portal pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param portal.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param portal.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param portal.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `portal.pdb.minAvailable` and `portal.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param portal.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Portal pods
-  ##
-  extraVolumeMounts: []
-  ## @param portal.extraVolumes Optionally specify extra list of additional volumes for the Harbor Portal pods
-  ##
-  extraVolumes: []
-  ## @param portal.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Portal ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param portal.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param portal.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param portal.serviceAccount.automountServiceAccountToken Allows automounting the ServiceAccount token on the created ServiceAccount
-    ##
-    automountServiceAccountToken: false
-    ## @param portal.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Harbor Portal service configuration
-  ##
-  service:
-    ## @param portal.service.ports.http Harbor Portal HTTP service port
-    ## @param portal.service.ports.https Harbor Portal HTTPS service port
-    ##
-    ports:
-      http: 80
-      https: 443
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param portal.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param portal.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports the server is listening
-    ## on. When true, the server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param portal.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param portal.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraIngress: []
-    ## @param portal.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param portal.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param portal.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
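-    ## Illustrative example only (the namespace name and pod labels are placeholders,
-    ## not chart defaults) of allowing ingress from a "monitoring" namespace:
-    ## ingressNSMatchLabels:
-    ##   kubernetes.io/metadata.name: monitoring
-    ## ingressNSPodMatchLabels:
-    ##   app.kubernetes.io/name: prometheus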
-## @section Harbor Core Parameters
-##
-core:
-  ## Bitnami Harbor Core image
-  ## ref: https://hub.docker.com/r/bitnami/harbor-core/tags/
-  ## @param core.image.registry [default: REGISTRY_NAME] Harbor Core image registry
-  ## @param core.image.repository [default: REPOSITORY_NAME/harbor-core] Harbor Core image repository
-  ## @skip core.image.tag Harbor Core image tag (immutable tags are recommended)
-  ## @param core.image.digest Harbor Core image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param core.image.pullPolicy Harbor Core image pull policy
-  ## @param core.image.pullSecrets Harbor Core image pull secrets
-  ## @param core.image.debug Enable Harbor Core image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/harbor-core
-    tag: 2.12.2-debian-12-r1
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## @param core.sessionLifetime Explicitly set a session timeout (in seconds) overriding the backend default.
-  ##
-  sessionLifetime: ""
-  ## @param core.uaaSecret If using external UAA auth which has a self-signed cert, you can provide a pre-created secret containing it under the key `ca.crt`.
-  ##
-  uaaSecret: ""
-  ## @param core.secretKey The key used for encryption. Must be a string of 16 chars
-  ## e.g:
-  ## secretKey: "not-a-secure-string"
-  ##
-  secretKey: {{ .Values.globals.harbor.coreSecretKey }}
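-  ## As a sketch (the key name mirrors the template reference above; the value is a
-  ## placeholder, not a real key), the corresponding globals values file entry would
-  ## look like:
-  ## globals:
-  ##   harbor:
-  ##     coreSecretKey: "0123456789abcdef"  # must be exactly 16 chars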
-  ## @param core.secret Secret used when the core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars.
-  ##
-  secret: {{ .Values.globals.harbor.coreSecret }}
-  ## @param core.tokenKey Key of the certificate used for token encryption/decryption.
-  ##
-  tokenKey: {{ .Values.globals.harbor.coreTlsKey | quote }}
-  ## @param core.tokenCert Certificate used for token encryption/decryption.
-  ##
-  tokenCert: {{ .Values.globals.harbor.coreTlsCert | quote }}
-  ## @param core.secretName Fill the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain two keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set
-  ##
-  secretName: ""
-  ## @param core.existingSecret Existing secret for core
-  ## The secret must contain the keys:
-  ## `secret` (required),
-  ## `secretKey` (required),
-  ##
-  existingSecret: ""
-  ## @param core.existingEnvVarsSecret Existing secret for core envvars
-  ## The secret must contain the keys:
-  ## `CSRF_KEY` (optional - alternatively auto-generated),
-  ## `HARBOR_ADMIN_PASSWORD` (optional - alternatively auto-generated),
-  ## `POSTGRESQL_PASSWORD` (optional - alternatively uses weak upstream default. Read below if you set it. You must also set postgresql.auth.existingSecret to the same value as core.existingEnvVarsSecret for this to work!),
-  ## `postgres-password` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.)
-  ## `HARBOR_DATABASE_PASSWORD` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.)
-  ## `REGISTRY_CREDENTIAL_USERNAME` (optional - alternatively weak defaults),
-  ## `REGISTRY_CREDENTIAL_PASSWORD` (optional - alternatively weak defaults),
-  ## `_REDIS_URL_CORE` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/0")
-  ## `_REDIS_URL_REG` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/2")
-  ##
-  ## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret
-  ## Notes:
-  ##   As an env-vars secret, this secret also stores the Redis config URLs
-  ##   The HARBOR_ADMIN_PASSWORD is only required at initial deployment; once the password is set in the database, it is not used anymore
-  ##
-  existingEnvVarsSecret: ""
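-  ## A minimal sketch of pre-creating such a secret (the secret name and key values are
-  ## placeholders; with --from-literal, kubectl handles the base64 encoding itself):
-  ## kubectl create secret generic harbor-core-envvars \
-  ##   --from-literal=CSRF_KEY=<32-char-key> \
-  ##   --from-literal=HARBOR_ADMIN_PASSWORD=<admin-password> \
-  ##   --from-literal=_REDIS_URL_CORE=redis://harbor-redis-master:6379/0 \
-  ##   --from-literal=_REDIS_URL_REG=redis://harbor-redis-master:6379/2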
-  ## @param core.csrfKey The CSRF key. Will be generated automatically if it isn't specified
-  ##
-  csrfKey: {{ .Values.globals.harbor.coreCsrfKey }}
-  ## Use TLS in the container
-  ##
-  tls:
-    ## @param core.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
-    ## Requires `internalTLS.enabled` to be set to `true`
-    ## Self-signed TLS certificates will be used otherwise
-    ##
-    existingSecret: ""
-  ## @param core.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param core.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param core.extraEnvVars Array with extra environment variables to add Harbor Core pods
-  ##
-  extraEnvVars: []
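-  ## e.g. (illustrative only; the name and value are placeholders):
-  ## extraEnvVars:
-  ##   - name: MY_ENV_VAR
-  ##     value: "my-value"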
-  ## @param core.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Core pods
-  ##
-  extraEnvVarsCM: ""
-  ## @param core.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Core pods
-  ##
-  extraEnvVarsSecret: ""
-  ## @param core.configOverwriteJson String containing a JSON with configuration overrides
-  ## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings
-  ##
-  configOverwriteJson: ""
-  ## @param core.configOverwriteJsonSecret Secret containing the JSON configuration overrides
-  ## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings
-  ##
-  configOverwriteJsonSecret: ""
-  ## @param core.containerPorts.http Harbor Core HTTP container port
-  ## @param core.containerPorts.https Harbor Core HTTPS container port
-  ## @param core.containerPorts.metrics Harbor Core metrics container port
-  ##
-  containerPorts:
-    http: 8080
-    https: 8443
-    metrics: 8001
-  ## @param core.replicaCount Number of Harbor Core replicas
-  ##
-  replicaCount: 1
-  ## Configure extra options for Harbor Core containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param core.livenessProbe.enabled Enable livenessProbe on Harbor Core containers
-  ## @param core.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param core.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param core.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param core.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param core.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param core.readinessProbe.enabled Enable readinessProbe on Harbor Core containers
-  ## @param core.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param core.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param core.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param core.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param core.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param core.startupProbe.enabled Enable startupProbe on Harbor Core containers
-  ## @param core.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param core.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param core.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param core.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param core.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param core.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param core.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param core.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## Harbor Core resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param core.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if core.resources is set (core.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param core.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Harbor Core pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param core.podSecurityContext.enabled Enabled Harbor Core pods' Security Context
-  ## @param core.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param core.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param core.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param core.podSecurityContext.fsGroup Set Harbor Core pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Harbor Core containers (only main one) Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param core.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param core.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param core.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param core.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param core.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param core.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param core.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param core.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param core.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param core.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param core.updateStrategy.type Harbor Core deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
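-  ## e.g., for a single replica with an RWO PV attached (see the note above):
-  ## updateStrategy:
-  ##   type: Recreate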
-  ## @param core.lifecycleHooks LifecycleHook for the Harbor Core container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param core.hostAliases Harbor Core pods host aliases
-  ##
-  hostAliases: []
-  ## @param core.podLabels Add additional labels to the Harbor Core pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param core.podAnnotations Annotations to add to the Harbor Core pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param core.podAffinityPreset Harbor Core Pod affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param core.podAntiAffinityPreset Harbor Core Pod anti-affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param core.nodeAffinityPreset.type Harbor Core Node affinity preset type. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param core.nodeAffinityPreset.key Harbor Core Node label key to match. Ignored if `core.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param core.nodeAffinityPreset.values Harbor Core Node label values to match. Ignored if `core.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param core.affinity Harbor Core Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: core.podAffinityPreset, core.podAntiAffinityPreset, and core.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param core.nodeSelector Harbor Core Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param core.tolerations Harbor Core Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param core.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
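-  ## Illustrative example (the label selector is a placeholder and must match your
-  ## actual pod labels):
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: kubernetes.io/hostname
-  ##     whenUnsatisfiable: ScheduleAnyway
-  ##     labelSelector:
-  ##       matchLabels:
-  ##         app.kubernetes.io/component: core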
-  ## @param core.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param core.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param core.sidecars Add additional sidecar containers to the Harbor Core pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param core.initContainers Add additional init containers to the Harbor Core pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param core.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param core.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param core.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `core.pdb.minAvailable` and `core.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param core.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Core pods
-  ##
-  extraVolumeMounts: []
-  ## @param core.extraVolumes Optionally specify extra list of additional volumes for the Harbor Core pods
-  ##
-  extraVolumes: []
-  ## @param core.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Core ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param core.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param core.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param core.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-    ##
-    automountServiceAccountToken: false
-    ## @param core.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Harbor Core service configuration
-  ##
-  service:
-    ## @param core.service.ports.http Harbor Core HTTP service port
-    ## @param core.service.ports.https Harbor Core HTTPS service port
-    ## @param core.service.ports.metrics Harbor Core metrics service port
-    ##
-    ports:
-      http: 80
-      https: 443
-      metrics: 8001
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param core.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param core.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param core.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param core.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param core.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param core.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param core.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-## @section Harbor Jobservice Parameters
-##
-jobservice:
-  ## Bitnami Harbor Jobservice image
-  ## ref: https://hub.docker.com/r/bitnami/harbor-jobservice/tags/
-  ## @param jobservice.image.registry [default: REGISTRY_NAME] Harbor Jobservice image registry
-  ## @param jobservice.image.repository [default: REPOSITORY_NAME/harbor-jobservice] Harbor Jobservice image repository
-  ## @skip jobservice.image.tag Harbor Jobservice image tag (immutable tags are recommended)
-  ## @param jobservice.image.digest Harbor Jobservice image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param jobservice.image.pullPolicy Harbor Jobservice image pull policy
-  ## @param jobservice.image.pullSecrets Harbor Jobservice image pull secrets
-  ## @param jobservice.image.debug Enable Harbor Jobservice image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/harbor-jobservice
-    tag: 2.12.2-debian-12-r1
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## @param jobservice.maxJobWorkers The max job workers
-  ##
-  maxJobWorkers: 10
-  ## @param jobservice.redisNamespace Redis namespace for jobservice
-  ##
-  redisNamespace: harbor_job_service_namespace
-  ## @param jobservice.jobLogger The logger for jobs: `file`, `database` or `stdout`
-  ##
-  jobLogger: file
-  ## @param jobservice.secret Secret used when the job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars.
-  ##
-  secret: {{ .Values.globals.harbor.jobserviceSecret }}
-  ## @param jobservice.existingSecret Existing secret for jobservice
-  ## The secret must contain the keys:
-  ## `secret` (required),
-  ##
-  existingSecret: ""
-  ## @param jobservice.existingEnvVarsSecret Existing secret for jobservice envvars
-  ## The secret must contain the keys:
-  ## `REGISTRY_CREDENTIAL_PASSWORD` (optional),
-  ## `JOB_SERVICE_POOL_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/1"),
-  ##
-  ## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret
-  existingEnvVarsSecret: ""
-  ## Use TLS in the container
-  ##
-  tls:
-    ## @param jobservice.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
-    ## Requires `internalTLS.enabled` to be set to `true`
-    ## Self-signed TLS certificates will be used otherwise
-    ##
-    existingSecret: ""
-  ## @param jobservice.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param jobservice.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param jobservice.extraEnvVars Array with extra environment variables to add Harbor Jobservice pods
-  ##
-  extraEnvVars: []
-  ## @param jobservice.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Jobservice pods
-  ##
-  extraEnvVarsCM: ""
-  ## @param jobservice.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Jobservice pods
-  ##
-  extraEnvVarsSecret: ""
-  ## @param jobservice.containerPorts.http Harbor Jobservice HTTP container port
-  ## @param jobservice.containerPorts.https Harbor Jobservice HTTPS container port
-  ## @param jobservice.containerPorts.metrics Harbor Jobservice metrics container port
-  ##
-  containerPorts:
-    http: 8080
-    https: 8443
-    metrics: 8001
-  ## @param jobservice.replicaCount Number of Harbor Jobservice replicas
-  ##
-  replicaCount: 1
-  ## Configure extra options for Harbor Jobservice containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param jobservice.livenessProbe.enabled Enable livenessProbe on Harbor Jobservice containers
-  ## @param jobservice.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param jobservice.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param jobservice.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param jobservice.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param jobservice.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param jobservice.readinessProbe.enabled Enable readinessProbe on Harbor Jobservice containers
-  ## @param jobservice.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param jobservice.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param jobservice.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param jobservice.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param jobservice.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param jobservice.startupProbe.enabled Enable startupProbe on Harbor Jobservice containers
-  ## @param jobservice.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param jobservice.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param jobservice.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param jobservice.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param jobservice.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param jobservice.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param jobservice.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param jobservice.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## Harbor Jobservice resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param jobservice.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobservice.resources is set (jobservice.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param jobservice.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Harbor Jobservice pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param jobservice.podSecurityContext.enabled Enabled Harbor Jobservice pods' Security Context
-  ## @param jobservice.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param jobservice.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param jobservice.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param jobservice.podSecurityContext.fsGroup Set Harbor Jobservice pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Harbor Jobservice containers (only main one) Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param jobservice.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param jobservice.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param jobservice.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param jobservice.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param jobservice.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param jobservice.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param jobservice.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param jobservice.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param jobservice.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param jobservice.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param jobservice.updateStrategy.type Harbor Jobservice deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param jobservice.lifecycleHooks LifecycleHook for the Harbor Jobservice container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param jobservice.hostAliases Harbor Jobservice pods host aliases
-  ##
-  hostAliases: []
-  ## @param jobservice.podLabels Add additional labels to the Harbor Jobservice pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param jobservice.podAnnotations Annotations to add to the Harbor Jobservice pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param jobservice.podAffinityPreset Harbor Jobservice Pod affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param jobservice.podAntiAffinityPreset Harbor Jobservice Pod anti-affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param jobservice.nodeAffinityPreset.type Harbor Jobservice Node affinity preset type. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param jobservice.nodeAffinityPreset.key Harbor Jobservice Node label key to match. Ignored if `jobservice.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param jobservice.nodeAffinityPreset.values Harbor Jobservice Node label values to match. Ignored if `jobservice.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param jobservice.affinity Harbor Jobservice Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: jobservice.podAffinityPreset, jobservice.podAntiAffinityPreset, and jobservice.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param jobservice.nodeSelector Harbor Jobservice Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param jobservice.tolerations Harbor Jobservice Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param jobservice.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param jobservice.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param jobservice.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param jobservice.sidecars Add additional sidecar containers to the Harbor Jobservice pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param jobservice.initContainers Add additional init containers to the Harbor Jobservice pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param jobservice.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param jobservice.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param jobservice.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `jobservice.pdb.minAvailable` and `jobservice.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param jobservice.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Jobservice pods
-  ##
-  extraVolumeMounts: []
-  ## @param jobservice.extraVolumes Optionally specify extra list of additional volumes for the Harbor Jobservice pods
-  ##
-  extraVolumes: []
-  ## @param jobservice.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Jobservice ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param jobservice.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param jobservice.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param jobservice.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-    ##
-    automountServiceAccountToken: false
-    ## @param jobservice.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Harbor Jobservice service configuration
-  ##
-  service:
-    ## @param jobservice.service.ports.http Harbor Jobservice HTTP service port
-    ## @param jobservice.service.ports.https Harbor Jobservice HTTPS service port
-    ## @param jobservice.service.ports.metrics Harbor Jobservice metrics service port
-    ##
-    ports:
-      http: 80
-      https: 443
-      metrics: 8001
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param jobservice.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param jobservice.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param jobservice.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param jobservice.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param jobservice.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param jobservice.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param jobservice.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-## @section Harbor Registry Parameters
-##
-registry:
-  ## @param registry.secret Secret is used to secure the upload state between the client and the registry storage backend.
-  ## See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http
-  ## If a secret key is not specified, Helm will generate one.
-  ## Must be a string of 16 chars.
-  ##
-  secret: {{ .Values.globals.harbor.registrySecret }}
-  ## @param registry.existingSecret Existing secret for registry
-  ## The secret must contain the keys:
-  ## `REGISTRY_HTPASSWD` (required - replaces insecure defaults),
-  ## `REGISTRY_HTTP_SECRET` (optional - generated on the fly if not specified),
-  ## `REGISTRY_REDIS_PASSWORD` (optional),
-  ##
-  existingSecret: ""
-  ## @param registry.relativeurls Make the registry return relative URLs in Location headers. The client is responsible for resolving the correct URL.
-  ##
-  relativeurls: false
-  ## @param registry.credentials.username The username for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd)
-  ## @param registry.credentials.password The password for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation.
-  ## @param registry.credentials.htpasswd The content of the htpasswd file, based on the values of `registry.credentials.username` and `registry.credentials.password`. Currently `helm` does not support bcrypt in the template script, so if the credentials are updated you need to regenerate this value manually with the `htpasswd` CLI (see below)
-  ##
-  credentials:
-    username: {{ .Values.globals.harbor.username }}
-    password: {{ .Values.globals.harbor.password }}
-    ## If you update the username or password of registry, make sure use cli tool htpasswd to generate the bcrypt hash
-    ## e.g. "htpasswd -nbBC10 $username $password"
-    ##
-    htpasswd: {{ .Values.globals.harbor.htpasswd }}
-  middleware:
-    ## @param registry.middleware.enabled Middleware is used to add support for a CDN between backend storage and the `docker pull` recipient.
-    ##
-    enabled: false
-    ## @param registry.middleware.type CDN type for the middleware
-    ##
-    type: cloudFront
-    ## @param registry.middleware.cloudFront.baseurl CloudFront CDN settings: Base URL
-    ## @param registry.middleware.cloudFront.keypairid CloudFront CDN settings: Keypair ID
-    ## @param registry.middleware.cloudFront.duration CloudFront CDN settings: Duration
-    ## @param registry.middleware.cloudFront.ipfilteredby CloudFront CDN settings: IP filters
-    ## @param registry.middleware.cloudFront.privateKeySecret CloudFront CDN settings: Secret name with the private key
-    ##
-    cloudFront:
-      baseurl: example.cloudfront.net
-      keypairid: KEYPAIRID
-      duration: 3000s
-      ipfilteredby: none
-      ## The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
-      ## that allows access to CloudFront
-      ##
-      privateKeySecret: my-secret
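-      ## A sketch of creating that secret (the file path is a placeholder for your
-      ## CloudFront private key file):
-      ## kubectl create secret generic my-secret \
-      ##   --from-file=CLOUDFRONT_KEY_DATA=./pk-cloudfront.pem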
-  ## Use TLS in the container
-  ##
-  tls:
-    ## @param registry.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
-    ## Requires `internalTLS.enabled` to be set to `true`
-    ## Self-signed TLS certificates will be used otherwise
-    ##
-    existingSecret: ""
-  ## @param registry.replicaCount Number of Harbor Registry replicas
-  ##
-  replicaCount: 1
-  ## Configure Harbor Registry pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param registry.podSecurityContext.enabled Enabled Harbor Registry pods' Security Context
-  ## @param registry.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param registry.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param registry.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param registry.podSecurityContext.fsGroup Set Harbor Registry pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## @param registry.updateStrategy.type Harbor Registry deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param registry.hostAliases Harbor Registry pods host aliases
-  ##
-  hostAliases: []
-  ## @param registry.podLabels Add additional labels to the Harbor Registry pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param registry.podAnnotations Annotations to add to the Harbor Registry pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param registry.podAffinityPreset Harbor Registry Pod affinity preset. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param registry.podAntiAffinityPreset Harbor Registry Pod anti-affinity preset. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param registry.nodeAffinityPreset.type Harbor Registry Node affinity preset type. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param registry.nodeAffinityPreset.key Harbor Registry Node label key to match. Ignored if `registry.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param registry.nodeAffinityPreset.values Harbor Registry Node label values to match. Ignored if `registry.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param registry.affinity Harbor Registry Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: registry.podAffinityPreset, registry.podAntiAffinityPreset, and registry.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param registry.nodeSelector Harbor Registry Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param registry.tolerations Harbor Registry Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param registry.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param registry.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param registry.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param registry.sidecars Add additional sidecar containers to the Harbor Registry pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param registry.initContainers Add additional init containers to the Harbor Registry pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param registry.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param registry.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param registry.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `registry.pdb.minAvailable` and `registry.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param registry.extraVolumes Optionally specify extra list of additional volumes for the Harbor Registry pods
-  ##
-  extraVolumes: []
-  ## @param registry.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Registry ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param registry.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param registry.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param registry.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-    ##
-    automountServiceAccountToken: false
-    ## @param registry.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param registry.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param registry.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param registry.networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param registry.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param registry.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param registry.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param registry.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## Harbor Registry main container parameters
-  ##
-  server:
-    ## Bitnami Harbor Registry image
-    ## ref: https://hub.docker.com/r/bitnami/harbor-registry/tags/
-    ## @param registry.server.image.registry [default: REGISTRY_NAME] Harbor Registry image registry
-    ## @param registry.server.image.repository [default: REPOSITORY_NAME/harbor-registry] Harbor Registry image repository
-    ## @skip registry.server.image.tag Harbor Registry image tag (immutable tags are recommended)
-    ## @param registry.server.image.digest Harbor Registry image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-    ## @param registry.server.image.pullPolicy Harbor Registry image pull policy
-    ## @param registry.server.image.pullSecrets Harbor Registry image pull secrets
-    ## @param registry.server.image.debug Enable Harbor Registry image debug mode
-    ##
-    image:
-      registry: docker.io
-      repository: bitnami/harbor-registry
-      tag: 2.12.2-debian-12-r1
-      digest: ""
-      ## Specify an imagePullPolicy
-      ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-      ##
-      pullPolicy: IfNotPresent
-      ## Optionally specify an array of imagePullSecrets.
-      ## Secrets must be manually created in the namespace.
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-      ## e.g:
-      ## pullSecrets:
-      ##   - myRegistryKeySecretName
-      ##
-      pullSecrets: []
-      ## Enable debug mode
-      ##
-      debug: false
-    ## @param registry.server.command Override default container command (useful when using custom images)
-    ##
-    command: []
-    ## @param registry.server.args Override default container args (useful when using custom images)
-    ##
-    args: []
-    ## @param registry.server.extraEnvVars Array with extra environment variables to add Harbor Registry main containers
-    ##
-    extraEnvVars: []
-    ## @param registry.server.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registry main containers
-    ##
-    extraEnvVarsCM: ""
-    ## @param registry.server.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registry main containers
-    ##
-    extraEnvVarsSecret: ""
-    ## @param registry.server.containerPorts.http Harbor Registry HTTP container port
-    ## @param registry.server.containerPorts.https Harbor Registry HTTPS container port
-    ## @param registry.server.containerPorts.debug Harbor Registry debug container port
-    ## @param registry.server.containerPorts.metrics Harbor Registry metrics container port
-    ##
-    containerPorts:
-      http: 5000
-      https: 5443
-      debug: 5001
-      metrics: 8001
-    ## Configure extra options for Harbor Registry main containers' liveness, readiness and startup probes
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-    ## @param registry.server.livenessProbe.enabled Enable livenessProbe on Harbor Registry main containers
-    ## @param registry.server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-    ## @param registry.server.livenessProbe.periodSeconds Period seconds for livenessProbe
-    ## @param registry.server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-    ## @param registry.server.livenessProbe.failureThreshold Failure threshold for livenessProbe
-    ## @param registry.server.livenessProbe.successThreshold Success threshold for livenessProbe
-    ##
-    livenessProbe:
-      enabled: true
-      initialDelaySeconds: 20
-      periodSeconds: 10
-      timeoutSeconds: 5
-      failureThreshold: 6
-      successThreshold: 1
-    ## @param registry.server.readinessProbe.enabled Enable readinessProbe on Harbor Registry main containers
-    ## @param registry.server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-    ## @param registry.server.readinessProbe.periodSeconds Period seconds for readinessProbe
-    ## @param registry.server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-    ## @param registry.server.readinessProbe.failureThreshold Failure threshold for readinessProbe
-    ## @param registry.server.readinessProbe.successThreshold Success threshold for readinessProbe
-    ##
-    readinessProbe:
-      enabled: true
-      initialDelaySeconds: 20
-      periodSeconds: 10
-      timeoutSeconds: 5
-      failureThreshold: 6
-      successThreshold: 1
-    ## @param registry.server.startupProbe.enabled Enable startupProbe on Harbor Registry main containers
-    ## @param registry.server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-    ## @param registry.server.startupProbe.periodSeconds Period seconds for startupProbe
-    ## @param registry.server.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-    ## @param registry.server.startupProbe.failureThreshold Failure threshold for startupProbe
-    ## @param registry.server.startupProbe.successThreshold Success threshold for startupProbe
-    ##
-    startupProbe:
-      enabled: false
-      initialDelaySeconds: 5
-      periodSeconds: 10
-      timeoutSeconds: 1
-      failureThreshold: 15
-      successThreshold: 1
-    ## @param registry.server.customLivenessProbe Custom livenessProbe that overrides the default one
-    ##
-    customLivenessProbe: {}
-    ## @param registry.server.customReadinessProbe Custom readinessProbe that overrides the default one
-    ##
-    customReadinessProbe: {}
-    ## @param registry.server.customStartupProbe Custom startupProbe that overrides the default one
-    ##
-    customStartupProbe: {}
-    ## Harbor Registry main resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## @param registry.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.server.resources is set (registry.server.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "small"
-    ## @param registry.server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-    ## Example:
-    ## resources:
-    ##   requests:
-    ##     cpu: 2
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 3
-    ##     memory: 1024Mi
-    ##
-    resources: {}
-    ## Configure Harbor Registry main container's Security Context
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-    ## @param registry.server.containerSecurityContext.enabled Enable containers' Security Context
-    ## @param registry.server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-    ## @param registry.server.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-    ## @param registry.server.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-    ## @param registry.server.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-    ## @param registry.server.containerSecurityContext.privileged Set container's Security Context privileged
-    ## @param registry.server.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-    ## @param registry.server.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-    ## @param registry.server.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-    ## @param registry.server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-    ##
-    containerSecurityContext:
-      enabled: true
-      seLinuxOptions: {}
-      runAsUser: 1001
-      runAsGroup: 1001
-      runAsNonRoot: true
-      privileged: false
-      readOnlyRootFilesystem: true
-      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
-    ## @param registry.server.lifecycleHooks LifecycleHook for the Harbor Registry main container(s) to automate configuration before or after startup
-    ##
-    lifecycleHooks: {}
-    ## @param registry.server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registry main pods
-    ##
-    extraVolumeMounts: []
-    ## Harbor Registry service configuration
-    ##
-    service:
-      ## @param registry.server.service.ports.http Harbor Registry HTTP service port
-      ## @param registry.server.service.ports.https Harbor Registry HTTPS service port
-      ## @param registry.server.service.ports.metrics Harbor Registry metrics service port
-      ##
-      ports:
-        http: 5000
-        https: 5443
-        metrics: 8001
-  ## Harbor Registryctl parameters
-  ##
-  controller:
-    ## Bitnami Harbor Registryctl image
-    ## ref: https://hub.docker.com/r/bitnami/harbor-registryctl/tags/
-    ## @param registry.controller.image.registry [default: REGISTRY_NAME] Harbor Registryctl image registry
-    ## @param registry.controller.image.repository [default: REPOSITORY_NAME/harbor-registryctl] Harbor Registryctl image repository
-    ## @skip registry.controller.image.tag Harbor Registryctl image tag (immutable tags are recommended)
-    ## @param registry.controller.image.digest Harbor Registryctl image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-    ## @param registry.controller.image.pullPolicy Harbor Registryctl image pull policy
-    ## @param registry.controller.image.pullSecrets Harbor Registryctl image pull secrets
-    ## @param registry.controller.image.debug Enable Harbor Registryctl image debug mode
-    ##
-    image:
-      registry: docker.io
-      repository: bitnami/harbor-registryctl
-      tag: 2.12.2-debian-12-r1
-      digest: ""
-      ## Specify an imagePullPolicy
-      ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-      ##
-      pullPolicy: IfNotPresent
-      ## Optionally specify an array of imagePullSecrets.
-      ## Secrets must be manually created in the namespace.
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-      ## e.g:
-      ## pullSecrets:
-      ##   - myRegistryKeySecretName
-      ##
-      pullSecrets: []
-      ## Enable debug mode
-      ##
-      debug: false
-    ## @param registry.controller.command Override default container command (useful when using custom images)
-    ##
-    command: []
-    ## @param registry.controller.args Override default container args (useful when using custom images)
-    ##
-    args: []
-    ## @param registry.controller.extraEnvVars Array with extra environment variables to add to Harbor Registryctl containers
-    ##
-    extraEnvVars: []
-    ## @param registry.controller.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registryctl containers
-    ##
-    extraEnvVarsCM: ""
-    ## @param registry.controller.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registryctl containers
-    ##
-    extraEnvVarsSecret: ""
-    ## @param registry.controller.containerPorts.http Harbor Registryctl HTTP container port
-    ## @param registry.controller.containerPorts.https Harbor Registryctl HTTPS container port
-    ##
-    containerPorts:
-      http: 8080
-      https: 8443
-    ## Configure extra options for Harbor Registryctl containers' liveness, readiness and startup probes
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-    ## @param registry.controller.livenessProbe.enabled Enable livenessProbe on Harbor Registryctl containers
-    ## @param registry.controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-    ## @param registry.controller.livenessProbe.periodSeconds Period seconds for livenessProbe
-    ## @param registry.controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-    ## @param registry.controller.livenessProbe.failureThreshold Failure threshold for livenessProbe
-    ## @param registry.controller.livenessProbe.successThreshold Success threshold for livenessProbe
-    ##
-    livenessProbe:
-      enabled: true
-      initialDelaySeconds: 20
-      periodSeconds: 10
-      timeoutSeconds: 5
-      failureThreshold: 6
-      successThreshold: 1
-    ## @param registry.controller.readinessProbe.enabled Enable readinessProbe on Harbor Registryctl containers
-    ## @param registry.controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-    ## @param registry.controller.readinessProbe.periodSeconds Period seconds for readinessProbe
-    ## @param registry.controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-    ## @param registry.controller.readinessProbe.failureThreshold Failure threshold for readinessProbe
-    ## @param registry.controller.readinessProbe.successThreshold Success threshold for readinessProbe
-    ##
-    readinessProbe:
-      enabled: true
-      initialDelaySeconds: 20
-      periodSeconds: 10
-      timeoutSeconds: 5
-      failureThreshold: 6
-      successThreshold: 1
-    ## @param registry.controller.startupProbe.enabled Enable startupProbe on Harbor Registryctl containers
-    ## @param registry.controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-    ## @param registry.controller.startupProbe.periodSeconds Period seconds for startupProbe
-    ## @param registry.controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-    ## @param registry.controller.startupProbe.failureThreshold Failure threshold for startupProbe
-    ## @param registry.controller.startupProbe.successThreshold Success threshold for startupProbe
-    ##
-    startupProbe:
-      enabled: false
-      initialDelaySeconds: 5
-      periodSeconds: 10
-      timeoutSeconds: 1
-      failureThreshold: 15
-      successThreshold: 1
-    ## @param registry.controller.customLivenessProbe Custom livenessProbe that overrides the default one
-    ##
-    customLivenessProbe: {}
-    ## @param registry.controller.customReadinessProbe Custom readinessProbe that overrides the default one
-    ##
-    customReadinessProbe: {}
-    ## @param registry.controller.customStartupProbe Custom startupProbe that overrides the default one
-    ##
-    customStartupProbe: {}
-    ## Harbor Registryctl resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## @param registry.controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.controller.resources is set (registry.controller.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "small"
-    ## @param registry.controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-    ## Example:
-    ## resources:
-    ##   requests:
-    ##     cpu: 2
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 3
-    ##     memory: 1024Mi
-    ##
-    resources: {}
-    ## Configure Harbor Registryctl main container's Security Context
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-    ## @param registry.controller.containerSecurityContext.enabled Enable containers' Security Context
-    ## @param registry.controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-    ## @param registry.controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-    ## @param registry.controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-    ## @param registry.controller.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-    ## @param registry.controller.containerSecurityContext.privileged Set container's Security Context privileged
-    ## @param registry.controller.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-    ## @param registry.controller.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-    ## @param registry.controller.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-    ## @param registry.controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-    ##
-    containerSecurityContext:
-      enabled: true
-      seLinuxOptions: {}
-      runAsUser: 1001
-      runAsGroup: 1001
-      runAsNonRoot: true
-      privileged: false
-      readOnlyRootFilesystem: true
-      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
-    ## @param registry.controller.lifecycleHooks LifecycleHook for the Harbor Registryctl container(s) to automate configuration before or after startup
-    ##
-    lifecycleHooks: {}
-    ## @param registry.controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registryctl pods
-    ##
-    extraVolumeMounts: []
-    ## Harbor Registryctl service configuration
-    ##
-    service:
-      ## @param registry.controller.service.ports.http Harbor Registryctl HTTP service port
-      ## @param registry.controller.service.ports.https Harbor Registryctl HTTPS service port
-      ##
-      ports:
-        http: 8080
-        https: 8443
-## @section Harbor Adapter Trivy Parameters
-##
-trivy:
-  ## Bitnami Harbor Adapter Trivy image
-  ## ref: https://hub.docker.com/r/bitnami/harbor-adapter-trivy/tags/
-  ## @param trivy.image.registry [default: REGISTRY_NAME] Harbor Adapter Trivy image registry
-  ## @param trivy.image.repository [default: REPOSITORY_NAME/harbor-adapter-trivy] Harbor Adapter Trivy image repository
-  ## @skip trivy.image.tag Harbor Adapter Trivy image tag (immutable tags are recommended)
-  ## @param trivy.image.digest Harbor Adapter Trivy image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param trivy.image.pullPolicy Harbor Adapter Trivy image pull policy
-  ## @param trivy.image.pullSecrets Harbor Adapter Trivy image pull secrets
-  ## @param trivy.image.debug Enable Harbor Adapter Trivy image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/harbor-adapter-trivy
-    tag: 2.12.2-debian-12-r0
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## @param trivy.enabled Enable Trivy
-  ##
-  enabled: true
-  ## @param trivy.debugMode The flag to enable Trivy debug mode
-  ##
-  debugMode: false
-  ## @param trivy.vulnType Comma-separated list of vulnerability types. Possible values `os` and `library`.
-  ##
-  vulnType: "os,library"
-  ## @param trivy.severity Comma-separated list of severities to be checked
-  ##
-  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
-  ## @param trivy.ignoreUnfixed The flag to display only fixed vulnerabilities
-  ##
-  ignoreUnfixed: false
-  ## @param trivy.insecure The flag to skip verifying registry certificate
-  ##
-  insecure: false
-  ## @param trivy.existingEnvVarsSecret Existing secret for trivy
-  ## The secret must contain the keys:
-  ## `SCANNER_TRIVY_GITHUB_TOKEN` (optional)
-  ## `SCANNER_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
-  ## `SCANNER_STORE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
-  ## `SCANNER_JOB_QUEUE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5")
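-  ## As a minimal sketch (the secret name "harbor-trivy-env" is only an example),
-  ## such a secret could be created with:
-  ##   kubectl create secret generic harbor-trivy-env \
-  ##     --from-literal=SCANNER_REDIS_URL=redis://harbor-redis-master:6379/5 \
-  ##     --from-literal=SCANNER_STORE_REDIS_URL=redis://harbor-redis-master:6379/5 \
-  ##     --from-literal=SCANNER_JOB_QUEUE_REDIS_URL=redis://harbor-redis-master:6379/5
-  ## (kubectl handles the base64 encoding) and then referenced below via
-  ## existingEnvVarsSecret: "harbor-trivy-env".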
-  ##
-  existingEnvVarsSecret: ""
-  ## @param trivy.gitHubToken The GitHub access token to download Trivy DB
-  ##
-  ## Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
-  ## It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
-  ## in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
-  ## timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
-  ## Currently, the database is updated every 12 hours and published as a new release to GitHub.
-  ##
-  ## Anonymous downloads from GitHub are subject to a limit of 60 requests per hour. Normally such a rate limit is enough
-  ## for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
-  ## requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
-  ## https://developer.github.com/v3/#rate-limiting
-  ##
-  ## You can create a GitHub token by following the instructions in
-  ## https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
-  ##
-  gitHubToken: ""
-  ## @param trivy.skipUpdate The flag to disable Trivy DB downloads from GitHub
-  ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
-  ## If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
-  ## `/bitnami/harbor-adapter-trivy/.cache/trivy/db/trivy.db` path.
-  ## ref: https://trivy.dev/dev/docs/configuration/db/
-  ##
-  skipUpdate: false
-  ## @param trivy.skipJavaDbUpdate The flag to disable Trivy JAVA DB downloads.
-  ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
-  ## If the value is set to `true` you have to manually download the `trivy-java.db` file and mount it in the
-  ## `/bitnami/harbor-adapter-trivy/.cache/trivy/java-db/trivy-java.db` path.
-  ##
-  skipJavaDbUpdate: false
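-  ## When both update flags above are set to `true`, a pre-downloaded database can
-  ## be mounted into the cache directory. A rough sketch using this chart's
-  ## `trivy.extraVolumes` and `trivy.extraVolumeMounts` (the PVC name is hypothetical):
-  ## extraVolumes:
-  ##   - name: trivy-db
-  ##     persistentVolumeClaim:
-  ##       claimName: trivy-db-pvc
-  ## extraVolumeMounts:
-  ##   - name: trivy-db
-  ##     mountPath: /bitnami/harbor-adapter-trivy/.cache/trivy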
-  ## @param trivy.dbRepository OCI repository(ies) to retrieve the Trivy vulnerability database from
-  dbRepository: ""
-  ## @param trivy.javaDbRepository OCI repository(ies) to retrieve the Trivy Java vulnerability database from
-  javaDbRepository: ""
-  ## @param trivy.cacheDir Directory to store the cache
-  ##
-  cacheDir: "/bitnami/harbor-adapter-trivy/.cache"
-  ## Use TLS in the container
-  ##
-  tls:
-    ## @param trivy.tls.existingSecret Name of an existing secret with the certificates for internal TLS access
-    ## Requires `internalTLS.enabled` to be set to `true`
-    ## Self-signed TLS certificates will be used otherwise
-    ##
-    existingSecret: ""
-  ## @param trivy.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param trivy.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param trivy.extraEnvVars Array with extra environment variables to add to Trivy pods
-  ##
-  extraEnvVars: []
-  ## @param trivy.extraEnvVarsCM ConfigMap containing extra environment variables for Trivy pods
-  ##
-  extraEnvVarsCM: ""
-  ## @param trivy.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Trivy pods
-  ##
-  extraEnvVarsSecret: ""
-  ## @param trivy.containerPorts.http Trivy HTTP container port
-  ## @param trivy.containerPorts.https Trivy HTTPS container port
-  ##
-  containerPorts:
-    http: 8080
-    https: 8443
-  ## @param trivy.replicaCount Number of Trivy replicas
-  ##
-  replicaCount: 1
-  ## Configure extra options for Trivy containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param trivy.livenessProbe.enabled Enable livenessProbe on Trivy containers
-  ## @param trivy.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param trivy.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param trivy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param trivy.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param trivy.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param trivy.readinessProbe.enabled Enable readinessProbe on Trivy containers
-  ## @param trivy.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param trivy.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param trivy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param trivy.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param trivy.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param trivy.startupProbe.enabled Enable startupProbe on Trivy containers
-  ## @param trivy.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param trivy.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param trivy.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param trivy.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param trivy.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param trivy.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param trivy.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param trivy.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## Trivy resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param trivy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if trivy.resources is set (trivy.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param trivy.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Trivy pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param trivy.podSecurityContext.enabled Enable Trivy pods' Security Context
-  ## @param trivy.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param trivy.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param trivy.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param trivy.podSecurityContext.fsGroup Set Trivy pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Trivy main container's Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param trivy.containerSecurityContext.enabled Enable containers' Security Context
-  ## @param trivy.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param trivy.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param trivy.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param trivy.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param trivy.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param trivy.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param trivy.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param trivy.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param trivy.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param trivy.updateStrategy.type Trivy deployment strategy type - only really applicable for deployments with RWO PVs attached
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
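-  ## e.g. a hedged sketch for a single replica with an RWO volume attached, where
-  ## the default rolling update could get stuck as described above:
-  ## updateStrategy:
-  ##   type: Recreate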
-  ## @param trivy.lifecycleHooks LifecycleHook for the Trivy container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param trivy.hostAliases Trivy pods host aliases
-  ##
-  hostAliases: []
-  ## @param trivy.podLabels Add additional labels to the Trivy pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param trivy.podAnnotations Annotations to add to the Trivy pods (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param trivy.podAffinityPreset Trivy Pod affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param trivy.podAntiAffinityPreset Trivy Pod anti-affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param trivy.nodeAffinityPreset.type Trivy Node affinity preset type. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param trivy.nodeAffinityPreset.key Trivy Node label key to match. Ignored if `trivy.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param trivy.nodeAffinityPreset.values Trivy Node label values to match. Ignored if `trivy.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param trivy.affinity Trivy Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: trivy.podAffinityPreset, trivy.podAntiAffinityPreset, and trivy.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param trivy.nodeSelector Trivy Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param trivy.tolerations Trivy Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param trivy.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param trivy.priorityClassName Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param trivy.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param trivy.sidecars Add additional sidecar containers to the Trivy pods
-  ## Example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param trivy.initContainers Add additional init containers to the Trivy pods
-  ## Example:
-  ## initContainers:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param trivy.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param trivy.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param trivy.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `trivy.pdb.minAvailable` and `trivy.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param trivy.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Trivy pods
-  ##
-  extraVolumeMounts: []
-  ## @param trivy.extraVolumes Optionally specify extra list of additional volumes for the Trivy pods
-  ##
-  extraVolumes: []
-  ## @param trivy.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Trivy ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param trivy.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param trivy.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param trivy.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-    ##
-    automountServiceAccountToken: false
-    ## @param trivy.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Trivy service configuration
-  ##
-  service:
-    ## @param trivy.service.ports.http Trivy HTTP service port
-    ## @param trivy.service.ports.https Trivy HTTPS service port
-    ##
-    ports:
-      http: 8080
-      https: 8443
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param trivy.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param trivy.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param trivy.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param trivy.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    extraIngress: []
-    ## @param trivy.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param trivy.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param trivy.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
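-    ## e.g. a sketch allowing scrapes from a Prometheus pod in a "monitoring"
-    ## namespace (the label values are assumptions, adjust to your cluster):
-    ## ingressNSMatchLabels:
-    ##   kubernetes.io/metadata.name: monitoring
-    ## ingressNSPodMatchLabels:
-    ##   app.kubernetes.io/name: prometheus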
-## @section Harbor Exporter Parameters
-##
-exporter:
-  ## Bitnami Harbor Exporter image
-  ## ref: https://hub.docker.com/r/bitnami/harbor-exporter/tags/
-  ## @param exporter.image.registry [default: REGISTRY_NAME] Harbor Exporter image registry
-  ## @param exporter.image.repository [default: REPOSITORY_NAME/harbor-exporter] Harbor Exporter image repository
-  ## @skip exporter.image.tag Harbor Exporter image tag
-  ## @param exporter.image.digest Harbor Exporter image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param exporter.image.pullPolicy Harbor exporter image pull policy
-  ## @param exporter.image.pullSecrets Specify docker-registry secret names as an array
-  ## @param exporter.image.debug Specify if debug logs should be enabled
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/harbor-exporter
-    tag: 2.12.2-debian-12-r1
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## @param exporter.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param exporter.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param exporter.extraEnvVars Array containing extra env vars
-  ## For example:
-  ##  - name: HARBOR_DATABASE_SSLMODE
-  ##    value: verify-ca
-  ##
-  extraEnvVars: []
-  ## @param exporter.extraEnvVarsCM ConfigMap containing extra env vars
-  ##
-  extraEnvVarsCM: ""
-  ## @param exporter.extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data)
-  ##
-  extraEnvVarsSecret: ""
-  ## @param exporter.containerPorts.metrics Harbor Exporter metrics container port
-  ##
-  containerPorts:
-    metrics: 8001
-  ## @param exporter.replicaCount The replica count
-  ##
-  replicaCount: 1
-  ## Harbor Exporter containers' liveness probe
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-  ## @param exporter.livenessProbe.enabled Enable livenessProbe
-  ## @param exporter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param exporter.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param exporter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param exporter.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param exporter.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## Harbor Exporter containers' readiness probe
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-  ## @param exporter.readinessProbe.enabled Enable readinessProbe
-  ## @param exporter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param exporter.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param exporter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param exporter.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param exporter.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param exporter.startupProbe.enabled Enable startupProbe on Harbor Exporter containers
-  ## @param exporter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param exporter.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param exporter.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param exporter.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param exporter.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param exporter.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param exporter.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param exporter.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## Harbor Exporter resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param exporter.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if exporter.resources is set (exporter.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param exporter.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Exporter pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param exporter.podSecurityContext.enabled Enable Exporter pods' Security Context
-  ## @param exporter.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param exporter.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param exporter.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param exporter.podSecurityContext.fsGroup Set Exporter pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Exporter main container's Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param exporter.containerSecurityContext.enabled Enable containers' Security Context
-  ## @param exporter.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param exporter.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param exporter.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param exporter.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param exporter.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param exporter.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param exporter.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param exporter.updateStrategy.type The update strategy for deployments with persistent volumes: RollingUpdate or Recreate. Set it as Recreate when RWM for volumes isn't supported
-  ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
-  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
-  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param exporter.lifecycleHooks LifecycleHook to set additional configuration at startup, e.g. LDAP settings via REST API. Evaluated as a template
-  ##
-  lifecycleHooks: {}
-  ## @param exporter.hostAliases Exporter pods host aliases
-  ##
-  hostAliases: []
-  ## @param exporter.podLabels Add additional labels to the pod (evaluated as a template)
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param exporter.podAnnotations Annotations to add to the exporter pod
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param exporter.podAffinityPreset Harbor Exporter Pod affinity preset. Ignored if `exporter.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param exporter.podAntiAffinityPreset Harbor Exporter Pod anti-affinity preset. Ignored if `exporter.affinity` is set. Allowed values: `soft` or `hard`
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param exporter.nodeAffinityPreset.type Harbor Exporter Node affinity preset type. Ignored if `exporter.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param exporter.nodeAffinityPreset.key Harbor Exporter Node label key to match. Ignored if `exporter.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param exporter.nodeAffinityPreset.values Harbor Exporter Node label values to match. Ignored if `exporter.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param exporter.affinity Harbor Exporter Affinity for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: `exporter.podAffinityPreset`, `exporter.podAntiAffinityPreset`, and `exporter.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param exporter.priorityClassName Exporter pods Priority Class Name
-  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-  ##
-  priorityClassName: ""
-  ## @param exporter.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param exporter.nodeSelector Harbor Exporter Node labels for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param exporter.tolerations Harbor Exporter Tolerations for pod assignment
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param exporter.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param exporter.initContainers Add additional init containers to the pod (evaluated as a template)
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param exporter.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param exporter.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param exporter.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `exporter.pdb.minAvailable` and `exporter.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param exporter.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Exporter pods
-  ##
-  extraVolumeMounts: []
-  ## @param exporter.extraVolumes Optionally specify extra list of additional volumes for the Harbor Exporter pods
-  ##
-  extraVolumes: []
-  ## @param exporter.sidecars Attach additional containers to the pod (evaluated as a template)
-  ##
-  sidecars: []
-  ## @param exporter.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## Harbor Exporter ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param exporter.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: false
-    ## @param exporter.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param exporter.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-    ##
-    automountServiceAccountToken: false
-    ## @param exporter.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Exporter service configuration
-  ##
-  service:
-    ## @param exporter.service.ports.metrics Exporter metrics service port
-    ##
-    ports:
-      metrics: 8001
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param exporter.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param exporter.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param exporter.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param exporter.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    extraIngress: []
-    ## @param exporter.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param exporter.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param exporter.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-## @section PostgreSQL Parameters
-##
-
-## PostgreSQL chart configuration
-## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
-## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart
-## @param postgresql.auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
-## @param postgresql.auth.postgresPassword Password for the "postgres" admin user
-## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials
-## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`)
-## @param postgresql.primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
-## @param postgresql.primary.initdb.scripts [object] Initdb scripts to create Harbor databases
-##
-postgresql:
-  enabled: false
-  ## Override the PostgreSQL default image, as 14.x is not supported; see https://goharbor.io/docs/2.4.0/install-config/
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql
-  ## @param postgresql.image.registry [default: REGISTRY_NAME] PostgreSQL image registry
-  ## @param postgresql.image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository
-  ## @skip postgresql.image.tag PostgreSQL image tag (immutable tags are recommended)
-  ## @param postgresql.image.digest PostgreSQL image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/postgresql
-    tag: 14.15.0-debian-12-r8
-    digest: ""
-  auth:
-    enablePostgresUser: true
-    postgresPassword: not-secure-database-password
-    existingSecret: ""
-  architecture: standalone
-  primary:
-    extendedConfiguration: |
-      max_connections = 1024
-    initdb:
-      scripts:
-        initial-registry.sql: |
-          CREATE DATABASE registry ENCODING 'UTF8';
-          \c registry;
-          CREATE TABLE schema_migrations(version bigint not null primary key, dirty boolean not null);
-    ## PostgreSQL Primary resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## @param postgresql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "nano"
-    ## @param postgresql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-    ## Example:
-    ## resources:
-    ##   requests:
-    ##     cpu: 2
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 3
-    ##     memory: 1024Mi
-    ##
-    resources: {}
-## External PostgreSQL configuration
-## All of these values are only used when postgresql.enabled is set to false
-## @param externalDatabase.host Database host
-## @param externalDatabase.port Database port number
-## @param externalDatabase.user Non-root username for Harbor
-## @param externalDatabase.password Password for the non-root username for Harbor
-## @param externalDatabase.sslmode External database ssl mode
-## @param externalDatabase.coreDatabase External database name for core
-## @param externalDatabase.existingSecret The name of an existing secret with database credentials
-## @param externalDatabase.existingSecretPasswordKey Password key on the existing secret
-##
-externalDatabase:
-  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
-  port: 5432
-  user: {{ .Values.globals.harbor.postgres.username }}
-  password: {{ .Values.globals.harbor.postgres.password }}
-  sslmode: disable
-  coreDatabase: {{ .Values.globals.harbor.postgres.database }}
-  existingSecret: ""
-  existingSecretPasswordKey: "db-password"
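-## As a hedged sketch (the secret name "harbor-database" is hypothetical), the
-## existing secret only needs the password stored under the configured key:
-##   kubectl create secret generic harbor-database --from-literal=db-password=<password>
-## after which existingSecret: "harbor-database" can be set above.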
-
-## @section Redis&reg; parameters
-##
-
-## Redis&reg; chart configuration
-## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml
-## @param redis.enabled Switch to enable or disable the Redis&reg; helm chart
-## @param redis.auth.enabled Enable password authentication
-## @param redis.auth.password Redis&reg; password
-## @param redis.auth.existingSecret The name of an existing secret with Redis&reg; credentials
-## @param redis.architecture Redis&reg; architecture. Allowed values: `standalone` or `replication`
-## @param redis.sentinel.enabled Use Redis&reg; Sentinel on Redis&reg; pods.
-## @param redis.sentinel.masterSet Master set name
-## @param redis.sentinel.service.ports.sentinel Redis&reg; service port for Redis&reg; Sentinel
-##
-redis:
-  enabled: false
-  auth:
-    enabled: false
-    ## Redis&reg; password (both master and slave). Defaults to a random 10-character alphanumeric string if not set and auth.enabled is true.
-    ## It should always be set, either via the password value or via existingSecret, to avoid issues
-    ## with Harbor.
-    ## The password value is ignored if existingSecret is set
-    ##
-    password: ""
-    existingSecret: ""
-  architecture: standalone
-  sentinel:
-    enabled: false
-    masterSet: mymaster
-    service:
-      ports:
-        sentinel: 26379
-  master:
-    ## Redis&reg; master resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## @param redis.master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "nano"
-    ## @param redis.master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-    ## Example:
-    ## resources:
-    ##   requests:
-    ##     cpu: 2
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 3
-    ##     memory: 1024Mi
-    ##
-    resources: {}
-## External Redis&reg; configuration
-## All of these values are only used when redis.enabled is set to false
-## @param externalRedis.host Redis&reg; host
-## @param externalRedis.port Redis&reg; port number
-## @param externalRedis.password Redis&reg; password
-## @param externalRedis.coreDatabaseIndex Index for core database
-## @param externalRedis.jobserviceDatabaseIndex Index for jobservice database
-## @param externalRedis.registryDatabaseIndex Index for registry database
-## @param externalRedis.trivyAdapterDatabaseIndex Index for trivy adapter database
-##
-externalRedis:
-  host: redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local
-  port: 6379
-  password: {{ .Values.globals.redis.password }}
-  coreDatabaseIndex: "0"
-  jobserviceDatabaseIndex: "1"
-  registryDatabaseIndex: "2"
-  trivyAdapterDatabaseIndex: "5"
-  ## Redis&reg; sentinel configuration
-  ## @param externalRedis.sentinel.enabled If external redis with sentinel is used, set it to `true`
-  ## @param externalRedis.sentinel.masterSet Name of sentinel masterSet if sentinel is used
-  ## @param externalRedis.sentinel.hosts Sentinel hosts and ports in the comma-separated format `host:port,host:port`
-  ##
-  sentinel:
-    enabled: false
-    masterSet: "mymaster"
-    hosts: ""
-## @section Harbor metrics parameters
-##
-metrics:
-  ## @param metrics.enabled Whether or not to enable metrics for the different components
-  ##
-  enabled: true
-  ## @param metrics.path Path where metrics are exposed
-  ##
-  path: /metrics
-  ## Prometheus Operator ServiceMonitor configuration
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`)
-    ##
-    enabled: false
-    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ##
-    interval: ""
-    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
-    ##
-    labels: {}
-    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
-    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-    ##
-    selector: {}
-    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-    ##
-    relabelings: []
-    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-    ##
-    jobLabel: ""
-
diff --git a/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl b/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl
deleted file mode 100644
index 5911482..0000000
--- a/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl
+++ /dev/null
@@ -1,34 +0,0 @@
-# Secrets for pulling an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-imagePullSecrets: []
-# This is to override the chart name.
-nameOverride: ""
-fullnameOverride: ""
-
-postgres:
-  image:
-    ref: postgres
-    # This sets the pull policy for images.
-    pullPolicy: IfNotPresent
-  host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
-  username: postgres
-  password: {{ .Values.globals.postgres.postgresPassword }}
-  databases:
-    {{- range .Values.globals.postgres.databases }}
-    - database: {{ .database }}
-      username: {{ .username }}
-      password: {{ .password }}
-    {{- end }}
-mysql:
-  image:
-    ref: mysql
-    # This sets the pull policy for images.
-    pullPolicy: IfNotPresent
-  host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local
-  username: root
-  password: {{ .Values.globals.mysql.rootPassword }}
-  databases:
-    {{- range .Values.globals.mysql.databases }}
-    - database: {{ .database }}
-      username: {{ .username }}
-      password: {{ .password }}
-    {{- end }}
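-## A minimal sketch of the globals these range loops expect (shape inferred
-## from the template expressions above; names and passwords are placeholders):
-# globals:
-#   postgres:
-#     namespace: postgres
-#     postgresPassword: change-me
-#     databases:
-#       - database: app
-#         username: app
-#         password: change-me
-#   mysql:
-#     namespace: mysql
-#     rootPassword: change-me
-#     databases: []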
diff --git a/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl b/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl
deleted file mode 100644
index be64576..0000000
--- a/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl
+++ /dev/null
@@ -1,5142 +0,0 @@
-# Default values for kube-prometheus-stack.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-## Provide a name in place of kube-prometheus-stack for `app:` labels
-##
-nameOverride: ""
-
-## Override the deployment namespace
-##
-namespaceOverride: ""
-
-## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6
-##
-kubeTargetVersionOverride: ""
-
-## Allow kubeVersion to be overridden while creating the ingress
-##
-kubeVersionOverride: ""
-
-## Provide a name to substitute for the full names of resources
-##
-fullnameOverride: ""
-
-## Labels to apply to all resources
-##
-commonLabels: {}
-# scmhash: abc123
-# myLabel: aakkmd
-
-## Install Prometheus Operator CRDs
-##
-crds:
-  enabled: true
-  ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
-  ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
-  ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs.
-  ## This feature is in preview, off by default and may change in the future.
-  upgradeJob:
-    enabled: false
-    image:
-      busybox:
-        registry: docker.io
-        repository: busybox
-        tag: "latest"
-        sha: ""
-        pullPolicy: IfNotPresent
-      kubectl:
-        registry: registry.k8s.io
-        repository: kubectl
-        tag: ""  # defaults to the Kubernetes version
-        sha: ""
-        pullPolicy: IfNotPresent
-
-    env: {}
-    ## Define resource requests and limits for single Pods.
-    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-    ##
-    resources: {}
-
-    ## Additional volumes
-    ##
-    extraVolumes: []
-
-    ## Additional volume mounts
-    ##
-    extraVolumeMounts: []
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Assign custom affinity rules to the upgrade-crd job
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## If specified, the pod's tolerations.
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    # - key: "key"
-    #   operator: "Equal"
-    #   value: "value"
-    #   effect: "NoSchedule"
-
-    ## If specified, the pod's topology spread constraints.
-    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-    ##
-    topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-    #   labelSelector:
-    #     matchLabels:
-    #       app: alertmanager
-
-    ## Labels to add to the upgrade-crd job
-    ##
-    labels: {}
-
-    ## Annotations to add to the upgrade-crd job
-    ##
-    annotations: {}
-
-    ## Labels to add to the upgrade-crd pod
-    ##
-    podLabels: {}
-
-    ## Annotations to add to the upgrade-crd pod
-    ##
-    podAnnotations: {}
-
-    ## Service account for upgrade crd job to use.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-    ##
-    serviceAccount:
-      create: true
-      name: ""
-      annotations: {}
-      labels: {}
-      automountServiceAccountToken: true
-
-    ## Container-specific security context configuration
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-    ##
-    containerSecurityContext:
-      allowPrivilegeEscalation: false
-      readOnlyRootFilesystem: true
-      capabilities:
-        drop:
-          - ALL
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## The defaults below run as a non-root user (uid and gid 65534).
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-    ##
-    securityContext:
-      fsGroup: 65534
-      runAsGroup: 65534
-      runAsNonRoot: true
-      runAsUser: 65534
-      seccompProfile:
-        type: RuntimeDefault
-
-## custom Rules to override "for" and "severity" in defaultRules
-##
-customRules: {}
-  # AlertmanagerFailedReload:
-  #   for: 3m
-  # AlertmanagerMembersInconsistent:
-  #   for: 5m
-  #   severity: "warning"
-
-## Create default rules for monitoring the cluster
-##
-defaultRules:
-  create: true
-  rules:
-    alertmanager: true
-    etcd: true
-    configReloaders: true
-    general: true
-    k8sContainerCpuUsageSecondsTotal: true
-    k8sContainerMemoryCache: true
-    k8sContainerMemoryRss: true
-    k8sContainerMemorySwap: true
-    k8sContainerResource: true
-    k8sContainerMemoryWorkingSetBytes: true
-    k8sPodOwner: true
-    kubeApiserverAvailability: true
-    kubeApiserverBurnrate: true
-    kubeApiserverHistogram: true
-    kubeApiserverSlos: true
-    kubeControllerManager: true
-    kubelet: true
-    kubeProxy: true
-    kubePrometheusGeneral: true
-    kubePrometheusNodeRecording: true
-    kubernetesApps: true
-    kubernetesResources: true
-    kubernetesStorage: true
-    kubernetesSystem: true
-    kubeSchedulerAlerting: true
-    kubeSchedulerRecording: true
-    kubeStateMetrics: true
-    network: true
-    node: true
-    nodeExporterAlerting: true
-    nodeExporterRecording: true
-    prometheus: true
-    prometheusOperator: true
-    windows: true
-
-  ## Reduce app namespace alert scope
-  appNamespacesTarget: ""
-
-  ## Set keep_firing_for for all alerts
-  keepFiringFor: ""
-
-  ## Labels for default rules
-  labels: {}
-  ## Annotations for default rules
-  annotations: {}
-
-  ## Additional labels for PrometheusRule alerts
-  additionalRuleLabels: {}
-
-  ## Additional annotations for PrometheusRule alerts
-  additionalRuleAnnotations: {}
-
-  ## Additional labels for specific PrometheusRule alert groups
-  additionalRuleGroupLabels:
-    alertmanager: {}
-    etcd: {}
-    configReloaders: {}
-    general: {}
-    k8sContainerCpuUsageSecondsTotal: {}
-    k8sContainerMemoryCache: {}
-    k8sContainerMemoryRss: {}
-    k8sContainerMemorySwap: {}
-    k8sContainerResource: {}
-    k8sPodOwner: {}
-    kubeApiserverAvailability: {}
-    kubeApiserverBurnrate: {}
-    kubeApiserverHistogram: {}
-    kubeApiserverSlos: {}
-    kubeControllerManager: {}
-    kubelet: {}
-    kubeProxy: {}
-    kubePrometheusGeneral: {}
-    kubePrometheusNodeRecording: {}
-    kubernetesApps: {}
-    kubernetesResources: {}
-    kubernetesStorage: {}
-    kubernetesSystem: {}
-    kubeSchedulerAlerting: {}
-    kubeSchedulerRecording: {}
-    kubeStateMetrics: {}
-    network: {}
-    node: {}
-    nodeExporterAlerting: {}
-    nodeExporterRecording: {}
-    prometheus: {}
-    prometheusOperator: {}
-
-  ## Additional annotations for specific PrometheusRule alerts groups
-  additionalRuleGroupAnnotations:
-    alertmanager: {}
-    etcd: {}
-    configReloaders: {}
-    general: {}
-    k8sContainerCpuUsageSecondsTotal: {}
-    k8sContainerMemoryCache: {}
-    k8sContainerMemoryRss: {}
-    k8sContainerMemorySwap: {}
-    k8sContainerResource: {}
-    k8sPodOwner: {}
-    kubeApiserverAvailability: {}
-    kubeApiserverBurnrate: {}
-    kubeApiserverHistogram: {}
-    kubeApiserverSlos: {}
-    kubeControllerManager: {}
-    kubelet: {}
-    kubeProxy: {}
-    kubePrometheusGeneral: {}
-    kubePrometheusNodeRecording: {}
-    kubernetesApps: {}
-    kubernetesResources: {}
-    kubernetesStorage: {}
-    kubernetesSystem: {}
-    kubeSchedulerAlerting: {}
-    kubeSchedulerRecording: {}
-    kubeStateMetrics: {}
-    network: {}
-    node: {}
-    nodeExporterAlerting: {}
-    nodeExporterRecording: {}
-    prometheus: {}
-    prometheusOperator: {}
-
-  additionalAggregationLabels: []
-
-  ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
-  runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
-
-  node:
-    fsSelector: 'fstype!=""'
-    # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"'
-
-  ## Disabled PrometheusRule alerts
-  disabled: {}
-  # KubeAPIDown: true
-  # NodeRAIDDegraded: true
-
-## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
-##
-# additionalPrometheusRules: []
-#  - name: my-rule-file
-#    groups:
-#      - name: my_group
-#        rules:
-#        - record: my_record
-#          expr: 100 * my_record
-
-## Provide custom recording or alerting rules to be deployed into the cluster.
-##
-additionalPrometheusRulesMap: {}
-#  rule-name:
-#    groups:
-#    - name: my_group
-#      rules:
-#      - record: my_record
-#        expr: 100 * my_record
-
-##
-global:
-  rbac:
-    create: true
-
-    ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
-    ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
-    createAggregateClusterRoles: false
-    pspEnabled: false
-    pspAnnotations: {}
-      ## Specify pod annotations
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
-      ##
-      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
-      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
-      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
-
-  ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
-  ##
-  imageRegistry: ""
-
-  ## Reference to one or more secrets to be used when pulling images
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ##
-  imagePullSecrets: []
-  # - name: "image-pull-secret"
-  # or
-  # - "image-pull-secret"
-
-windowsMonitoring:
-  ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
-  enabled: false
-
-## Configuration for prometheus-windows-exporter
-## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
-##
-prometheus-windows-exporter:
-  ## Enable ServiceMonitor and set Kubernetes label to use as a job label
-  ##
-  prometheus:
-    monitor:
-      enabled: true
-      jobLabel: jobLabel
-
-  releaseLabel: true
-
-  ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards
-  ##
-  podLabels:
-    jobLabel: windows-exporter
-
-  ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards
-  ##
-  config: |-
-    collectors:
-      enabled: '[defaults],memory,container'
-
-## Configuration for alertmanager
-## ref: https://prometheus.io/docs/alerting/alertmanager/
-##
-alertmanager:
-
-  ## Deploy alertmanager
-  ##
-  enabled: true
-
-  ## Annotations for Alertmanager
-  ##
-  annotations: {}
-
-  ## API that Prometheus will use to communicate with Alertmanager. Possible values are v1, v2
-  ##
-  apiVersion: v2
-
-  ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
-  ##
-  enableFeatures: []
-
-  ## Create dashboard configmap even if alertmanager deployment has been disabled
-  ##
-  forceDeployDashboards: false
-
-  ## Service account for Alertmanager to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-    annotations: {}
-    automountServiceAccountToken: true
-
-  ## Configure pod disruption budgets for Alertmanager
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
-  ##
-  podDisruptionBudget:
-    enabled: false
-    minAvailable: 1
-    maxUnavailable: ""
-
-  ## Alertmanager configuration directives
-  ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
-  ##      https://prometheus.io/webtools/alerting/routing-tree-editor/
-  ##
-  config:
-    global:
-      resolve_timeout: 5m
-    inhibit_rules:
-      - source_matchers:
-          - 'severity = critical'
-        target_matchers:
-          - 'severity =~ warning|info'
-        equal:
-          - 'namespace'
-          - 'alertname'
-      - source_matchers:
-          - 'severity = warning'
-        target_matchers:
-          - 'severity = info'
-        equal:
-          - 'namespace'
-          - 'alertname'
-      - source_matchers:
-          - 'alertname = InfoInhibitor'
-        target_matchers:
-          - 'severity = info'
-        equal:
-          - 'namespace'
-      - target_matchers:
-          - 'alertname = InfoInhibitor'
-    route:
-      group_by: ['namespace']
-      group_wait: 30s
-      group_interval: 5m
-      repeat_interval: 12h
-      receiver: 'null'
-      routes:
-      - receiver: 'null'
-        matchers:
-          - alertname = "Watchdog"
-    receivers:
-    - name: 'null'
-    templates:
-    - '/etc/alertmanager/config/*.tmpl'
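-  ## For reference, a delivering receiver could replace 'null' above; a sketch
-  ## with a hypothetical webhook endpoint:
-  # receivers:
-  # - name: 'team-webhook'
-  #   webhook_configs:
-  #   - url: 'http://alert-gateway.internal:5001/'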
-
-  ## Alertmanager configuration directives (as string type, preferred over the config hash map)
-  ## stringConfig will be used only, if tplConfig is true
-  ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
-  ##      https://prometheus.io/webtools/alerting/routing-tree-editor/
-  ##
-  stringConfig: ""
-
-  ## Pass the Alertmanager configuration directives through Helm's templating
-  ## engine. If the Alertmanager configuration contains Alertmanager templates,
-  ## they'll need to be properly escaped so that they are not interpreted by
-  ## Helm
-  ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
-  ##      https://prometheus.io/docs/alerting/configuration/#tmpl_string
-  ##      https://prometheus.io/docs/alerting/notifications/
-  ##      https://prometheus.io/docs/alerting/notification_examples/
-  tplConfig: false
-
-  ## Alertmanager template files to format alerts
-  ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
-  ## they have a .tmpl file suffix will be loaded. See config.templates above
-  ## to change, add other suffixes. If adding other suffixes, be sure to update
-  ## config.templates above to include those suffixes.
-  ## ref: https://prometheus.io/docs/alerting/notifications/
-  ##      https://prometheus.io/docs/alerting/notification_examples/
-  ##
-  templateFiles: {}
-
-  ingress:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-
-    labels: {}
-
-    ## Override ingress to a different defined port on the service
-    # servicePort: 8081
-    ## Override ingress to a different service than the default; this is useful if you need to
-    ## point to a specific instance of the alertmanager (e.g. kube-prometheus-stack-alertmanager-0)
-    # serviceName: kube-prometheus-stack-alertmanager-0
-
-    ## Hosts must be provided if Ingress is enabled.
-    ##
-    hosts: []
-      # - alertmanager.domain.com
-
-    ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## TLS configuration for Alertmanager Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-    # - secretName: alertmanager-general-tls
-    #   hosts:
-    #   - alertmanager.example.com
-
-  # -- BETA: Configure the gateway routes for the chart here.
-  # More routes can be added by adding a dictionary key like the 'main' route.
-  # Be aware that this is an early beta of this feature;
-  # kube-prometheus-stack does not guarantee it works, and it is subject to change.
-  # Being BETA, this can and will change in the future without notice; do not use it unless you accept that risk.
-  # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2)
-  route:
-    main:
-      # -- Enables or disables the route
-      enabled: false
-
-      # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
-      apiVersion: gateway.networking.k8s.io/v1
-      # -- Set the route kind
-      # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
-      kind: HTTPRoute
-
-      annotations: {}
-      labels: {}
-
-      hostnames: []
-      # - my-filter.example.com
-      parentRefs: []
-      # - name: acme-gw
-
-      matches:
-        - path:
-            type: PathPrefix
-            value: /
-
-      ## Filters define the filters that are applied to requests that match this rule.
-      filters: []
-
-      ## Additional custom rules that can be added to the route
-      additionalRules: []
-
-  ## Configuration for Alertmanager secret
-  ##
-  secret:
-    annotations: {}
-
-  ## Configuration for creating an Ingress that will map to each Alertmanager replica service
-  ## alertmanager.servicePerReplica must be enabled
-  ##
-  ingressPerReplica:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-    labels: {}
-
-    ## Final form of the hostname for each per replica ingress is
-    ## {hostPrefix}-{replicaNumber}.{hostDomain}
-    ##
-    ## Prefix for the per replica ingress that will have `-$replicaNumber`
-    ## appended to the end
-    hostPrefix: ""
-    ## Domain that will be used for the per replica ingress
-    hostDomain: ""
-
-    ## Paths to use for ingress rules
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## Secret name containing the TLS certificate for alertmanager per replica ingress
-    ## Secret must be manually created in the namespace
-    tlsSecretName: ""
-
-    ## Separated secret for each per replica Ingress. Can be used together with cert-manager
-    ##
-    tlsSecretPerReplica:
-      enabled: false
-      ## Final form of the secret name for each per replica ingress is
-      ## {prefix}-{replicaNumber}
-      ##
-      prefix: "alertmanager"
-
-  ## Configuration for Alertmanager service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## Port for Alertmanager Service to listen on
-    ##
-    port: 9093
-    ## To be used with a proxy extraContainer port
-    ##
-    targetPort: 9093
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30903
-    ## List of IP addresses at which the Alertmanager service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-
-    ## Additional ports to open for Alertmanager service
-    ##
-    additionalPorts: []
-    # - name: oauth-proxy
-    #   port: 8081
-    #   targetPort: 8081
-    # - name: oauth-metrics
-    #   port: 8082
-    #   targetPort: 8082
-
-    externalIPs: []
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## If you want to make sure that connections from a particular client are passed to the same Pod each time
-    ## Accepts 'ClientIP' or 'None'
-    ##
-    sessionAffinity: None
-
-    ## If you want to modify the ClientIP sessionAffinity timeout
-    ## The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP"
-    ##
-    sessionAffinityConfig:
-      clientIP:
-        timeoutSeconds: 10800
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-  ## Configuration for creating a separate Service for each statefulset Alertmanager replica
-  ##
-  servicePerReplica:
-    enabled: false
-    annotations: {}
-
-    ## Port for Alertmanager Service per replica to listen on
-    ##
-    port: 9093
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9093
-
-    ## Port to expose on each node
-    ## Only used if servicePerReplica.type is 'NodePort'
-    ##
-    nodePort: 30904
-
-    ## Loadbalancer source IP ranges
-    ## Only used if servicePerReplica.type is "LoadBalancer"
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-  ## Configuration for creating a ServiceMonitor for AlertManager
-  ##
-  serviceMonitor:
-    ## If true, a ServiceMonitor will be created for the AlertManager service.
-    ##
-    selfMonitor: true
-
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## enableHttp2: Whether to enable HTTP2.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
-    enableHttp2: true
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional Endpoints
-    ##
-    additionalEndpoints: []
-    # - port: oauth-metrics
-    #   path: /metrics
-
-  ## Settings affecting alertmanagerSpec
-  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec
-  ##
-  alertmanagerSpec:
-    ## Statefulset's persistent volume claim retention policy
-    ## whenDeleted and whenScaled determine whether the
-    ## statefulset's PVCs are deleted (Delete) or retained (Retain)
-    ## on deleting and scaling down the statefulset, respectively.
-    ## Requires Kubernetes version 1.27.0+.
-    ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-    persistentVolumeClaimRetentionPolicy: {}
-    #  whenDeleted: Retain
-    #  whenScaled: Retain
-
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
-    ##
-    podMetadata: {}
-
-    ## Image of Alertmanager
-    ##
-    image:
-      registry: quay.io
-      repository: prometheus/alertmanager
-      tag: v0.28.0
-      sha: ""
-
-    ## If true then the user will be responsible for providing a secret with alertmanager configuration
-    ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
-    ##
-    useExistingSecret: false
-
-    ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
-    ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
-    ##
-    secrets: []
-
-    ## If false then the user will opt out of automounting API credentials.
-    ##
-    automountServiceAccountToken: true
-
-    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
-    ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
-    ##
-    configMaps: []
-
-    ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
-    ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
-    ##
-    # configSecret:
-
-    ## WebTLSConfig defines the TLS parameters for HTTPS
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec
-    web: {}
-
-    ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
-    ##
-    alertmanagerConfigSelector: {}
-    ## Example which selects all alertmanagerConfig resources
-    ## with label "alertconfig" with values any of "example-config" or "example-config-2"
-    # alertmanagerConfigSelector:
-    #   matchExpressions:
-    #     - key: alertconfig
-    #       operator: In
-    #       values:
-    #         - example-config
-    #         - example-config-2
-    #
-    ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
-    # alertmanagerConfigSelector:
-    #   matchLabels:
-    #     role: example-config
-
-    ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
-    ##
-    alertmanagerConfigNamespaceSelector: {}
-    ## Example which selects all namespaces
-    ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
-    # alertmanagerConfigNamespaceSelector:
-    #   matchExpressions:
-    #     - key: alertmanagerconfig
-    #       operator: In
-    #       values:
-    #         - example-namespace
-    #         - example-namespace-2
-
-    ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
-    # alertmanagerConfigNamespaceSelector:
-    #   matchLabels:
-    #     alertmanagerconfig: enabled
-
-    ## AlertmanagerConfig to be used as top level configuration
-    ##
-    alertmanagerConfiguration: {}
-    ## Example selecting a global AlertmanagerConfig
-    # alertmanagerConfiguration:
-    #   name: global-alertmanager-Configuration
-
-    ## Defines the strategy used by AlertmanagerConfig objects to match alerts, e.g. OnNamespace.
-    ##
-    alertmanagerConfigMatcherStrategy: {}
-    ## Example using the OnNamespace strategy
-    # alertmanagerConfigMatcherStrategy:
-    #   type: OnNamespace
-
-    ## Define Log Format
-    # Use logfmt (default) or json logging
-    logFormat: logfmt
-
-    ## Log level for Alertmanager to be configured with.
-    ##
-    logLevel: info
-
-    ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
-    ## running cluster equal to the expected size.
-    replicas: 1
-
-    ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
-    ## [0-9]+(ms|s|m|h) (milliseconds, seconds, minutes, hours).
-    ##
-    retention: 120h
-
-    ## Storage is the definition of how storage will be used by the Alertmanager instances.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
-    ##
-    storage: {}
-    # volumeClaimTemplate:
-    #   spec:
-    #     storageClassName: gluster
-    #     accessModes: ["ReadWriteOnce"]
-    #     resources:
-    #       requests:
-    #         storage: 50Gi
-    #   selector: {}
-
-
-    ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs, and required if Alertmanager is not served from the root of a DNS name.
-    ##
-    externalUrl:
-
-    ## The route prefix Alertmanager registers HTTP handlers for. This is useful if, when using ExternalURL, a proxy is rewriting HTTP routes of a request and the actual ExternalURL is still true,
-    ## but the server serves requests under a different route prefix, for example when used with kubectl proxy.
-    ##
-    routePrefix: /
-
-    ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
-    ##
-    paused: false
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Define resources requests and limits for single Pods.
-    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    ##
-    podAntiAffinity: "soft"
-
-    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the alertmanager instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## If specified, the pod's tolerations.
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    # - key: "key"
-    #   operator: "Equal"
-    #   value: "value"
-    #   effect: "NoSchedule"
-
-    ## If specified, the pod's topology spread constraints.
-    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-    ##
-    topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-    #   labelSelector:
-    #     matchLabels:
-    #       app: alertmanager
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to a non-root user with uid 1000 and gid 2000.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-    ##
-    securityContext:
-      runAsGroup: 2000
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-      seccompProfile:
-        type: RuntimeDefault
-
-    ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
-    ## Note this is only for the Alertmanager UI, not the gossip communication.
-    ##
-    listenLocal: false
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
-    ##
-    containers: []
-    # containers:
-    # - name: oauth-proxy
-    #   image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
-    #   args:
-    #   - --upstream=http://127.0.0.1:9093
-    #   - --http-address=0.0.0.0:8081
-    #   - --metrics-address=0.0.0.0:8082
-    #   - ...
-    #   ports:
-    #   - containerPort: 8081
-    #     name: oauth-proxy
-    #     protocol: TCP
-    #   - containerPort: 8082
-    #     name: oauth-metrics
-    #     protocol: TCP
-    #   resources: {}
-
-    # Additional volumes on the output StatefulSet definition.
-    volumes: []
-
-    # Additional VolumeMounts on the output StatefulSet definition.
-    volumeMounts: []
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow making some changes
-    ## (permissions, dir tree) on mounted volumes before starting Alertmanager
-    initContainers: []
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
-    ##
-    additionalPeers: []
-
-    ## PortName to use for Alert Manager.
-    ##
-    portName: "http-web"
-
-    ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
-    ##
-    clusterAdvertiseAddress: false
-
-    ## clusterGossipInterval determines interval between gossip attempts.
-    ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
-    clusterGossipInterval: ""
-
-    ## clusterPeerTimeout determines timeout for cluster peering.
-    ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
-    clusterPeerTimeout: ""
-
-    ## clusterPushpullInterval determines interval between pushpull attempts.
-    ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
-    clusterPushpullInterval: ""
-
-    ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
-    clusterLabel: ""
-
-    ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
-    ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
-    forceEnableClusterMode: false
-
-    ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
-    ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
-    minReadySeconds: 0
-
-    ## Additional configuration which is not covered by the properties above. (passed through tpl)
-    additionalConfig: {}
-
-    ## Additional configuration which is not covered by the properties above.
-    ## Useful, if you need advanced templating inside alertmanagerSpec.
-    ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
-    additionalConfigString: ""
-
-  ## ExtraSecret can be used to store various data in an extra secret
-  ## (use it for example to store hashed basic auth credentials)
-  extraSecret:
-    ## if not set, name will be auto generated
-    # name: ""
-    annotations: {}
-    data: {}
-  #   auth: |
-  #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
-  #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
-
-## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
-##
-grafana:
-  enabled: true
-  namespaceOverride: ""
-
-  ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
-  ##
-  forceDeployDatasources: false
-
-  ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
-  ##
-  forceDeployDashboards: false
-
-  ## Deploy default dashboards
-  ##
-  defaultDashboardsEnabled: true
-
-  ## Timezone for the default dashboards
-  ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg
-  ##
-  defaultDashboardsTimezone: browser
-
-  ## Editable flag for the default dashboards
-  ##
-  defaultDashboardsEditable: true
-
-  adminPassword: {{ .Values.globals.kubePrometheusStack.grafana.adminPassword }}
-
-  rbac:
-    ## If true, Grafana PSPs will be created
-    ##
-    pspEnabled: false
-
-  ingress:
-    ## If true, Grafana Ingress will be created
-    ##
-    enabled: true
-
-    ## IngressClassName for Grafana Ingress.
-    ## Should be provided if Ingress is enabled.
-    ##
-    # ingressClassName: nginx
-
-    ## Annotations for Grafana Ingress
-    ##
-    annotations:
-      kubernetes.io/ingress.class: {{ .Values.globals.kubePrometheusStack.ingressClass }}
-      cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-
-    ## Labels to be added to the Ingress
-    ##
-    labels: {}
-
-    ## Hostnames.
-    ## Must be provided if Ingress is enabled.
-    ##
-    # hosts:
-    #   - grafana.domain.com
-    hosts:
-      {{- range .Values.globals.kubePrometheusStack.grafana.hosts }}
-      - {{ . }}
-      {{- end }}
-
-    ## Path for grafana ingress
-    path: /
-
-    ## TLS configuration for grafana Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls:
-      {{- range .Values.globals.kubePrometheusStack.grafana.hosts }}
-      - secretName: {{ . }}-tls
-        hosts:
-          - {{ . }}
-      {{- end }}
-    # - secretName: grafana-general-tls
-      # hosts:
-      # - grafana.example.com
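-    ## With a single hypothetical host "grafana.example.com" in
-    ## globals.kubePrometheusStack.grafana.hosts, the two loops above render to:
-    # hosts:
-    #   - grafana.example.com
-    # tls:
-    #   - secretName: grafana.example.com-tls
-    #     hosts:
-    #       - grafana.example.com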
-
-  ## To make Grafana persistent (using a StatefulSet)
-  ##
-  persistence:
-    enabled: true
-    type: sts
-    storageClassName: {{ .Values.globals.kubePrometheusStack.storageClass }}
-    accessModes:
-      - ReadWriteOnce
-    size: {{ .Values.globals.kubePrometheusStack.grafana.storageSize }}
-    finalizers:
-      - kubernetes.io/pvc-protection
-
-  serviceAccount:
-    create: true
-    autoMount: true
-
-  sidecar:
-    dashboards:
-      enabled: true
-      label: grafana_dashboard
-      labelValue: "1"
-      # Allow discovery in all namespaces for dashboards
-      searchNamespace: ALL
-
-      # Support for new table panels; when enabled, Grafana auto-migrates the old table panels to the newer ones
-      enableNewTablePanelSyntax: false
-
-      ## Annotations for Grafana dashboard configmaps
-      ##
-      annotations: {}
-      multicluster:
-        global:
-          enabled: false
-        etcd:
-          enabled: false
-      provider:
-        allowUiUpdates: false
-    datasources:
-      enabled: true
-      defaultDatasourceEnabled: true
-      isDefaultDatasource: true
-
-      name: Prometheus
-      uid: prometheus
-
-      ## URL of prometheus datasource
-      ##
-      # url: http://prometheus-stack-prometheus:9090/
-
-      ## Prometheus request timeout in seconds
-      # timeout: 30
-
-      # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
-      # defaultDatasourceScrapeInterval: 15s
-
-      ## Annotations for Grafana datasource configmaps
-      ##
-      annotations: {}
-
-      ## Set method for HTTP to send query to datasource
-      httpMethod: POST
-
-      ## Create datasource for each Pod of Prometheus StatefulSet;
-      ## this uses headless service `prometheus-operated` which is
-      ## created by Prometheus Operator
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
-      createPrometheusReplicasDatasources: false
-      label: grafana_datasource
-      labelValue: "1"
-
-      ## Field with internal link pointing to existing data source in Grafana.
-      ## Can be provisioned via additionalDataSources
-      exemplarTraceIdDestinations: {}
-        # datasourceUid: Jaeger
-        # traceIdLabelName: trace_id
-      alertmanager:
-        enabled: true
-        name: Alertmanager
-        uid: alertmanager
-        handleGrafanaManagedAlerts: false
-        implementation: prometheus
-
-  extraConfigmapMounts: []
-  # - name: certs-configmap
-  #   mountPath: /etc/grafana/ssl/
-  #   configMap: certs-configmap
-  #   readOnly: true
-
-  deleteDatasources: []
-  # - name: example-datasource
-  #   orgId: 1
-
-  ## Configure additional grafana datasources (passed through tpl)
-  ## ref: http://docs.grafana.org/administration/provisioning/#datasources
-  additionalDataSources: []
-  # - name: prometheus-sample
-  #   access: proxy
-  #   basicAuth: true
-  #   secureJsonData:
-  #       basicAuthPassword: pass
-  #   basicAuthUser: daco
-  #   editable: false
-  #   jsonData:
-  #       tlsSkipVerify: true
-  #   orgId: 1
-  #   type: prometheus
-  #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
-  #   version: 1
-
-  # Flag to mark provisioned data sources for deletion if they are no longer configured.
-  # It has no effect if data sources are already listed in the deleteDatasources section.
-  # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-config-file
-  prune: false
-
-  ## Passed to grafana subchart and used by servicemonitor below
-  ##
-  service:
-    portName: http-web
-    ipFamilies: []
-    ipFamilyPolicy: ""
-
-  serviceMonitor:
-    # If true, a ServiceMonitor CRD is created for a prometheus operator
-    # https://github.com/coreos/prometheus-operator
-    #
-    enabled: true
-
-    # Path to use for scraping metrics. Might be different if server.root_url is set
-    # in grafana.ini
-    path: "/metrics"
-
-    #  namespace: monitoring  (defaults to use the namespace this chart is deployed to)
-
-    # labels for the ServiceMonitor
-    labels: {}
-
-    # Scrape interval. If not set, the Prometheus default scrape interval is used.
-    #
-    interval: ""
-    scheme: http
-    tlsConfig: {}
-    scrapeTimeout: 30s
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-## Flag to disable all the kubernetes component scrapers
-##
-kubernetesServiceMonitors:
-  enabled: true
-
-## Component scraping the kube api server
-##
-kubeApiServer:
-  enabled: true
-  tlsConfig:
-    serverName: kubernetes
-    insecureSkipVerify: false
-  serviceMonitor:
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    jobLabel: component
-    selector:
-      matchLabels:
-        component: apiserver
-        provider: kubernetes
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings:
-      # Drop excessively noisy apiserver buckets.
-      - action: drop
-        regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|30|40|45|50)(\.0)?
-        sourceLabels:
-          - __name__
-          - le
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels:
-    #     - __meta_kubernetes_namespace
-    #     - __meta_kubernetes_service_name
-    #     - __meta_kubernetes_endpoint_port_name
-    #   action: keep
-    #   regex: default;kubernetes;https
-    # - targetLabel: __address__
-    #   replacement: kubernetes.default.svc:443
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping the kubelet and kubelet-hosted cAdvisor
-##
-kubelet:
-  enabled: true
-  namespace: kube-system
-
-  serviceMonitor:
-    ## Enable scraping /metrics from kubelet's service
-    kubelet: true
-
-    ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator.
-    ##
-    attachMetadata:
-      node: false
-
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## If true, Prometheus uses (respects) labels provided by the exporter.
-    ##
-    honorLabels: true
-
-    ## If true, Prometheus ingests metrics with the timestamp provided by the exporter. If false, Prometheus ingests metrics with the timestamp of the scrape.
-    ##
-    honorTimestamps: true
-
-    ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false.
-    ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor.
-    ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849
-    trackTimestampsStaleness: true
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## Enable scraping the kubelet over https. For requirements to enable this see
-    ## https://github.com/prometheus-operator/prometheus-operator/issues/926
-    ##
-    https: true
-
-    ## Skip TLS certificate validation when scraping.
-    ## This is enabled by default because the kubelet serving certificate deployed by kubeadm is self-signed by default
-    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
-    ##
-    insecureSkipVerify: true
-
-    ## Enable scraping /metrics/probes from kubelet's service
-    ##
-    probes: true
-
-    ## Enable scraping /metrics/resource from kubelet's service
-    ## This is disabled by default because container metrics are already exposed by cAdvisor
-    ##
-    resource: false
-    # From Kubernetes 1.18, /metrics/resource/v1alpha1 was renamed to /metrics/resource
-    resourcePath: "/metrics/resource/v1alpha1"
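-    # i.e. set this to "/metrics/resource" on Kubernetes 1.18 and newer, per the rename noted above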
-    ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor
-    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
-    ## if kubelet.serviceMonitor.interval is not empty.
-    resourceInterval: 10s
-
-    ## Enable scraping /metrics/cadvisor from kubelet's service
-    ##
-    cAdvisor: true
-    ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor
-    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
-    ## if kubelet.serviceMonitor.interval is not empty.
-    cAdvisorInterval: 10s
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    cAdvisorMetricRelabelings:
-      # Drop less useful container CPU metrics.
-      - sourceLabels: [__name__]
-        action: drop
-        regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
-      # Drop less useful container / always zero filesystem metrics.
-      - sourceLabels: [__name__]
-        action: drop
-        regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
-      # Drop less useful / always zero container memory metrics.
-      - sourceLabels: [__name__]
-        action: drop
-        regex: 'container_memory_(mapped_file|swap)'
-      # Drop less useful container process metrics.
-      - sourceLabels: [__name__]
-        action: drop
-        regex: 'container_(file_descriptors|tasks_state|threads_max)'
-      # Drop container_memory_failures_total{scope="hierarchy"} metrics,
-      # we only need the container scope.
-      - sourceLabels: [__name__, scope]
-        action: drop
-        regex: 'container_memory_failures_total;hierarchy'
-      # Drop container_network_... metrics that match various interfaces that
-      # correspond to CNI and similar interfaces. This avoids capturing network
-      # metrics for host network containers.
-      - sourceLabels: [__name__, interface]
-        action: drop
-        regex: 'container_network_;(cali|cilium|cni|lxc|nodelocaldns|tunl)'
-      # Drop container spec metrics that overlap with kube-state-metrics.
-      - sourceLabels: [__name__]
-        action: drop
-        regex: 'container_spec'
-      # Drop cgroup metrics with no pod.
-      - sourceLabels: [id, pod]
-        action: drop
-        regex: '.+;'
-    # - sourceLabels: [__name__, image]
-    #   separator: ;
-    #   regex: container_([a-z_]+);
-    #   replacement: $1
-    #   action: drop
-    # - sourceLabels: [__name__]
-    #   separator: ;
-    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-    #   replacement: $1
-    #   action: drop
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    probesMetricRelabelings: []
-    # - sourceLabels: [__name__, image]
-    #   separator: ;
-    #   regex: container_([a-z_]+);
-    #   replacement: $1
-    #   action: drop
-    # - sourceLabels: [__name__]
-    #   separator: ;
-    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-    #   replacement: $1
-    #   action: drop
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    ## metrics_path is required to match upstream rules and charts
-    cAdvisorRelabelings:
-      - action: replace
-        sourceLabels: [__metrics_path__]
-        targetLabel: metrics_path
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    probesRelabelings:
-      - action: replace
-        sourceLabels: [__metrics_path__]
-        targetLabel: metrics_path
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    resourceRelabelings:
-      - action: replace
-        sourceLabels: [__metrics_path__]
-        targetLabel: metrics_path
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings:
-      # Reduce bucket cardinality of kubelet storage operations.
-      - action: drop
-        sourceLabels: [__name__, le]
-        regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)?
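-      # e.g. this drops csi_operations_seconds_bucket{le="0.25"} while keeping the remaining buckets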
-    # - sourceLabels: [__name__, image]
-    #   separator: ;
-    #   regex: container_([a-z_]+);
-    #   replacement: $1
-    #   action: drop
-    # - sourceLabels: [__name__]
-    #   separator: ;
-    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-    #   replacement: $1
-    #   action: drop
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    ## metrics_path is required to match upstream rules and charts
-    relabelings:
-      - action: replace
-        sourceLabels: [__metrics_path__]
-        targetLabel: metrics_path
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping the kube controller manager
-##
-kubeControllerManager:
-  enabled: true
-
-  ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
-  ##
-  endpoints: []
-  # - 10.141.4.22
-  # - 10.141.4.23
-  # - 10.141.4.24
-
-  ## If using kubeControllerManager.endpoints, only the port and targetPort are used
-  ##
-  service:
-    enabled: true
-    ## If null or unset, the value is determined dynamically based on the target Kubernetes version,
-    ## due to the change of the default port in Kubernetes 1.22.
-    ##
-    port: null
-    targetPort: null
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   component: kube-controller-manager
-
-  serviceMonitor:
-    enabled: true
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## port: Name of the port the metrics will be scraped from
-    ##
-    port: http-metrics
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    component: kube-controller-manager
-
-    ## Enable scraping kube-controller-manager over https.
-    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
-    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
-    ##
-    https: null
-
-    # Skip TLS certificate validation when scraping
-    insecureSkipVerify: null
-
-    # Name of the server to use when validating TLS certificate
-    serverName: null
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping coreDns. Use either this or kubeDns
-##
-coreDns:
-  enabled: true
-  service:
-    enabled: true
-    port: 9153
-    targetPort: 9153
-
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   k8s-app: kube-dns
-  serviceMonitor:
-    enabled: true
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## port: Name of the port the metrics will be scraped from
-    ##
-    port: http-metrics
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    k8s-app: kube-dns
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping kubeDns. Use either this or coreDns
-##
-kubeDns:
-  enabled: false
-  service:
-    dnsmasq:
-      port: 10054
-      targetPort: 10054
-    skydns:
-      port: 10055
-      targetPort: 10055
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   k8s-app: kube-dns
-  serviceMonitor:
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    k8s-app: kube-dns
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    dnsmasqMetricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    dnsmasqRelabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping etcd
-##
-kubeEtcd:
-  enabled: true
-
-  ## If your etcd is not deployed as a pod, specify IPs it can be found on
-  ##
-  endpoints: []
-  # - 10.141.4.22
-  # - 10.141.4.23
-  # - 10.141.4.24
-
-  ## Etcd service. If using kubeEtcd.endpoints, only the port and targetPort are used
-  ##
-  service:
-    enabled: true
-    port: 2381
-    targetPort: 2381
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   component: etcd
-
-  ## Configure secure access to the etcd cluster by loading a secret into prometheus and
-  ## specifying security configuration below. For example, with a secret named etcd-client-cert
-  ##
-  ## serviceMonitor:
-  ##   scheme: https
-  ##   insecureSkipVerify: false
-  ##   serverName: localhost
-  ##   caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-  ##   certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-  ##   keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-  ##
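-  ## A minimal sketch, assuming kubeadm's default etcd client certificate paths and a
-  ## release in the "monitoring" namespace (both assumptions; adjust for your cluster):
-  ##
-  ##   kubectl -n monitoring create secret generic etcd-client-cert \
-  ##     --from-file=etcd-ca=/etc/kubernetes/pki/etcd/ca.crt \
-  ##     --from-file=etcd-client=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
-  ##     --from-file=etcd-client-key=/etc/kubernetes/pki/etcd/healthcheck-client.key
-  ##
-  ## then list the secret under prometheus.prometheusSpec.secrets so it is mounted at
-  ## /etc/prometheus/secrets/etcd-client-cert/ (matching the caFile/certFile/keyFile paths above):
-  ##
-  ##   prometheus:
-  ##     prometheusSpec:
-  ##       secrets:
-  ##         - etcd-client-cert
-  ##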
-  serviceMonitor:
-    enabled: true
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-    scheme: http
-    insecureSkipVerify: false
-    serverName: ""
-    caFile: ""
-    certFile: ""
-    keyFile: ""
-
-    ## port: Name of the port the metrics will be scraped from
-    ##
-    port: http-metrics
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    component: etcd
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping kube scheduler
-##
-kubeScheduler:
-  enabled: true
-
-  ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
-  ##
-  endpoints: []
-  # - 10.141.4.22
-  # - 10.141.4.23
-  # - 10.141.4.24
-
-  ## If using kubeScheduler.endpoints, only the port and targetPort are used
-  ##
-  service:
-    enabled: true
-    ## If null or unset, the value is determined dynamically based on the target Kubernetes version,
-    ## due to the change of the default port in Kubernetes 1.23.
-    ##
-    port: null
-    targetPort: null
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   component: kube-scheduler
-
-  serviceMonitor:
-    enabled: true
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-    ## Enable scraping kube-scheduler over https.
-    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
-    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
-    ##
-    https: null
-
-    ## port: Name of the port the metrics will be scraped from
-    ##
-    port: http-metrics
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    component: kube-scheduler
-
-    ## Skip TLS certificate validation when scraping
-    insecureSkipVerify: null
-
-    ## Name of the server to use when validating TLS certificate
-    serverName: null
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping kube proxy
-##
-kubeProxy:
-  enabled: true
-
-  ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
-  ##
-  endpoints: []
-  # - 10.141.4.22
-  # - 10.141.4.23
-  # - 10.141.4.24
-
-  service:
-    enabled: true
-    port: 10249
-    targetPort: 10249
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    # selector:
-    #   k8s-app: kube-proxy
-
-  serviceMonitor:
-    enabled: true
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## port: Name of the port the metrics will be scraped from
-    ##
-    port: http-metrics
-
-    jobLabel: jobLabel
-    selector: {}
-    #  matchLabels:
-    #    k8s-app: kube-proxy
-
-    ## Enable scraping kube-proxy over https.
-    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
-    ##
-    https: false
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-    #  foo: bar
-
-    ## Defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
-    targetLabels: []
-
-## Component scraping kube state metrics
-##
-kubeStateMetrics:
-  enabled: true
-
-## Configuration for kube-state-metrics subchart
-##
-kube-state-metrics:
-  namespaceOverride: ""
-  rbac:
-    create: true
-  releaseLabel: true
-  prometheus:
-    monitor:
-      enabled: true
-
-      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-      ##
-      interval: ""
-
-      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-      ##
-      sampleLimit: 0
-
-      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-      ##
-      targetLimit: 0
-
-      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelLimit: 0
-
-      ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelNameLengthLimit: 0
-
-      ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelValueLengthLimit: 0
-
-      ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used.
-      ##
-      scrapeTimeout: ""
-
-      ## proxyUrl: URL of a proxy that should be used for scraping.
-      ##
-      proxyUrl: ""
-
-      ## Keep labels from scraped data, overriding server-side labels
-      ##
-      honorLabels: true
-
-      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      # - action: keep
-      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-      #   sourceLabels: [__name__]
-
-      ## RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      # - sourceLabels: [__meta_kubernetes_pod_node_name]
-      #   separator: ;
-      #   regex: ^()$
-      #   targetLabel: nodename
-      #   replacement: $1
-      #   action: replace
-
-  selfMonitor:
-    enabled: false
-
-## Deploy node exporter as a daemonset to all nodes
-##
-nodeExporter:
-  enabled: true
-  operatingSystems:
-    linux:
-      enabled: true
-    aix:
-      enabled: true
-    darwin:
-      enabled: true
-
-  ## forceDeployDashboards: create the dashboard ConfigMap even if the nodeExporter deployment has been disabled
-  ##
-  forceDeployDashboards: false
-
-## Configuration for prometheus-node-exporter subchart
-##
-prometheus-node-exporter:
-  namespaceOverride: ""
-  podLabels:
-    ## Add the 'node-exporter' label, used by the serviceMonitor to match the standard usage in rules and Grafana dashboards
-    ##
-    jobLabel: node-exporter
-  releaseLabel: true
-  extraArgs:
-    - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
-    - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
-  service:
-    portName: http-metrics
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-    labels:
-      jobLabel: node-exporter
-
-  prometheus:
-    monitor:
-      enabled: true
-
-      jobLabel: jobLabel
-
-      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-      ##
-      interval: ""
-
-      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-      ##
-      sampleLimit: 0
-
-      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-      ##
-      targetLimit: 0
-
-      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelLimit: 0
-
-      ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelNameLengthLimit: 0
-
-      ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-      ##
-      labelValueLengthLimit: 0
-
-      ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
-      ##
-      scrapeTimeout: ""
-
-      ## proxyUrl: URL of a proxy that should be used for scraping.
-      ##
-      proxyUrl: ""
-
-      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      metricRelabelings: []
-      # - sourceLabels: [__name__]
-      #   separator: ;
-      #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
-      #   replacement: $1
-      #   action: drop
-
-      ## RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      relabelings: []
-      # - sourceLabels: [__meta_kubernetes_pod_node_name]
-      #   separator: ;
-      #   regex: ^()$
-      #   targetLabel: nodename
-      #   replacement: $1
-      #   action: replace
-
-      ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
-      ##
-      # attachMetadata:
-      #   node: false
-
-  rbac:
-    ## If true, create PSPs for node-exporter
-    ##
-    pspEnabled: false
-
-## Manages Prometheus and Alertmanager components
-##
-prometheusOperator:
-  enabled: true
-
-  fullnameOverride: ""
-
-  ## Number of old replicasets to retain.
-  ## The default value is 10; 0 will garbage-collect old replicasets.
-  revisionHistoryLimit: 10
-
-  ## Strategy of the deployment
-  ##
-  strategy: {}
-
-  ## Prometheus-Operator v0.39.0 and later support TLS natively.
-  ##
-  tls:
-    enabled: true
-    # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
-    tlsMinVersion: VersionTLS13
-    # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
-    internalPort: 10250
-
-  ## Liveness probe for the prometheusOperator deployment
-  ##
-  livenessProbe:
-    enabled: true
-    failureThreshold: 3
-    initialDelaySeconds: 0
-    periodSeconds: 10
-    successThreshold: 1
-    timeoutSeconds: 1
-  ## Readiness probe for the prometheusOperator deployment
-  ##
-  readinessProbe:
-    enabled: true
-    failureThreshold: 3
-    initialDelaySeconds: 0
-    periodSeconds: 10
-    successThreshold: 1
-    timeoutSeconds: 1
-
-  ## Admission webhook support for PrometheusRules resources (added in Prometheus Operator 0.30) can be enabled to prevent
-  ## incorrectly formatted rules from making their way into Prometheus and potentially preventing the container from starting
-  admissionWebhooks:
-    ## Valid values: Fail, Ignore, IgnoreOnInstallOnly
-    ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
-    failurePolicy: ""
-    ## The default timeoutSeconds is 10 and the maximum value is 30.
-    timeoutSeconds: 10
-    enabled: true
-    ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
-    ## If unspecified, system trust roots on the apiserver are used.
-    caBundle: ""
-    ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
-    ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
-    ## certs ahead of time if you wish.
-    ##
-    annotations: {}
-    #   argocd.argoproj.io/hook: PreSync
-    #   argocd.argoproj.io/hook-delete-policy: HookSucceeded
-
-    namespaceSelector: {}
-    objectSelector: {}
-
-    mutatingWebhookConfiguration:
-      annotations: {}
-      #   argocd.argoproj.io/hook: PreSync
-
-    validatingWebhookConfiguration:
-      annotations: {}
-      #   argocd.argoproj.io/hook: PreSync
-
-    deployment:
-      enabled: false
-
-      ## Number of replicas
-      ##
-      replicas: 1
-
-      ## Strategy of the deployment
-      ##
-      strategy: {}
-
-      # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-      podDisruptionBudget: {}
-        # maxUnavailable: 1
-        # minAvailable: 1
-
-      ## Number of old replicasets to retain.
-      ## The default value is 10; 0 will garbage-collect old replicasets.
-      revisionHistoryLimit: 10
-
-      ## Prometheus-Operator v0.39.0 and later support TLS natively.
-      ##
-      tls:
-        enabled: true
-        # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
-        tlsMinVersion: VersionTLS13
-        # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
-        internalPort: 10250
-
-      ## Service account for Prometheus Operator Webhook to use.
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-      ##
-      serviceAccount:
-        annotations: {}
-        automountServiceAccountToken: false
-        create: true
-        name: ""
-
-      ## Configuration for Prometheus operator Webhook service
-      ##
-      service:
-        annotations: {}
-        labels: {}
-        clusterIP: ""
-        ipDualStack:
-          enabled: false
-          ipFamilies: ["IPv6", "IPv4"]
-          ipFamilyPolicy: "PreferDualStack"
-
-        ## Port to expose on each node
-        ## Only used if service.type is 'NodePort'
-        ##
-        nodePort: 31080
-
-        nodePortTls: 31443
-
-        ## Additional ports to open for Prometheus operator Webhook service
-        ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
-        ##
-        additionalPorts: []
-
-        ## Loadbalancer IP
-        ## Only use if service.type is "LoadBalancer"
-        ##
-        loadBalancerIP: ""
-        loadBalancerSourceRanges: []
-
-        ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-        ##
-        externalTrafficPolicy: Cluster
-
-        ## Service type
-        ## NodePort, ClusterIP, LoadBalancer
-        ##
-        type: ClusterIP
-
-        ## List of IP addresses at which the Prometheus server service is available
-        ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-        ##
-        externalIPs: []
-
-      ## Labels to add to the operator webhook deployment
-      ##
-      labels: {}
-
-      ## Annotations to add to the operator webhook deployment
-      ##
-      annotations: {}
-
-      ## Labels to add to the operator webhook pod
-      ##
-      podLabels: {}
-
-      ## Annotations to add to the operator webhook pod
-      ##
-      podAnnotations: {}
-
-      ## Assign a PriorityClassName to pods if set
-      # priorityClassName: ""
-
-      ## Define Log Format
-      # Use logfmt (default) or json logging
-      # logFormat: logfmt
-
-      ## Decrease log verbosity to errors only
-      # logLevel: error
-
-      ## Prometheus-operator webhook image
-      ##
-      image:
-        registry: quay.io
-        repository: prometheus-operator/admission-webhook
-        # if not set appVersion field from Chart.yaml is used
-        tag: ""
-        sha: ""
-        pullPolicy: IfNotPresent
-
-      ## Liveness probe
-      ##
-      livenessProbe:
-        enabled: true
-        failureThreshold: 3
-        initialDelaySeconds: 30
-        periodSeconds: 10
-        successThreshold: 1
-        timeoutSeconds: 1
-
-      ## Readiness probe
-      ##
-      readinessProbe:
-        enabled: true
-        failureThreshold: 3
-        initialDelaySeconds: 5
-        periodSeconds: 10
-        successThreshold: 1
-        timeoutSeconds: 1
-
-      ## Resource limits & requests
-      ##
-      resources: {}
-      # limits:
-      #   cpu: 200m
-      #   memory: 200Mi
-      # requests:
-      #   cpu: 100m
-      #   memory: 100Mi
-
-      # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico),
-      # because the AWS-managed control plane cannot communicate with the pods' IP CIDR, so admission webhooks do not work
-      ##
-      hostNetwork: false
-
-      ## Define which Nodes the Pods are scheduled on.
-      ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-      ##
-      nodeSelector: {}
-
-      ## Tolerations for use with node taints
-      ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-      ##
-      tolerations: []
-      # - key: "key"
-      #   operator: "Equal"
-      #   value: "value"
-      #   effect: "NoSchedule"
-
-      ## Assign custom affinity rules to the prometheus operator
-      ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-      ##
-      affinity: {}
-        # nodeAffinity:
-        #   requiredDuringSchedulingIgnoredDuringExecution:
-        #     nodeSelectorTerms:
-        #     - matchExpressions:
-        #       - key: kubernetes.io/e2e-az-name
-        #         operator: In
-        #         values:
-        #         - e2e-az1
-        #         - e2e-az2
-      dnsConfig: {}
-        # nameservers:
-        #   - 1.2.3.4
-        # searches:
-        #   - ns1.svc.cluster-domain.example
-        #   - my.dns.search.suffix
-        # options:
-        #   - name: ndots
-        #     value: "2"
-        #   - name: edns0
-      securityContext:
-        fsGroup: 65534
-        runAsGroup: 65534
-        runAsNonRoot: true
-        runAsUser: 65534
-        seccompProfile:
-          type: RuntimeDefault
-
-      ## Container-specific security context configuration
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-      ##
-      containerSecurityContext:
-        allowPrivilegeEscalation: false
-        readOnlyRootFilesystem: true
-        capabilities:
-          drop:
-            - ALL
-
-      ## If false then the user will opt out of automounting API credentials.
-      ##
-      automountServiceAccountToken: true
-
-    patch:
-      enabled: true
-      image:
-        registry: registry.k8s.io
-        repository: ingress-nginx/kube-webhook-certgen
-        tag: v1.5.1  # latest tag: https://github.com/kubernetes/ingress-nginx/blob/main/images/kube-webhook-certgen/TAG
-        sha: ""
-        pullPolicy: IfNotPresent
-      resources: {}
-      ## Provide a priority class name to the webhook patching job
-      ##
-      priorityClassName: ""
-      ttlSecondsAfterFinished: 60
-      annotations: {}
-      #   argocd.argoproj.io/hook: PreSync
-      #   argocd.argoproj.io/hook-delete-policy: HookSucceeded
-      podAnnotations: {}
-      nodeSelector: {}
-      affinity: {}
-      tolerations: []
-
-      ## SecurityContext holds pod-level security attributes and common container settings.
-      ## This defaults to a non-root user with uid 2000 and gid 2000.
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-      ##
-      securityContext:
-        runAsGroup: 2000
-        runAsNonRoot: true
-        runAsUser: 2000
-        seccompProfile:
-          type: RuntimeDefault
-      ## Service account for Prometheus Operator Webhook Job Patch to use.
-      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-      ##
-      serviceAccount:
-        create: true
-        annotations: {}
-        automountServiceAccountToken: true
-
-    # Security context for the create-secret job container
-    createSecretJob:
-      securityContext:
-        allowPrivilegeEscalation: false
-        readOnlyRootFilesystem: true
-        capabilities:
-          drop:
-          - ALL
-
-    # Security context for the patch job container
-    patchWebhookJob:
-      securityContext:
-        allowPrivilegeEscalation: false
-        readOnlyRootFilesystem: true
-        capabilities:
-          drop:
-          - ALL
-
-    # Use cert-manager to generate webhook certs
-    certManager:
-      enabled: false
-      # self-signed root certificate
-      rootCert:
-        duration: ""  # default to be 5y
-      admissionCert:
-        duration: ""  # default to be 1y
-      # issuerRef:
-      #   name: "issuer"
-      #   kind: "ClusterIssuer"
-
-  ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
-  ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
-  ##
-  namespaces: {}
-    # releaseNamespace: true
-    # additional:
-    # - kube-system
-
-  ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
-  ##
-  denyNamespaces: []
-
-  ## Filter namespaces to look for prometheus-operator custom resources
-  ##
-  alertmanagerInstanceNamespaces: []
-  alertmanagerConfigNamespaces: []
-  prometheusInstanceNamespaces: []
-  thanosRulerInstanceNamespaces: []
-
-  ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
-  ## Without this option, cluster.peer will have the value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (the default).
-  ## With this option, cluster.peer will have the value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
-  ##
-  # clusterDomain: "cluster.local"
-
-  networkPolicy:
-    ## Enable creation of NetworkPolicy resources.
-    ##
-    enabled: false
-
-    ## Flavor of the network policy to use.
-    #  Can be:
-    #  * kubernetes for networking.k8s.io/v1/NetworkPolicy
-    #  * cilium     for cilium.io/v2/CiliumNetworkPolicy
-    flavor: kubernetes
-
-    # cilium:
-    #   egress:
-
-    ## match labels used in selector
-    # matchLabels: {}
-
-  ## Service account for Prometheus Operator to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-    automountServiceAccountToken: true
-    annotations: {}
-
-  # -- terminationGracePeriodSeconds for container lifecycle hook
-  terminationGracePeriodSeconds: 30
-  # -- Specify lifecycle hooks for the controller
-  lifecycle: {}
-  ## Configuration for Prometheus operator service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30080
-
-    nodePortTls: 30443
-
-    ## Additional ports to open for Prometheus operator service
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
-    ##
-    additionalPorts: []
-
-    ## Loadbalancer IP
-    ## Only use if service.type is "LoadBalancer"
-    ##
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ## NodePort, ClusterIP, LoadBalancer
-    ##
-    type: ClusterIP
-
-    ## List of IP addresses at which the Prometheus server service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-  ## Labels to add to the operator deployment
-  ##
-  labels: {}
-
-  ## Annotations to add to the operator deployment
-  ##
-  annotations: {}
-
-  ## Labels to add to the operator pod
-  ##
-  podLabels: {}
-
-  ## Annotations to add to the operator pod
-  ##
-  podAnnotations: {}
-
-  ## Assign a PriorityClassName to pods if set
-  # priorityClassName: ""
-
-  ## Define Log Format
-  # Use logfmt (default) or json logging
-  # logFormat: logfmt
-
-  ## Decrease log verbosity to errors only
-  # logLevel: error
-
-  kubeletService:
-    ## If true, the operator will create and maintain a service for scraping kubelets
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
-    ##
-    enabled: true
-    namespace: kube-system
-    selector: ""
-    name: ""
-
-  ## Create Endpoints objects for kubelet targets.
-  kubeletEndpointsEnabled: true
-  ## Create EndpointSlice objects for kubelet targets.
-  kubeletEndpointSliceEnabled: false
-
-  ## Extra arguments to pass to prometheusOperator
-  # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/operator.md
-  extraArgs: []
-  #  - --labels="cluster=talos-cluster"
-
-  ## Create a servicemonitor for the operator
-  ##
-  serviceMonitor:
-    ## If true, create a serviceMonitor for prometheus operator
-    ##
-    selfMonitor: true
-
-    ## Labels for ServiceMonitor
-    additionalLabels: {}
-
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
-    scrapeTimeout: ""
-
-    ## Metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-  ## Resource limits & requests
-  ##
-  resources: {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 200Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 100Mi
-
-  ## Operator Environment
-  ##  env:
-  ##    VARIABLE: value
-  env:
-    GOGC: "30"
-
-  # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico),
-  # because the AWS-managed control plane cannot communicate with the pods' IP CIDR, so admission webhooks do not work
-  ##
-  hostNetwork: false
-
-  ## Define which Nodes the Pods are scheduled on.
-  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-  ##
-  nodeSelector: {}
-
-  ## Tolerations for use with node taints
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-
-  ## Assign custom affinity rules to the prometheus operator
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-  ##
-  affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-  dnsConfig: {}
-    # nameservers:
-    #   - 1.2.3.4
-    # searches:
-    #   - ns1.svc.cluster-domain.example
-    #   - my.dns.search.suffix
-    # options:
-    #   - name: ndots
-    #     value: "2"
-    #   - name: edns0
-  securityContext:
-    fsGroup: 65534
-    runAsGroup: 65534
-    runAsNonRoot: true
-    runAsUser: 65534
-    seccompProfile:
-      type: RuntimeDefault
-
-  ## Container-specific security context configuration
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ##
-  containerSecurityContext:
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    capabilities:
-      drop:
-      - ALL
-
-  # Enable vertical pod autoscaler support for prometheus-operator
-  verticalPodAutoscaler:
-    enabled: false
-
-    # Recommender responsible for generating recommendation for the object.
-    # List should be empty (then the default recommender will generate the recommendation)
-    # or contain exactly one recommender.
-    # recommenders:
-    # - name: custom-recommender-performance
-
-    # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
-    controlledResources: []
-    # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
-    # controlledValues: RequestsAndLimits
-
-    # Define the max allowed resources for the pod
-    maxAllowed: {}
-    # cpu: 200m
-    # memory: 100Mi
-    # Define the min allowed resources for the pod
-    minAllowed: {}
-    # cpu: 200m
-    # memory: 100Mi
-
-    updatePolicy:
-      # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
-      # minReplicas: 1
-      # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
-      # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
-      updateMode: Auto
-
-  ## Prometheus-operator image
-  ##
-  image:
-    registry: quay.io
-    repository: prometheus-operator/prometheus-operator
-    # if not set appVersion field from Chart.yaml is used
-    tag: ""
-    sha: ""
-    pullPolicy: IfNotPresent
-
-  ## Prometheus image to use for prometheuses managed by the operator
-  ##
-  # prometheusDefaultBaseImage: prometheus/prometheus
-
-  ## Prometheus image registry to use for prometheuses managed by the operator
-  ##
-  # prometheusDefaultBaseImageRegistry: quay.io
-
-  ## Alertmanager image to use for alertmanagers managed by the operator
-  ##
-  # alertmanagerDefaultBaseImage: prometheus/alertmanager
-
-  ## Alertmanager image registry to use for alertmanagers managed by the operator
-  ##
-  # alertmanagerDefaultBaseImageRegistry: quay.io
-
-  ## Prometheus-config-reloader
-  ##
-  prometheusConfigReloader:
-    image:
-      registry: quay.io
-      repository: prometheus-operator/prometheus-config-reloader
-      # if not set appVersion field from Chart.yaml is used
-      tag: ""
-      sha: ""
-
-    # Add liveness and readiness probes to the prometheus config reloader. Default: false
-    enableProbe: false
-
-    # resource config for prometheusConfigReloader
-    resources: {}
-      # requests:
-      #   cpu: 200m
-      #   memory: 50Mi
-      # limits:
-      #   cpu: 200m
-      #   memory: 50Mi
-
-  ## Thanos side-car image when configured
-  ##
-  thanosImage:
-    registry: quay.io
-    repository: thanos/thanos
-    tag: v0.37.2
-    sha: ""
-
-  ## Set a Label Selector to filter watched prometheus and prometheusAgent
-  ##
-  prometheusInstanceSelector: ""
-
-  ## Set a Label Selector to filter watched alertmanager
-  ##
-  alertmanagerInstanceSelector: ""
-
-  ## Set a Label Selector to filter watched thanosRuler
-  thanosRulerInstanceSelector: ""
-
-  ## Set a Field Selector to filter watched secrets
-  ##
-  secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
-
-  ## If false then the user will opt out of automounting API credentials.
-  ##
-  automountServiceAccountToken: true
-
-  ## Additional volumes
-  ##
-  extraVolumes: []
-
-  ## Additional volume mounts
-  ##
-  extraVolumeMounts: []
-
-## Deploy a Prometheus instance
-##
-prometheus:
-  enabled: true
-
-  ## Toggle prometheus into agent mode
-  ## Note that many of the features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
-  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/designs/prometheus-agent.md
-  ##
-  agentMode: false
-
-  ## Annotations for Prometheus
-  ##
-  annotations: {}
-
-  ## Configure network policy for the prometheus
-  networkPolicy:
-    enabled: false
-
-    ## Flavor of the network policy to use.
-    #  Can be:
-    #  * kubernetes for networking.k8s.io/v1/NetworkPolicy
-    #  * cilium     for cilium.io/v2/CiliumNetworkPolicy
-    flavor: kubernetes
-
-    # cilium:
-    #   endpointSelector:
-    #   egress:
-    #   ingress:
-
-    # egress:
-    # - {}
-    # ingress:
-    # - {}
-    # podSelector:
-    #   matchLabels:
-    #     app: prometheus
-
-  ## Service account for Prometheuses to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-    annotations: {}
-    automountServiceAccountToken: true
-
-  # Service for Thanos service discovery on the sidecar
-  # Enabling this lets Thanos Query use
-  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover
-  # the Thanos sidecar on Prometheus pods
-  # (Remember to substitute ${kube-prometheus-stack.fullname} and ${namespace}; do not just copy and paste!)
-  thanosService:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-    ## Service dual stack
-    ##
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## gRPC port config
-    portName: grpc
-    port: 10901
-    targetPort: "grpc"
-
-    ## HTTP port config (for metrics)
-    httpPortName: http
-    httpPort: 10902
-    targetHttpPort: "http"
-
-    ## ClusterIP to assign
-    # Default is to make this a headless service ("None")
-    clusterIP: "None"
-
-    ## Port to expose on each node, if service type is NodePort
-    ##
-    nodePort: 30901
-    httpNodePort: 30902
-
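-  # As a worked example (assumed values): if ${kube-prometheus-stack.fullname} resolves to
-  # "kps" and the release lives in namespace "monitoring", the substituted Thanos Query flag is:
-  #   --store=dnssrv+_grpc._tcp.kps-thanos-discovery.monitoring.svc.cluster.local
-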
-  # ServiceMonitor to scrape Sidecar metrics
-  # Needs thanosService to be enabled as well
-  thanosServiceMonitor:
-    enabled: false
-    interval: ""
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## Metric relabel configs to apply to samples before ingestion.
-    metricRelabelings: []
-
-    ## relabel configs to apply to samples before ingestion.
-    relabelings: []
-
-  # Service for external access to sidecar
-  # Enabling this creates a service to expose thanos-sidecar outside the cluster.
-  thanosServiceExternal:
-    enabled: false
-    annotations: {}
-    labels: {}
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## gRPC port config
-    portName: grpc
-    port: 10901
-    targetPort: "grpc"
-
-    ## HTTP port config (for metrics)
-    httpPortName: http
-    httpPort: 10902
-    targetHttpPort: "http"
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: LoadBalancer
-
-    ## Port to expose on each node
-    ##
-    nodePort: 30901
-    httpNodePort: 30902
-
-  ## Configuration for Prometheus service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## Port for Prometheus Service to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## Port for Prometheus Reloader to listen on
-    ##
-    reloaderWebPort: 8080
-
-    ## List of IP addresses at which the Prometheus server service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30090
-
-    ## Loadbalancer IP
-    ## Only use if service.type is "LoadBalancer"
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-    ## Additional ports to open for Prometheus service
-    ##
-    additionalPorts: []
-    # additionalPorts:
-    # - name: oauth-proxy
-    #   port: 8081
-    #   targetPort: 8081
-    # - name: oauth-metrics
-    #   port: 8082
-    #   targetPort: 8082
-
-    ## If true, all endpoints are published and considered "ready" even if the Pods themselves are not
-    ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
-    publishNotReadyAddresses: false
-
-    ## If you want to make sure that connections from a particular client are passed to the same Pod each time
-    ## Accepts 'ClientIP' or 'None'
-    ##
-    sessionAffinity: None
-
-    ## If you want to modify the ClientIP sessionAffinity timeout
-    ## The value must be >0 and <=86400 (for 1 day) if sessionAffinity == "ClientIP"
-    ##
-    sessionAffinityConfig:
-      clientIP:
-        timeoutSeconds: 10800
-
-  ## Configuration for creating a separate Service for each statefulset Prometheus replica
-  ##
-  servicePerReplica:
-    enabled: false
-    annotations: {}
-
-    ## Port for Prometheus Service per replica to listen on
-    ##
-    port: 9090
-
-    ## To be used with a proxy extraContainer port
-    targetPort: 9090
-
-    ## Port to expose on each node
-    ## Only used if servicePerReplica.type is 'NodePort'
-    ##
-    nodePort: 30091
-
-    ## Loadbalancer source IP ranges
-    ## Only used if servicePerReplica.type is "LoadBalancer"
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-    ## Service dual stack
-    ##
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-  ## Configure pod disruption budgets for Prometheus
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
-  ##
-  podDisruptionBudget:
-    enabled: false
-    minAvailable: 1
-    maxUnavailable: ""
-
-  # Ingress exposes thanos sidecar outside the cluster
-  thanosIngress:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-    labels: {}
-    servicePort: 10901
-
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30901
-
-    ## Hosts must be provided if Ingress is enabled.
-    ##
-    hosts: []
-      # - thanos-gateway.domain.com
-
-    ## Paths to use for ingress rules
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## TLS configuration for Thanos Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-    # - secretName: thanos-gateway-tls
-    #   hosts:
-    #   - thanos-gateway.domain.com
-    #
-
-  ## ExtraSecret can be used to store various data in an extra secret
-  ## (use it for example to store hashed basic auth credentials)
-  extraSecret:
-    ## if not set, name will be auto generated
-    # name: ""
-    annotations: {}
-    data: {}
-  #   auth: |
-  #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
-  #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
-
-  ingress:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-    labels: {}
-
-    ## Redirect ingress to an additional defined port on the service
-    # servicePort: 8081
-
-    ## Hostnames.
-    ## Must be provided if Ingress is enabled.
-    ##
-    # hosts:
-    #   - prometheus.domain.com
-    hosts: []
-
-    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## TLS configuration for Prometheus Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-      # - secretName: prometheus-general-tls
-      #   hosts:
-      #     - prometheus.example.com
-
-  # -- BETA: Configure the gateway routes for the chart here.
-  # More routes can be added by adding a dictionary key like the 'main' route.
-  # Be aware that this is an early beta of this feature,
-  # kube-prometheus-stack does not guarantee this works and is subject to change.
-  # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
-  # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2)
-  route:
-    main:
-      # -- Enables or disables the route
-      enabled: false
-
-      # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
-      apiVersion: gateway.networking.k8s.io/v1
-      # -- Set the route kind
-      # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
-      kind: HTTPRoute
-
-      annotations: {}
-      labels: {}
-
-      hostnames: []
-      # - my-filter.example.com
-      parentRefs: []
-      # - name: acme-gw
-
-      matches:
-      - path:
-          type: PathPrefix
-          value: /
-
-      ## Filters define the filters that are applied to requests that match this rule.
-      filters: []
-
-      ## Additional custom rules that can be added to the route
-      additionalRules: []
-
-  ## Configuration for creating an Ingress that will map to each Prometheus replica service
-  ## prometheus.servicePerReplica must be enabled
-  ##
-  ingressPerReplica:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-    labels: {}
-
-    ## Final form of the hostname for each per replica ingress is
-    ## `<hostPrefix>-$replicaNumber.<hostDomain>`
-    ## Prefix for the per replica ingress that will have `-$replicaNumber`
-    ## appended to the end
-    hostPrefix: ""
-    ## Domain that will be used for the per replica ingress
-    hostDomain: ""
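-    ## For example (assumed values): hostPrefix "prometheus" and hostDomain "example.com"
-    ## yield "prometheus-0.example.com" for the first replica.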
-
-    ## Paths to use for ingress rules
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## Secret name containing the TLS certificate for Prometheus per replica ingress
-    ## Secret must be manually created in the namespace
-    tlsSecretName: ""
-
-    ## Separated secret for each per replica Ingress. Can be used together with cert-manager
-    ##
-    tlsSecretPerReplica:
-      enabled: false
-      ## Final form of the secret for each per replica ingress is
-      ## `<prefix>-$replicaNumber`
-      prefix: "prometheus"
-
-  ## Configure additional options for default pod security policy for Prometheus
-  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-  podSecurityPolicy:
-    allowedCapabilities: []
-    allowedHostPaths: []
-    volumes: []
-
-  serviceMonitor:
-    ## If true, create a serviceMonitor for prometheus
-    ##
-    selfMonitor: true
-
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## Metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## relabel configs to apply to samples before ingestion.
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional Endpoints
-    ##
-    additionalEndpoints: []
-    # - port: oauth-metrics
-    #   path: /metrics
-
-  ## Settings affecting prometheusSpec
-  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
-  ##
-  prometheusSpec:
-    ## Statefulset's persistent volume claim retention policy
-    ## whenDeleted and whenScaled determine whether
-    ## statefulset's PVCs are deleted (true) or retained (false)
-    ## on scaling down and deleting statefulset, respectively.
-    ## Requires Kubernetes version 1.27.0+.
-    ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-    persistentVolumeClaimRetentionPolicy: {}
-    #  whenDeleted: Retain
-    #  whenScaled: Retain
-
-    ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod.
-    ## If the field isn't set, the operator mounts the service account token by default.
-    ## Warning: be aware that, by default, Prometheus requires the service account token for Kubernetes service discovery.
-    ## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
-    automountServiceAccountToken: true
-
-    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
-    ##
-    disableCompaction: false
-    ## APIServerConfig
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
-    ##
-    apiserverConfig: {}
-
-    ## Allows setting additional arguments for the Prometheus container
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
-    additionalArgs: []
-
-    ## Interval between consecutive scrapes.
-    ## Defaults to 30s.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
-    ##
-    scrapeInterval: ""
-
-    ## Number of seconds to wait for target to respond before erroring
-    ##
-    scrapeTimeout: ""
-
-    ## List of scrape classes to expose to scraping objects such as
-    ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
-    ##
-    scrapeClasses: []
-    # - name: istio-mtls
-    #   default: false
-    #   tlsConfig:
-    #     caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
-    #     certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
-
-    ## Interval between consecutive evaluations.
-    ##
-    evaluationInterval: ""
-
-    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
-    ##
-    listenLocal: false
-
-    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
-    ## This is disabled by default.
-    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
-    ##
-    enableAdminAPI: false
-
-    ## Sets version of Prometheus overriding the Prometheus version as derived
-    ## from the image tag. Useful in cases where the tag does not follow semver v2.
-    version: ""
-
-    ## WebTLSConfig defines the TLS parameters for HTTPS
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig
-    web: {}
-
-    ## Exemplar-related settings that are runtime reloadable.
-    ## The exemplar storage feature must be enabled for them to take effect.
-    exemplars: {}
-      ## Maximum number of exemplars stored in memory for all series.
-      ## If not set, Prometheus uses its default value.
-      ## A value of zero or less than zero disables the storage.
-      # maxSize: 100000
-
-    # EnableFeatures enables access to Prometheus disabled features.
-    # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
-    enableFeatures: []
-    # - exemplar-storage
-
-    ## Image of Prometheus.
-    ##
-    image:
-      registry: quay.io
-      repository: prometheus/prometheus
-      tag: v3.1.0
-      sha: ""
-
-    ## Tolerations for use with node taints
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    #  - key: "key"
-    #    operator: "Equal"
-    #    value: "value"
-    #    effect: "NoSchedule"
-
-    ## If specified, the pod's topology spread constraints.
-    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-    ##
-    topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-    #   labelSelector:
-    #     matchLabels:
-    #       app: prometheus
-
-    ## Alertmanagers to which alerts will be sent
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints
-    ##
-    ## Default configuration will connect to the alertmanager deployed as part of this release
-    ##
-    alertingEndpoints: []
-    # - name: ""
-    #   namespace: ""
-    #   port: http
-    #   scheme: http
-    #   pathPrefix: ""
-    #   tlsConfig: {}
-    #   bearerTokenFile: ""
-    #   apiVersion: v2
-
-    ## External labels to add to any time series or alerts when communicating with external systems
-    ##
-    externalLabels: {}
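-    ## A hypothetical example (values are assumptions):
-    # externalLabels:
-    #   cluster: production
-    #   region: eu-west-1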
-
-    ## enable --web.enable-remote-write-receiver flag on prometheus-server
-    ##
-    enableRemoteWriteReceiver: false
-
-    ## Name of the external label used to denote replica name
-    ##
-    replicaExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote replica name
-    ##
-    replicaExternalLabelNameClear: false
-
-    ## Name of the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelName: ""
-
-    ## If true, the Operator won't add the external label used to denote Prometheus instance name
-    ##
-    prometheusExternalLabelNameClear: false
-
-    ## External URL at which Prometheus will be reachable.
-    ##
-    externalUrl: ""
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
-    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
-    ## with the new list of secrets.
-    ##
-    secrets: []
-
-    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
-    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
-    ##
-    configMaps: []
-
-    ## QuerySpec defines the query command line flags when starting Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec
-    ##
-    query: {}
-
-    ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
-    ruleNamespaceSelector: {}
-    ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
-    # ruleNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the PrometheusRule resources created
-    ##
-    ruleSelectorNilUsesHelmValues: true
-
-    ## PrometheusRules to be selected for target discovery.
-    ## If {}, select all PrometheusRules
-    ##
-    ruleSelector: {}
-    ## Example which select all PrometheusRules resources
-    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
-    # ruleSelector:
-    #   matchExpressions:
-    #     - key: prometheus
-    #       operator: In
-    #       values:
-    #         - example-rules
-    #         - example-rules-2
-    #
-    ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
-    # ruleSelector:
-    #   matchLabels:
-    #     role: example-rules
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the servicemonitors created
-    ##
-    serviceMonitorSelectorNilUsesHelmValues: true
-
-    ## ServiceMonitors to be selected for target discovery.
-    ## If {}, select all ServiceMonitors
-    ##
-    serviceMonitorSelector: {}
-    ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
-    # serviceMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for ServiceMonitor discovery.
-    ##
-    serviceMonitorNamespaceSelector: {}
-    ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
-    # serviceMonitorNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the podmonitors created
-    ##
-    podMonitorSelectorNilUsesHelmValues: true
-
-    ## PodMonitors to be selected for target discovery.
-    ## If {}, select all PodMonitors
-    ##
-    podMonitorSelector: {}
-    ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
-    # podMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
-    podMonitorNamespaceSelector: {}
-    ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
-    # podMonitorNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the probes created
-    ##
-    probeSelectorNilUsesHelmValues: true
-
-    ## Probes to be selected for target discovery.
-    ## If {}, select all Probes
-    ##
-    probeSelector: {}
-    ## Example which selects Probes with label "prometheus" set to "somelabel"
-    # probeSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If nil, select own namespace. Namespaces to be selected for Probe discovery.
-    probeNamespaceSelector: {}
-    ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
-    # probeNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the scrapeConfigs created
-    ##
-    ## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec
-    ## (keeping downward compatibility with older versions of CRD)
-    ##
-    scrapeConfigSelectorNilUsesHelmValues: true
-
-    ## scrapeConfigs to be selected for target discovery.
-    ## If {}, select all scrapeConfigs
-    ##
-    scrapeConfigSelector: {}
-    ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
-    # scrapeConfigSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
-    ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD)
-    scrapeConfigNamespaceSelector: {}
-    ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
-    # scrapeConfigNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## How long to retain metrics
-    ##
-    retention: 10d
-
-    ## Maximum size of metrics
-    ##
-    retentionSize: ""
-
-    ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
-    ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
-    tsdb:
-      outOfOrderTimeWindow: 0s
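-      ## e.g. accept samples up to 30 minutes out of order (an assumed value):
-      # outOfOrderTimeWindow: 30m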
-
-    ## Enable compression of the write-ahead log using Snappy.
-    ##
-    walCompression: true
-
-    ## If true, the Operator won't process any Prometheus configuration changes
-    ##
-    paused: false
-
-    ## Number of replicas of each shard to deploy for a Prometheus deployment.
-    ## Number of replicas multiplied by shards is the total number of Pods created.
-    ##
-    replicas: 1
-
-    ## EXPERIMENTAL: Number of shards to distribute targets onto.
-    ## Number of replicas multiplied by shards is the total number of Pods created.
-    ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
-    ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
-    ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
-    ## Sharding is done on the content of the `__address__` target meta-label.
-    ##
-    shards: 1
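-    ## e.g. replicas: 2 with shards: 2 results in 4 Prometheus pods in total.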
-
-    ## Log level for Prometheus to be configured with
-    ##
-    logLevel: info
-
-    ## Log format for Prometheus to be configured with
-    ##
-    logFormat: logfmt
-
-    ## Prefix used to register routes, overriding externalUrl route.
-    ## Useful for proxies that rewrite URLs.
-    ##
-    routePrefix: /
-
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata Labels and Annotations gets propagated to the prometheus pods.
-    ##
-    podMetadata: {}
-    # labels:
-    #   app: prometheus
-    #   k8s-app: prometheus
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    podAntiAffinity: "soft"
-
-    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the prometheus instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## The remote_read spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
-    remoteRead: []
-    # - url: http://remote1/read
-    ## additionalRemoteRead is appended to remoteRead
-    additionalRemoteRead: []
-
-    ## The remote_write spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
-    remoteWrite: []
-    # - url: http://remote1/push
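-    ## A fuller hypothetical example (the endpoint URL and secret names are assumptions);
-    ## the writeRelabelConfigs here would forward only node_* series:
-    # - url: https://mimir.example.com/api/v1/push
-    #   basicAuth:
-    #     username:
-    #       name: remote-write-credentials
-    #       key: username
-    #     password:
-    #       name: remote-write-credentials
-    #       key: password
-    #   writeRelabelConfigs:
-    #   - sourceLabels: [__name__]
-    #     regex: "node_.*"
-    #     action: keep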
-    ## additionalRemoteWrite is appended to remoteWrite
-    additionalRemoteWrite: []
-
-    ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
-    remoteWriteDashboards: false
-
-    ## Resource limits & requests
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Prometheus StorageSpec for persistent data
-    storageSpec:
-      ## Using PersistentVolumeClaim
-      ##
-      volumeClaimTemplate:
-        spec:
-          storageClassName: {{ .Values.globals.kubePrometheusStack.storageClass }}
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: {{ .Values.globals.kubePrometheusStack.prometheus.storageSize }}
-        selector: {}
-
-    ## Using tmpfs volume
-    ##
-    #  emptyDir:
-    #    medium: Memory
-
-    # Additional volumes on the output StatefulSet definition.
-    volumes: []
-
-    # Additional VolumeMounts on the output StatefulSet definition.
-    volumeMounts: []
-
-    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
-    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
-    ## as specified in the official Prometheus documentation:
-    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
-    ## appended, the user is responsible for making sure they are valid. Note that using this feature may break
-    ## Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible
-    ## scrape configs are going to break Prometheus after the upgrade.
-    ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
-    ##
-    ## The scrape configuration example below will find master nodes, provided they have the name mst, relabel the
-    ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
-    ##
-    additionalScrapeConfigs: []
-    # - job_name: kube-etcd
-    #   kubernetes_sd_configs:
-    #     - role: node
-    #   scheme: https
-    #   tls_config:
-    #     ca_file:   /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-    #     key_file:  /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-    #   relabel_configs:
-    #   - action: labelmap
-    #     regex: __meta_kubernetes_node_label_(.+)
-    #   - source_labels: [__address__]
-    #     action: replace
-    #     target_label: __address__
-    #     regex: ([^:;]+):(\d+)
-    #     replacement: ${1}:2379
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: keep
-    #     regex: mst
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: replace
-    #     target_label: node
-    #     regex: ()
-    #     replacement: ${1}
-    #   metric_relabel_configs:
-    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
-    #     action: labeldrop
-    #
-    ## If scrape config contains a repetitive section, you may want to use a template.
-    ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
-    # additionalScrapeConfigs: |
-    #  - job_name: "node-exporter"
-    #    gce_sd_configs:
-    #    relabel_configs:
-    #    ...
-
-
-    ## If additional scrape configurations are already deployed in a single secret file you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalScrapeConfigs
-    additionalScrapeConfigsSecret: {}
-      # enabled: false
-      # name:
-      # key:
-
-    ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
-    ## when deploying via spinnaker to disable versioning on the secret, e.g. strategy.spinnaker.io/versioned: 'false'
-    additionalPrometheusSecretsAnnotations: {}
-
-    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
-    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
-    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
-    ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
-    ## feature may break Prometheus upgrades. It is advised to review the Prometheus release
-    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertManagerConfigs: []
-    # - consul_sd_configs:
-    #   - server: consul.dev.test:8500
-    #     scheme: http
-    #     datacenter: dev
-    #     tag_separator: ','
-    #     services:
-    #       - metrics-prometheus-alertmanager
-
-    ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
-    ## them separately from the helm deployment, you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalAlertManagerConfigs
-    additionalAlertManagerConfigsSecret: {}
-      # name:
-      # key:
-      # optional: false
-
-    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
-    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
-    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
-    ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature
-    ## may break Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible alert relabel
-    ## configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertRelabelConfigs: []
-    # - separator: ;
-    #   regex: prometheus_replica
-    #   replacement: $1
-    #   action: labeldrop
-
-    ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
-    ## them separately from the helm deployment, you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalAlertRelabelConfigs
-    additionalAlertRelabelConfigsSecret: {}
-      # name:
-      # key:
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to non root user with uid 1000 and gid 2000.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
-    ##
-    securityContext:
-      runAsGroup: 2000
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-      seccompProfile:
-        type: RuntimeDefault
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
-    ## This section is experimental; it may change significantly without deprecation notice or backward compatibility in any release.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
-    ##
-    thanos: {}
-      # secretProviderClass:
-      #   provider: gcp
-      #   parameters:
-      #     secrets: |
-      #       - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
-      #         fileName: "objstore.yaml"
-      ## ObjectStorageConfig configures object storage in Thanos.
-      # objectStorageConfig:
-      #   # use existing secret, if configured, objectStorageConfig.secret will not be used
-      #   existingSecret: {}
-      #     # name: ""
-      #     # key: ""
-      #   # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource,
-      #   # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set
-      #   # https://thanos.io/tip/thanos/storage.md/#s3
-      #   secret: {}
-      #     # type: S3
-      #     # config:
-      #     #   bucket: ""
-      #     #   endpoint: ""
-      #     #   region: ""
-      #     #   access_key: ""
-      #     #   secret_key: ""
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
-    ## If using a proxy extraContainer, update targetPort with the proxy container port.
-    containers: []
-    # containers:
-    # - name: oauth-proxy
-    #   image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
-    #   args:
-    #   - --upstream=http://127.0.0.1:9090
-    #   - --http-address=0.0.0.0:8081
-    #   - --metrics-address=0.0.0.0:8082
-    #   - ...
-    #   ports:
-    #   - containerPort: 8081
-    #     name: oauth-proxy
-    #     protocol: TCP
-    #   - containerPort: 8082
-    #     name: oauth-metrics
-    #     protocol: TCP
-    #   resources: {}
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow making changes
-    ## (permissions, dir tree) on mounted volumes before starting prometheus
-    initContainers: []
-
-    ## PortName to use for Prometheus.
-    ##
-    portName: "http-web"
-
-    ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
-    ## on the file system of the Prometheus container e.g. bearer token files.
-    arbitraryFSAccessThroughSMs: false
-
-    ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
-    ## or PodMonitor to true, this overrides honor_labels to false.
-    overrideHonorLabels: false
-
-    ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
-    overrideHonorTimestamps: false
-
-    ## When ignoreNamespaceSelectors is set to true, the namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored;
-    ## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object,
-    ## and servicemonitors will be installed in the default service namespace.
-    ## Defaults to false.
-    ignoreNamespaceSelectors: false
-
-    ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
-    ## The label value will always be the namespace of the object that is being created.
-    ## Disabled by default
-    enforcedNamespaceLabel: ""
-
-    ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
-    ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair
-    ## Deprecated, use `excludedFromEnforcement` instead
-    prometheusRulesExcludedFromEnforce: []
-
-    ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
-    ## to be excluded from enforcing a namespace label of origin.
-    ## Works only if enforcedNamespaceLabel set to true.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
-    excludedFromEnforcement: []
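-    ## A hypothetical example (namespace and name are assumptions):
-    # excludedFromEnforcement:
-    # - group: monitoring.coreos.com
-    #   resource: servicemonitors
-    #   namespace: kube-system
-    #   name: kubelet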
-
-    ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
-    ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
-    ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
-    ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
-    queryLogFile: false
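-    ## e.g. queryLogFile: /dev/stdout (logs queries to the default Prometheus log stream, as described above)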
-
-    # Use to set a global sample_limit for Prometheus. This acts as the default SampleLimit for ServiceMonitors and/or PodMonitors.
-    # Set to 'false' to disable the global sample_limit, or set to a number to override the default value.
-    sampleLimit: false
-
-    # EnforcedKeepDroppedTargetsLimit defines the number of targets dropped by relabeling that will be kept in memory.
-    # The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
-    # is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
-    enforcedKeepDroppedTargets: 0
-
-    ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
-    ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
-    ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
-    enforcedSampleLimit: false
-
-    ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
-    ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
-    ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
-    ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
-    enforcedTargetLimit: false
-
-
-    ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelLimit: false
-
-    ## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelNameLengthLimit: false
-
-    ## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
-    ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
-    ## versions 2.27.0 and newer.
-    enforcedLabelValueLengthLimit: false
-
-    ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
-    ## in Prometheus so it may change in any upcoming release.
-    allowOverlappingBlocks: false
-
-    ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
-    ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
-    minReadySeconds: 0
-
-    # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
-    # because the AWS-managed control plane cannot reach the pods' IP CIDR, so admission webhooks do not work.
-    # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
-    # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
-    hostNetwork: false
-
-    # HostAlias holds the mapping between IP and hostnames that will be injected
-    # as an entry in the pod’s hosts file.
-    hostAliases: []
-    #  - ip: 10.10.0.100
-    #    hostnames:
-    #      - a1.app.local
-    #      - b1.app.local
-
-    ## TracingConfig configures tracing in Prometheus.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheustracingconfig
-    tracingConfig: {}
-
-    ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
-    ## If set, the value should be either “Endpoints” or “EndpointSlice”. If unset, the operator assumes the “Endpoints” role.
-    serviceDiscoveryRole: ""
-
-    ## Additional configuration which is not covered by the properties above. (passed through tpl)
-    additionalConfig: {}
-
-    ## Additional configuration which is not covered by the properties above.
-    ## Useful if you need advanced templating inside prometheusSpec.
-    ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
-    additionalConfigString: ""
-
-    ## Defines the maximum time that the `prometheus` container's startup probe
-    ## will wait before being considered failed. The startup probe will return
-    ## success after the WAL replay is complete. If set, the value should be
-    ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
-    ## minutes).
-    maximumStartupDurationSeconds: 0
-
-  additionalRulesForClusterRole: []
-  #  - apiGroups: [ "" ]
-  #    resources:
-  #      - nodes/proxy
-  #    verbs: [ "get", "list", "watch" ]
-
-  additionalServiceMonitors: []
-  ## Name of the ServiceMonitor to create
-  ##
-  # - name: ""
-
-    ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
-    ## the chart
-    ##
-    # additionalLabels: {}
-
-    ## Service label for use in assembling a job name of the form <label value>-<port>
-    ## If no label is specified, the service name is used.
-    ##
-    # jobLabel: ""
-
-    ## labels to transfer from the kubernetes service to the target
-    ##
-    # targetLabels: []
-
-    ## labels to transfer from the kubernetes pods to the target
-    ##
-    # podTargetLabels: []
-
-    ## Label selector for services to which this ServiceMonitor applies
-    ##
-    # selector: {}
-      ## Example which selects all services to be monitored
-      ## with label "monitoredby" with values any of "example-service-1" or "example-service-2"
-      # matchExpressions:
-      #   - key: "monitoredby"
-      #     operator: In
-      #     values:
-      #       - example-service-1
-      #       - example-service-2
-
-      ## label selector for services
-      ##
-      # matchLabels: {}
-
-    ## Namespaces from which services are selected
-    ##
-    # namespaceSelector:
-      ## Match any namespace
-      ##
-      # any: false
-
-      ## Explicit list of namespace names to select
-      ##
-      # matchNames: []
-
-    ## Endpoints of the selected service to be monitored
-    ##
-    # endpoints: []
-      ## Name of the endpoint's service port
-      ## Mutually exclusive with targetPort
-      # - port: ""
-
-      ## Name or number of the endpoint's target port
-      ## Mutually exclusive with port
-      # - targetPort: ""
-
-      ## File containing bearer token to be used when scraping targets
-      ##
-      #   bearerTokenFile: ""
-
-      ## Interval at which metrics should be scraped
-      ##
-      #   interval: 30s
-
-      ## HTTP path to scrape for metrics
-      ##
-      #   path: /metrics
-
-      ## HTTP scheme to use for scraping
-      ##
-      #   scheme: http
-
-      ## TLS configuration to use when scraping the endpoint
-      ##
-      #   tlsConfig:
-
-          ## Path to the CA file
-          ##
-          # caFile: ""
-
-          ## Path to client certificate file
-          ##
-          # certFile: ""
-
-          ## Skip certificate verification
-          ##
-          # insecureSkipVerify: false
-
-          ## Path to client key file
-          ##
-          # keyFile: ""
-
-          ## Server name used to verify host name
-          ##
-          # serverName: ""
-
-      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      # metricRelabelings: []
-      # - action: keep
-      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-      #   sourceLabels: [__name__]
-
-      ## RelabelConfigs to apply to samples before scraping
-      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-      ##
-      # relabelings: []
-      # - sourceLabels: [__meta_kubernetes_pod_node_name]
-      #   separator: ;
-      #   regex: ^()$
-      #   targetLabel: nodename
-      #   replacement: $1
-      #   action: replace
-
-    ## Fallback scrape protocol used by Prometheus for scraping metrics
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ScrapeProtocol
-    ##
-    # fallbackScrapeProtocol: ""
-
-  additionalPodMonitors: []
-  ## Name of the PodMonitor to create
-  ##
-  # - name: ""
-
-    ## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
-    ## the chart
-    ##
-    # additionalLabels: {}
-
-    ## Pod label for use in assembling a job name of the form <label value>-<port>
-    ## If no label is specified, the pod endpoint name is used.
-    ##
-    # jobLabel: ""
-
-    ## Label selector for pods to which this PodMonitor applies
-    ##
-    # selector: {}
-      ## Example which selects all Pods to be monitored
-      ## with label "monitoredby" with values any of "example-pod-1" or "example-pod-2"
-      # matchExpressions:
-      #   - key: "monitoredby"
-      #     operator: In
-      #     values:
-      #       - example-pod-1
-      #       - example-pod-2
-
-      ## label selector for pods
-      ##
-      # matchLabels: {}
-
-    ## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
-    ##
-    # podTargetLabels: {}
-
-    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
-    ##
-    # sampleLimit: 0
-
-    ## Namespaces from which pods are selected
-    ##
-    # namespaceSelector:
-      ## Match any namespace
-      ##
-      # any: false
-
-      ## Explicit list of namespace names to select
-      ##
-      # matchNames: []
-
-    ## Endpoints of the selected pods to be monitored
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmetricsendpoint
-    ##
-    # podMetricsEndpoints: []
-
-    ## Fallback scrape protocol used by Prometheus for scraping metrics
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ScrapeProtocol
-    ##
-    # fallbackScrapeProtocol: ""
-
-## Configuration for thanosRuler
-## ref: https://thanos.io/tip/components/rule.md/
-##
-thanosRuler:
-
-  ## Deploy thanosRuler
-  ##
-  enabled: false
-
-  ## Annotations for ThanosRuler
-  ##
-  annotations: {}
-
-  ## Service account for ThanosRuler to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-    annotations: {}
-
-  ## Configure pod disruption budgets for ThanosRuler
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
-  ##
-  podDisruptionBudget:
-    enabled: false
-    minAvailable: 1
-    maxUnavailable: ""
-
-  ingress:
-    enabled: false
-
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-
-    annotations: {}
-
-    labels: {}
-
-    ## Hosts must be provided if Ingress is enabled.
-    ##
-    hosts: []
-      # - thanosruler.domain.com
-
-    ## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
-    ##
-    paths: []
-    # - /
-
-    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
-    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
-    # pathType: ImplementationSpecific
-
-    ## TLS configuration for ThanosRuler Ingress
-    ## Secret must be manually created in the namespace
-    ##
-    tls: []
-    # - secretName: thanosruler-general-tls
-    #   hosts:
-    #   - thanosruler.example.com
-
-  # -- BETA: Configure the gateway routes for the chart here.
-  # More routes can be added by adding a dictionary key like the 'main' route.
-  # Be aware that this is an early beta of this feature,
-  # kube-prometheus-stack does not guarantee this works and is subject to change.
-  # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
-  # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2)
-  route:
-    main:
-      # -- Enables or disables the route
-      enabled: false
-
-      # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
-      apiVersion: gateway.networking.k8s.io/v1
-      # -- Set the route kind
-      # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
-      kind: HTTPRoute
-
-      annotations: {}
-      labels: {}
-
-      hostnames: []
-      # - my-filter.example.com
-      parentRefs: []
-      # - name: acme-gw
-
-      matches:
-        - path:
-            type: PathPrefix
-            value: /
-
-      ## Filters define the filters that are applied to requests that match this rule.
-      filters: []
-
-      ## Additional custom rules that can be added to the route
-      additionalRules: []
-
-  ## Configuration for ThanosRuler service
-  ##
-  service:
-    annotations: {}
-    labels: {}
-    clusterIP: ""
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## Port for ThanosRuler Service to listen on
-    ##
-    port: 10902
-    ## To be used with a proxy extraContainer port
-    ##
-    targetPort: 10902
-    ## Port to expose on each node
-    ## Only used if service.type is 'NodePort'
-    ##
-    nodePort: 30905
-    ## Additional ports to open for the ThanosRuler service
-    additionalPorts: []
-
-    ## List of IP addresses at which the ThanosRuler service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-  ## Configuration for creating a ServiceMonitor for the ThanosRuler service
-  ##
-  serviceMonitor:
-    ## If true, create a serviceMonitor for thanosRuler
-    ##
-    selfMonitor: true
-
-    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
-    ##
-    interval: ""
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-
-    ## SampleLimit defines a per-scrape limit on the number of scraped samples that will be accepted.
-    ##
-    sampleLimit: 0
-
-    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
-    ##
-    targetLimit: 0
-
-    ## Per-scrape limit on the number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelLimit: 0
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelNameLengthLimit: 0
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
-    ##
-    labelValueLengthLimit: 0
-
-    ## proxyUrl: URL of a proxy that should be used for scraping.
-    ##
-    proxyUrl: ""
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional Endpoints
-    ##
-    additionalEndpoints: []
-    # - port: oauth-metrics
-    #   path: /metrics
-
-  ## Settings affecting thanosRulerSpec
-  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosrulerspec
-  ##
-  thanosRulerSpec:
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata Labels and Annotations get propagated to the ThanosRuler pods.
-    ##
-    podMetadata: {}
-
-    ## Image of ThanosRuler
-    ##
-    image:
-      registry: quay.io
-      repository: thanos/thanos
-      tag: v0.37.2
-      sha: ""
-
-    ## Namespaces to be selected for PrometheusRules discovery.
-    ## If nil, select own namespace.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
-    ##
-    ruleNamespaceSelector: {}
-
-    ## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
-    ## ThanosRuler resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the PrometheusRule resources created
-    ##
-    ruleSelectorNilUsesHelmValues: true
-
-    ## PrometheusRules to be selected for target discovery.
-    ## If {}, select all PrometheusRules
-    ##
-    ruleSelector: {}
-    ## Example which select all PrometheusRules resources
-    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
-    # ruleSelector:
-    #   matchExpressions:
-    #     - key: prometheus
-    #       operator: In
-    #       values:
-    #         - example-rules
-    #         - example-rules-2
-    #
-    ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
-    # ruleSelector:
-    #   matchLabels:
-    #     role: example-rules
-
-    ## Define Log Format
-    # Use logfmt (default) or json logging
-    logFormat: logfmt
-
-    ## Log level for ThanosRuler to be configured with.
-    ##
-    logLevel: info
-
-    ## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
-    ## running cluster equal to the expected size.
-    replicas: 1
-
-    ## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
-    ## [0-9]+(ms|s|m|h) (milliseconds, seconds, minutes, hours).
-    ##
-    retention: 24h
-
-    ## Interval between consecutive evaluations.
-    ##
-    evaluationInterval: ""
-
-    ## Storage is the definition of how storage will be used by the ThanosRuler instances.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
-    ##
-    storage:
-      volumeClaimTemplate:
-        spec:
-          storageClassName: {{ .Values.globals.kubePrometheusStack.storageClass }}
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: {{ .Values.globals.kubePrometheusStack.thanosRuler.storageSize }}
-        selector: {}
-
-    ## AlertmanagerConfig define configuration for connecting to alertmanager.
-    ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
-    alertmanagersConfig:
-      # use existing secret, if configured, alertmanagersConfig.secret will not be used
-      existingSecret: {}
-        # name: ""
-        # key: ""
-      # will render alertmanagersConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when alertmanagersConfig.existingSecret is set
-      # https://thanos.io/tip/components/rule.md/#alertmanager
-      secret: {}
-        # alertmanagers:
-        # - api_version: v2
-        #   http_config:
-        #     basic_auth:
-        #       username: some_user
-        #       password: some_pass
-        #   static_configs:
-        #     - alertmanager.thanos.io
-        #   scheme: http
-        #   timeout: 10s
-
-    ## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
-    ## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
-    # alertmanagersUrl:
-
-    ## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs,
-    ## and is required if Thanos Ruler is not served from the root of a DNS name.
-    ##
-    externalPrefix:
-
-    ## If true and externalPrefix is not set, the ThanosRuler service URL will be used as the value for externalPrefix
-    externalPrefixNilUsesHelmValues: true
-
-    ## The route prefix ThanosRuler registers HTTP handlers for. This is useful when using ExternalURL with a proxy
-    ## that rewrites the HTTP routes of a request: the ExternalURL is still correct, but the server serves requests under a different route prefix (for example, when using kubectl proxy).
-    ##
-    routePrefix: /
-
-    ## ObjectStorageConfig configures object storage in Thanos
-    objectStorageConfig:
-      # use existing secret, if configured, objectStorageConfig.secret will not be used
-      existingSecret: {}
-        # name: ""
-        # key: ""
-      # will render objectStorageConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when objectStorageConfig.existingSecret is set
-      # https://thanos.io/tip/thanos/storage.md/#s3
-      secret: {}
-        # type: S3
-        # config:
-        #   bucket: ""
-        #   endpoint: ""
-        #   region: ""
-        #   access_key: ""
-        #   secret_key: ""
-
-    ## Labels by name to drop before sending to alertmanager
-    ## Maps to the --alert.label-drop flag of thanos ruler.
-    alertDropLabels: []
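-    ## For example, to strip two hypothetical labels from outgoing alerts:
-    # alertDropLabels:
-    #   - replica
-    #   - environment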
-
-    ## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
-    ## Maps to the --query flag of thanos ruler.
-    queryEndpoints: []
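-    ## For example, to discover a Thanos Query service via DNS SRV records
-    ## (the service name and namespace are illustrative):
-    # queryEndpoints:
-    #   - dnssrv+_http._tcp.thanos-query.monitoring.svc.cluster.local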
-
-    ## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
-    ## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
-    queryConfig:
-      # use existing secret, if configured, queryConfig.secret will not be used
-      existingSecret: {}
-        # name: ""
-        # key: ""
-      # render queryConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when queryConfig.existingSecret is set
-      # https://thanos.io/tip/components/rule.md/#query-api
-      secret: {}
-        # - http_config:
-        #     basic_auth:
-        #       username: some_user
-        #       password: some_pass
-        #   static_configs:
-        #     - URL
-        #   scheme: http
-        #   timeout: 10s
-
-    ## Labels configure the external label pairs of ThanosRuler. A default replica
-    ## label `thanos_ruler_replica` will always be added as a label with the value
-    ## of the pod's name, and it will be dropped in the alerts.
-    labels: {}
-
-    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
-    ##
-    paused: false
-
-    ## Allows setting additional arguments for the ThanosRuler container
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosruler
-    ##
-    additionalArgs: []
-      # - name: remote-write.config
-      #   value: |-
-      #     "remote_write":
-      #     - "name": "receiver-0"
-      #       "remote_timeout": "30s"
-      #       "url": "http://thanos-receiver-0.thanos-receiver:8081/api/v1/receive"
-
-    ## Define which Nodes the Pods are scheduled on.
-    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
-    ##
-    nodeSelector: {}
-
-    ## Define resources requests and limits for single Pods.
-    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Pod anti-affinity can prevent the scheduler from placing ThanosRuler replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    ##
-    podAntiAffinity: "soft"
-
-    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, topology.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the thanosRuler instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## If specified, the pod's tolerations.
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    # - key: "key"
-    #   operator: "Equal"
-    #   value: "value"
-    #   effect: "NoSchedule"
-
-    ## If specified, the pod's topology spread constraints.
-    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-    ##
-    topologySpreadConstraints: []
-    # - maxSkew: 1
-    #   topologyKey: topology.kubernetes.io/zone
-    #   whenUnsatisfiable: DoNotSchedule
-    #   labelSelector:
-    #     matchLabels:
-    #       app: thanos-ruler
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to a non-root user with uid 1000 and gid 2000.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-    ##
-    securityContext:
-      runAsGroup: 2000
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-      seccompProfile:
-        type: RuntimeDefault
-
-    ## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
-    ## Note this is only for the ThanosRuler UI, not the gossip communication.
-    ##
-    listenLocal: false
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a ThanosRuler pod.
-    ##
-    containers: []
-
-    # Additional volumes on the output StatefulSet definition.
-    volumes: []
-
-    # Additional VolumeMounts on the output StatefulSet definition.
-    volumeMounts: []
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
-    ## (permissions, dir tree) on mounted volumes before ThanosRuler starts
-    initContainers: []
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## PortName to use for ThanosRuler.
-    ##
-    portName: "web"
-
-    ## WebTLSConfig defines the TLS parameters for HTTPS
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosrulerwebspec
-    web: {}
-
-    ## Additional configuration which is not covered by the properties above. (passed through tpl)
-    additionalConfig: {}
-
-    ## Additional configuration which is not covered by the properties above.
-    ## Useful, if you need advanced templating
-    additionalConfigString: ""
-
-  ## ExtraSecret can be used to store various data in an extra secret
-  ## (use it for example to store hashed basic auth credentials)
-  extraSecret:
-    ## if not set, name will be auto generated
-    # name: ""
-    annotations: {}
-    data: {}
-  #   auth: |
-  #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
-  #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
-
-## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
-##
-cleanPrometheusOperatorObjectNames: false
-
-## Extra manifests to deploy as an array
-extraManifests: []
-  # - apiVersion: v1
-  #   kind: ConfigMap
-  #   metadata:
-  #   labels:
-  #     name: prometheus-extra
-  #   data:
-  #     extra-data: "value"
-
diff --git a/k8s/helmfile.d/values/longhorn/values.yaml.gotmpl b/k8s/helmfile.d/values/longhorn/values.yaml.gotmpl
deleted file mode 100644
index 35b0d6c..0000000
--- a/k8s/helmfile.d/values/longhorn/values.yaml.gotmpl
+++ /dev/null
@@ -1,539 +0,0 @@
-# Default values for longhorn.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-global:
-  # -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
-  tolerations: []
-  # -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
-  nodeSelector: {}
-  cattle:
-    # -- Default system registry.
-    systemDefaultRegistry: ""
-    windowsCluster:
-      # -- Setting that allows Longhorn to run on a Rancher Windows cluster.
-      enabled: false
-      # -- Toleration for Linux nodes that can run user-deployed Longhorn components.
-      tolerations:
-      - key: "cattle.io/os"
-        value: "linux"
-        effect: "NoSchedule"
-        operator: "Equal"
-      # -- Node selector for Linux nodes that can run user-deployed Longhorn components.
-      nodeSelector:
-        kubernetes.io/os: "linux"
-      defaultSetting:
-        # -- Toleration for system-managed Longhorn components.
-        taintToleration: cattle.io/os=linux:NoSchedule
-        # -- Node selector for system-managed Longhorn components.
-        systemManagedComponentsNodeSelector: kubernetes.io/os:linux
-
-networkPolicies:
-  # -- Setting that allows you to enable network policies that control access to Longhorn pods.
-  enabled: false
-  # -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
-  type: "k3s"
-
-image:
-  longhorn:
-    engine:
-      # -- Repository for the Longhorn Engine image.
-      repository: longhornio/longhorn-engine
-      # -- Tag for the Longhorn Engine image.
-      tag: v1.8.0
-    manager:
-      # -- Repository for the Longhorn Manager image.
-      repository: longhornio/longhorn-manager
-      # -- Tag for the Longhorn Manager image.
-      tag: v1.8.0
-    ui:
-      # -- Repository for the Longhorn UI image.
-      repository: longhornio/longhorn-ui
-      # -- Tag for the Longhorn UI image.
-      tag: v1.8.0
-    instanceManager:
-      # -- Repository for the Longhorn Instance Manager image.
-      repository: longhornio/longhorn-instance-manager
-      # -- Tag for the Longhorn Instance Manager image.
-      tag: v1.8.0
-    shareManager:
-      # -- Repository for the Longhorn Share Manager image.
-      repository: longhornio/longhorn-share-manager
-      # -- Tag for the Longhorn Share Manager image.
-      tag: v1.8.0
-    backingImageManager:
-      # -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/backing-image-manager
-      # -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
-      tag: v1.8.0
-    supportBundleKit:
-      # -- Repository for the Longhorn Support Bundle Manager image.
-      repository: longhornio/support-bundle-kit
-      # -- Tag for the Longhorn Support Bundle Manager image.
-      tag: v0.0.49
-  csi:
-    attacher:
-      # -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/csi-attacher
-      # -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
-      tag: v4.8.0
-    provisioner:
-      # -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/csi-provisioner
-      # -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
-      tag: v5.1.0-20241220
-    nodeDriverRegistrar:
-      # -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/csi-node-driver-registrar
-      # -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
-      tag: v2.13.0
-    resizer:
-      # -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/csi-resizer
-      # -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
-      tag: v1.13.1
-    snapshotter:
-      # -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/csi-snapshotter
-      # -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
-      tag: v8.2.0
-    livenessProbe:
-      # -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
-      repository: longhornio/livenessprobe
-      # -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
-      tag: v2.15.0
-  openshift:
-    oauthProxy:
-      # -- Repository for the OAuth Proxy image. Specify the upstream image (for example, "quay.io/openshift/origin-oauth-proxy"). This setting applies only to OpenShift users.
-      repository: ""
-      # -- Tag for the OAuth Proxy image. Specify OCP/OKD version 4.1 or later (including version 4.15, which is available at quay.io/openshift/origin-oauth-proxy:4.15). This setting applies only to OpenShift users.
-      tag: ""
-  # -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
-  pullPolicy: IfNotPresent
-
-service:
-  ui:
-    # -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
-    type: ClusterIP
-    # -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
-    nodePort: null
-  manager:
-    # -- Service type for Longhorn Manager.
-    type: ClusterIP
-    # -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
-    nodePort: ""
-
-persistence:
-  # -- Setting that allows you to specify the default Longhorn StorageClass.
-  defaultClass: true
-  # -- Filesystem type of the default Longhorn StorageClass.
-  defaultFsType: ext4
-  # -- mkfs parameters of the default Longhorn StorageClass.
-  defaultMkfsParams: ""
-  # -- Replica count of the default Longhorn StorageClass.
-  defaultClassReplicaCount: 3
-  # -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
-  defaultDataLocality: disabled
-  # -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
-  reclaimPolicy: Delete
-  # -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
-  migratable: false
-  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery.
-  disableRevisionCounter: "true"
-  # -- Set NFS mount options for Longhorn StorageClass for RWX volumes
-  nfsOptions: ""
-  recurringJobSelector:
-    # -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
-    enable: false
-    # -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
-    jobList: []
-  backingImage:
-    # -- Setting that allows you to use a backing image in a Longhorn StorageClass.
-    enable: false
-    # -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
-    name: ~
-    # -- Data source type of a backing image used in a Longhorn StorageClass.
-    # If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
-    # If the backing image does not exist, Longhorn creates one using the specified data source type.
-    dataSourceType: ~
-    # -- Data source parameters of a backing image used in a Longhorn StorageClass.
-    # You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
-    dataSourceParameters: ~
-    # -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
-    expectedChecksum: ~
-  defaultDiskSelector:
-    # -- Setting that allows you to enable the disk selector for the default Longhorn StorageClass.
-    enable: false
-    # -- Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata")
-    selector: ""
-  defaultNodeSelector:
-    # -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
-    enable: false
-    # -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
-    selector: ""
-  # -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
-  removeSnapshotsDuringFilesystemTrim: ignored
-  # -- Setting that allows you to specify the data engine version for the default Longhorn StorageClass. (Options: "v1", "v2")
-  dataEngine: v1
-  # -- Setting that allows you to specify the backup target for the default Longhorn StorageClass.
-  backupTargetName: default
-
-preUpgradeChecker:
-  # -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
-  jobEnabled: true
-  # -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
-  upgradeVersionCheck: true
-
-csi:
-  # -- kubelet root directory. When unspecified, Longhorn uses the default value.
-  kubeletRootDir: ~
-  # -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
-  attacherReplicaCount: ~
-  # -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
-  provisionerReplicaCount: ~
-  # -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
-  resizerReplicaCount: ~
-  # -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
-  snapshotterReplicaCount: ~
-
-defaultSettings:
-  # -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
-  allowRecurringJobWhileVolumeDetached: ~
-  # -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
-  createDefaultDiskLabeledNodes: ~
-  # -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
-  defaultDataPath: ~
-  # -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
-  defaultDataLocality: ~
-  # -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
-  replicaSoftAntiAffinity: ~
-  # -- Setting that automatically rebalances replicas when an available node is discovered.
-  replicaAutoBalance: ~
-  # -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
-  storageOverProvisioningPercentage: ~
-  # -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
-  storageMinimalAvailablePercentage: ~
-  # -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
-  storageReservedPercentageForDefaultDisk: ~
-  # -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default
-  upgradeChecker: ~
-  # -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
-  defaultReplicaCount: ~
-  # -- Default name of Longhorn static StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. "storageClassName" needs to be an existing StorageClass. The default value is "longhorn-static".
-  defaultLonghornStaticStorageClass: ~
-  # -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
-  failedBackupTTL: ~
-  # -- Number of minutes that Longhorn allows for the backup execution. The default value is "1".
-  backupExecutionTimeout: ~
-  # -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
-  restoreVolumeRecurringJobs: ~
-  # -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
-  recurringSuccessfulJobsHistoryLimit: ~
-  # -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
-  recurringFailedJobsHistoryLimit: ~
-  # -- Maximum number of snapshots or backups to be retained.
-  recurringJobMaxRetention: ~
-  # -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
-  supportBundleFailedHistoryLimit: ~
-  # -- Taint or toleration for system-managed Longhorn components.
-  # Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
-  taintToleration: ~
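-  ## For example, to tolerate two hypothetical taints:
-  # taintToleration: "key1=value1:NoSchedule; key2=value2:NoExecute"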
-  # -- Node selector for system-managed Longhorn components.
-  systemManagedComponentsNodeSelector: ~
-  # -- PriorityClass for system-managed Longhorn components.
-  # This setting can help prevent Longhorn components from being evicted under Node Pressure.
-  # Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
-  priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
-  # -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
-  autoSalvage: ~
-  # -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
-  autoDeletePodWhenVolumeDetachedUnexpectedly: ~
-  # -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
-  disableSchedulingOnCordonedNode: ~
-  # -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
-  replicaZoneSoftAntiAffinity: ~
-  # -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
-  replicaDiskSoftAntiAffinity: ~
-  # -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
-  nodeDownPodDeletionPolicy: ~
-  # -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
-  nodeDrainPolicy: ~
-  # -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
-  detachManuallyAttachedVolumesWhenCordoned: ~
-  # -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
-  replicaReplenishmentWaitInterval: ~
-  # -- Maximum number of replicas that can be concurrently rebuilt on each node.
-  concurrentReplicaRebuildPerNodeLimit: ~
-  # -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
-  concurrentVolumeBackupRestorePerNodeLimit: ~
-  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
-  disableRevisionCounter: "true"
-  # -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
-  systemManagedPodsImagePullPolicy: ~
-  # -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
-  allowVolumeCreationWithDegradedAvailability: ~
-  # -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
-  autoCleanupSystemGeneratedSnapshot: ~
-  # -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
-  autoCleanupRecurringJobBackupSnapshot: ~
-  # -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
-  concurrentAutomaticEngineUpgradePerNodeLimit: ~
-  # -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
-  backingImageCleanupWaitInterval: ~
-  # -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
-  backingImageRecoveryWaitInterval: ~
-  # -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
-  guaranteedInstanceManagerCPU: ~
-  # -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
-  kubernetesClusterAutoscalerEnabled: ~
-  # -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
-  orphanAutoDeletion: ~
-  # -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
-  storageNetwork: ~
-  # -- Flag that prevents accidental uninstallation of Longhorn.
-  deletingConfirmationFlag: ~
-  # -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
-  engineReplicaTimeout: ~
-  # -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
-  snapshotDataIntegrity: ~
-  # -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
-  snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
-  # -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
-  snapshotDataIntegrityCronjob: ~
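-  ## For example, to run the integrity check at midnight every seventh day of the month:
-  # snapshotDataIntegrityCronjob: "0 0 */7 * *"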
-  # -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
-  removeSnapshotsDuringFilesystemTrim: ~
-  # -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
-  fastReplicaRebuildEnabled: ~
-  # -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
-  replicaFileSyncHttpClientTimeout: ~
-  # -- Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations.
-  longGRPCTimeOut: ~
-  # -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
-  logLevel: ~
-  # -- Setting that allows you to specify a backup compression method.
-  backupCompressionMethod: ~
-  # -- Maximum number of worker threads that can concurrently run for each backup.
-  backupConcurrentLimit: ~
-  # -- Maximum number of worker threads that can concurrently run for each restore operation.
-  restoreConcurrentLimit: ~
-  # -- Setting that allows you to enable the V1 Data Engine.
-  v1DataEngine: ~
-  # -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is an experimental feature and should not be used in production environments.
-  v2DataEngine: ~
-  # -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
-  v2DataEngineHugepageLimit: ~
-  # -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
-  v2DataEngineGuaranteedInstanceManagerCPU: ~
-  # -- CPU cores on which the Storage Performance Development Kit (SPDK) target daemon should run. The SPDK target daemon is located in each Instance Manager pod. Ensure that the number of cores is less than or equal to the guaranteed Instance Manager CPUs for the V2 Data Engine. The default value is "0x1".
-  v2DataEngineCPUMask: ~
-  # -- Setting that allows scheduling of empty node selector volumes to any node.
-  allowEmptyNodeSelectorVolume: ~
-  # -- Setting that allows scheduling of empty disk selector volumes to any disk.
-  allowEmptyDiskSelectorVolume: ~
-  # -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
-  allowCollectingLonghornUsageMetrics: ~
-  # -- Setting that temporarily prevents all attempts to purge volume snapshots.
-  disableSnapshotPurge: ~
-  # -- Maximum snapshot count for a volume. The value should be between 2 and 250.
-  snapshotMaxCount: ~
-  # -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
-  v2DataEngineLogLevel: ~
-  # -- Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
-  v2DataEngineLogFlags: ~
-  # -- Setting that freezes the filesystem on the root partition before a snapshot is created.
-  freezeFilesystemForSnapshot: ~
-  # -- Setting that automatically cleans up the snapshot when the backup is deleted.
-  autoCleanupSnapshotWhenDeleteBackup: ~
-  # -- Setting that allows Longhorn to detect node failure and immediately migrate affected RWX volumes.
-  rwxVolumeFastFailover: ~
-
-# -- Setting that allows you to update the default backupstore.
-defaultBackupStore:
-  # -- Endpoint used to access the default backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
-  backupTarget: ~
-  # -- Name of the Kubernetes secret associated with the default backup target.
-  backupTargetCredentialSecret: ~
-  # -- Number of seconds that Longhorn waits before checking the default backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
-  pollInterval: ~
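-  ## For example, an S3 backup target (bucket, region, and secret name are illustrative):
-  # backupTarget: "s3://backup-bucket@us-east-1/"
-  # backupTargetCredentialSecret: "aws-secret"
-  ## or an NFS backup target, which needs no credential secret:
-  # backupTarget: "nfs://nfs-server.example.com:/opt/backupstore"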
-
-privateRegistry:
-  # -- Setting that allows you to create a private registry secret.
-  createSecret: ~
-  # -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
-  registryUrl: ~
-  # -- User account used for authenticating with a private registry.
-  registryUser: ~
-  # -- Password for authenticating with a private registry.
-  registryPasswd: ~
-  # -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
-  registrySecret: ~
-
-longhornManager:
-  log:
-    # -- Format of Longhorn Manager logs. (Options: "plain", "json")
-    format: plain
-  # -- PriorityClass for Longhorn Manager.
-  priorityClass: *defaultPriorityClassNameRef
-  # -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components.
-  tolerations: []
-  ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
-  ## and uncomment this example block
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-  # -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
-  nodeSelector: {}
-  ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
-  ## and uncomment this example block
-  #  label-key1: "label-value1"
-  #  label-key2: "label-value2"
-  # -- Annotation for the Longhorn Manager service.
-  serviceAnnotations: {}
-  ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
-  ## and uncomment this example block
-  #  annotation-key1: "annotation-value1"
-  #  annotation-key2: "annotation-value2"
-
-longhornDriver:
-  log:
-    # -- Format of longhorn-driver logs. (Options: "plain", "json")
-    format: plain
-  # -- PriorityClass for Longhorn Driver.
-  priorityClass: *defaultPriorityClassNameRef
-  # -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
-  tolerations: []
-  ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
-  ## and uncomment this example block
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-  # -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
-  nodeSelector: {}
-  ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
-  ## and uncomment this example block
-  #  label-key1: "label-value1"
-  #  label-key2: "label-value2"
-
-longhornUI:
-  # -- Replica count for Longhorn UI.
-  replicas: 2
-  # -- PriorityClass for Longhorn UI.
-  priorityClass: *defaultPriorityClassNameRef
-  # -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
-  tolerations: []
-  ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
-  ## and uncomment this example block
-  # - key: "key"
-  #   operator: "Equal"
-  #   value: "value"
-  #   effect: "NoSchedule"
-  # -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
-  nodeSelector: {}
-  ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
-  ## and uncomment this example block
-  #  label-key1: "label-value1"
-  #  label-key2: "label-value2"
-
-ingress:
-  # -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
-  enabled: false
-
-  # -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
-  # ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
-  ingressClassName: ~
-
-  # -- Hostname of the Layer 7 load balancer.
-  host: sslip.io
-
-  # -- Setting that allows you to enable TLS on ingress records.
-  tls: false
-
-  # -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
-  secureBackends: false
-
-  # -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
-  tlsSecret: longhorn.local-tls
-
-  # -- Default ingress path. You can access the Longhorn UI by following the full ingress path \{\{host\}\}+\{\{path\}\}.
-  path: /
-
-  # -- Ingress path type. To maintain backward compatibility, the default value is "ImplementationSpecific".
-  pathType: ImplementationSpecific
-
-  ## If you're using kube-lego, you will want to add:
-  ## kubernetes.io/tls-acme: true
-  ##
-  ## For a full list of possible ingress annotations, please see
-  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
-  ##
-  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
-  # -- Ingress annotations in the form of key-value pairs.
-  annotations:
-  #  kubernetes.io/ingress.class: nginx
-  #  kubernetes.io/tls-acme: true
-
-  # -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
-  secrets:
-  ## If you're providing your own certificates, please use this to add the certificates as secrets
-  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
-  ## -----BEGIN RSA PRIVATE KEY-----
-  ##
-  ## name should line up with a tlsSecret set further up
-  ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
-  ##
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  # - name: longhorn.local-tls
-  #   key:
-  #   certificate:
-
-# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
-enablePSP: false
-
-# -- Specify an override namespace. This is useful when using Longhorn as a sub-chart whose release namespace is not `longhorn-system`.
-namespaceOverride: ""
-
-# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
-annotations: {}
-
-serviceAccount:
-  # -- Annotations to add to the service account
-  annotations: {}
-
-metrics:
-  serviceMonitor:
-    # -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
-    enabled: false
-    # -- Additional labels for the Prometheus ServiceMonitor resource.
-    additionalLabels: {}
-    # -- Annotations for the Prometheus ServiceMonitor resource.
-    annotations: {}
-    # -- Interval at which Prometheus scrapes the metrics from the target.
-    interval: ""
-    # -- Timeout after which Prometheus considers the scrape to be failed.
-    scrapeTimeout: ""
-    # -- Configures the relabeling rules to apply the target’s metadata labels. See the [Prometheus Operator
-    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
-    # formatting details.
-    relabelings: []
-    # -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator
-    # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
-    # formatting details.
-    metricRelabelings: []
-
-## openshift settings
-openshift:
-  # -- Setting that allows Longhorn to integrate with OpenShift.
-  enabled: false
-  ui:
-    # -- Route for connections between Longhorn and the OpenShift web console.
-    route: "longhorn-ui"
-    # -- Port for accessing the OpenShift web console.
-    port: 443
-    # -- Port for proxy that provides access to the OpenShift web console.
-    proxy: 8443
-
-# -- Setting that allows Longhorn to generate code coverage profiles.
-enableGoCoverDir: false
-
diff --git a/k8s/helmfile.d/values/mysql/values.yaml.gotmpl b/k8s/helmfile.d/values/mysql/values.yaml.gotmpl
deleted file mode 100644
index c661925..0000000
--- a/k8s/helmfile.d/values/mysql/values.yaml.gotmpl
+++ /dev/null
@@ -1,1614 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-##
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.mysql.storageClass }}
-  storageClass: {{ .Values.globals.mysql.storageClass }}
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-##
-
-## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname template
-##
-fullnameOverride: ""
-## @param namespaceOverride String to fully override common.names.namespace
-##
-namespaceOverride: ""
-## @param clusterDomain Cluster domain
-##
-clusterDomain: cluster.local
-## @param commonAnnotations Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template
-##
-commonAnnotations: {}
-## @param commonLabels Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template
-##
-commonLabels: {}
-## @param extraDeploy Array with extra yaml to deploy with the chart. Evaluated as a template
-##
-extraDeploy: []
-## @param serviceBindings.enabled Create secret for service binding (Experimental)
-## Ref: https://servicebinding.io/service-provider/
-##
-serviceBindings:
-  enabled: false
-## Enable diagnostic mode in the deployment
-##
-diagnosticMode:
-  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
-  ##
-  enabled: false
-  ## @param diagnosticMode.command Command to override all containers in the deployment
-  ##
-  command:
-    - sleep
-  ## @param diagnosticMode.args Args to override all containers in the deployment
-  ##
-  args:
-    - infinity
-## @section MySQL common parameters
-##
-
-## Bitnami MySQL image
-## ref: https://hub.docker.com/r/bitnami/mysql/tags/
-## @param image.registry [default: REGISTRY_NAME] MySQL image registry
-## @param image.repository [default: REPOSITORY_NAME/mysql] MySQL image repository
-## @skip image.tag MySQL image tag (immutable tags are recommended)
-## @param image.digest MySQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy MySQL image pull policy
-## @param image.pullSecrets Specify docker-registry secret names as an array
-## @param image.debug Specify if debug logs should be enabled
-##
-image:
-  registry: docker.io
-  repository: bitnami/mysql
-  tag: 8.4.4-debian-12-r0
-  digest: ""
-  ## Specify a imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## Example:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Set to true if you would like to see extra information on logs
-  ## It turns BASH and/or NAMI debugging in the image
-  ##
-  debug: false
-## @param architecture MySQL architecture (`standalone` or `replication`)
-##
-architecture: standalone
-## MySQL Authentication parameters
-##
-auth:
-  ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run
-  ##
-  rootPassword: {{ .Values.globals.mysql.rootPassword }}
-  ## @param auth.createDatabase Whether to create the .Values.auth.database or not
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-on-first-run
-  ##
-  createDatabase: true
-  ## @param auth.database Name for a custom database to create
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-on-first-run
-  ##
-  database: "auth"
-  ## @param auth.username Name for a custom user to create
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-user-on-first-run
-  ##
-  username: {{ .Values.globals.mysql.username }}
-  ## @param auth.password Password for the new user. Ignored if existing secret is provided
-  ##
-  password: {{ .Values.globals.mysql.password }}
-  ## @param auth.replicationUser MySQL replication user
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-up-a-replication-cluster
-  ##
-  replicationUser: replicator
-  ## @param auth.replicationPassword MySQL replication user password. Ignored if existing secret is provided
-  ##
-  replicationPassword: ""
-  ## @param auth.existingSecret Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password`
-  ## NOTE: When it's set the auth.rootPassword, auth.password, auth.replicationPassword are ignored.
-  ##
-  existingSecret: ""
-  ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable
-  ##
-  usePasswordFiles: false
-  ## @param auth.customPasswordFiles Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication`
-  ## Example:
-  ## customPasswordFiles:
-  ##   root: /vault/secrets/mysql-root
-  ##   user: /vault/secrets/mysql-user
-  ##   replicator: /vault/secrets/mysql-replicator
-  ##
-  customPasswordFiles: {}
-  ## @param auth.authenticationPolicy Sets the authentication policy. By default it will use `* ,,`
-  ## ref: https://dev.mysql.com/doc/refman/8.4/en/server-system-variables.html#sysvar_authentication_policy
-  ##
-  authenticationPolicy: ""
-## @param initdbScripts Dictionary of initdb scripts
-## Specify dictionary of scripts to be run at first boot
-## Example:
-## initdbScripts:
-##   my_init_script.sh: |
-##      #!/bin/bash
-##      echo "Do something."
-##
-initdbScripts: {}
-## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`)
-##
-initdbScriptsConfigMap: ""
-## @param startdbScripts Dictionary of startdb scripts
-## Specify dictionary of scripts to be run every time the container is started
-## Example:
-## startdbScripts:
-##   my_start_script.sh: |
-##      #!/bin/bash
-##      echo "Do something."
-##
-startdbScripts: {}
-## @param startdbScriptsConfigMap ConfigMap with the startdb scripts (Note: Overrides `startdbScripts`)
-##
-startdbScriptsConfigMap: ""
-## @section TLS/SSL parameters
-##
-## @param tls.enabled Enable TLS in MySQL
-## @param tls.existingSecret Existing secret that contains TLS certificates
-## @param tls.certFilename The secret key from the existingSecret if 'cert' key different from the default (tls.crt)
-## @param tls.certKeyFilename The secret key from the existingSecret if 'key' key different from the default (tls.key)
-## @param tls.certCAFilename The secret key from the existingSecret if 'ca' key different from the default (tls.crt)
-## @param tls.ca CA certificate for TLS. Ignored if `tls.existingSecret` is set
-## @param tls.cert TLS certificate for MySQL. Ignored if `tls.existingSecret` is set
-## @param tls.key TLS key for MySQL. Ignored if `tls.existingSecret` is set
-##
-tls:
-  enabled: false
-  existingSecret: ""
-  certFilename: tls.crt
-  certKeyFilename: tls.key
-  certCAFilename: ""
-  ca: ""
-  cert: ""
-  key: ""
-  ## @param tls.autoGenerated.enabled Enable automatic generation of certificates for TLS
-  ## @param tls.autoGenerated.engine Mechanism to generate the certificates (allowed values: helm, cert-manager)
-  autoGenerated:
-    enabled: true
-    engine: helm
-    ## @param tls.autoGenerated.certManager.existingIssuer The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine)
-    ## @param tls.autoGenerated.certManager.existingIssuerKind Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine)
-    ## @param tls.autoGenerated.certManager.keyAlgorithm Key algorithm for the certificates (only for `cert-manager` engine)
-    ## @param tls.autoGenerated.certManager.keySize Key size for the certificates (only for `cert-manager` engine)
-    ## @param tls.autoGenerated.certManager.duration Duration for the certificates (only for `cert-manager` engine)
-    ## @param tls.autoGenerated.certManager.renewBefore Renewal period for the certificates (only for `cert-manager` engine)
-    certManager:
-      existingIssuer: ""
-      existingIssuerKind: ""
-      keySize: 2048
-      keyAlgorithm: RSA
-      duration: 2160h
-      renewBefore: 360h
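-## A minimal sketch of bringing pre-created certificates instead of relying on
-## auto-generation (the Secret name "mysql-tls" is illustrative; key filenames
-## default to tls.crt/tls.key as documented above):
-## tls:
-##   enabled: true
-##   existingSecret: "mysql-tls"
-##   autoGenerated:
-##     enabled: false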
-
-## @section MySQL Primary parameters
-##
-primary:
-  ## @param primary.name Name of the primary database (e.g. primary, master, leader, ...)
-  ##
-  name: primary
-  ## @param primary.command Override default container command on MySQL Primary container(s) (useful when using custom images)
-  ##
-  command: []
-  ## @param primary.args Override default container args on MySQL Primary container(s) (useful when using custom images)
-  ##
-  args: []
-  ## @param primary.lifecycleHooks for the MySQL Primary container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param primary.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param primary.hostAliases Deployment pod host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param primary.enableMySQLX Enable mysqlx port
-  ## ref: https://dev.mysql.com/doc/dev/mysql-server/latest/mysqlx_protocol_xplugin.html
-  ##
-  enableMySQLX: false
-  ## @param primary.configuration [string] Configure MySQL Primary with a custom my.cnf file
-  ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
-  ##
-  configuration: |-
-    [mysqld]
-    authentication_policy={{`'{{- .Values.auth.authenticationPolicy | default "* ,," }}'`}}
-    skip-name-resolve
-    explicit_defaults_for_timestamp
-    basedir=/opt/bitnami/mysql
-    plugin_dir=/opt/bitnami/mysql/lib/plugin
-    port={{ `{{ .Values.primary.containerPorts.mysql }}` }}
-    mysqlx={{ `{{ ternary 1 0 .Values.primary.enableMySQLX }}` }}
-    mysqlx_port={{ `{{ .Values.primary.containerPorts.mysqlx }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    datadir=/bitnami/mysql/data
-    tmpdir=/opt/bitnami/mysql/tmp
-    max_allowed_packet=16M
-    bind-address=*
-    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
-    log-error=/opt/bitnami/mysql/logs/mysqld.log
-    character-set-server=UTF8
-    slow_query_log=0
-    long_query_time=10.0
-    {{ `{{- if .Values.tls.enabled }}` }}
-    ssl_cert=/opt/bitnami/mysql/certs/{{ `{{ .Values.tls.certFilename }}` }}
-    ssl_key=/opt/bitnami/mysql/certs/{{ `{{ .Values.tls.certKeyFilename }}` }}
-    {{ `{{- if (include "mysql.tlsCACert" .) }}` }}
-    ssl_ca={{ `{{ include "mysql.tlsCACert" . }}` }}
-    {{ `{{- end }}` }}
-    {{ `{{- end }}` }}
-
-    [client]
-    port={{ `{{ .Values.primary.containerPorts.mysql }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    default-character-set=UTF8
-    plugin_dir=/opt/bitnami/mysql/lib/plugin
-
-    [manager]
-    port={{ `{{ .Values.primary.containerPorts.mysql }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
-  ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration.
-  ## NOTE: When it's set, the 'configuration' parameter is ignored
-  ##
-  existingConfigmap: ""
-  ## @param primary.containerPorts.mysql Container port for mysql
-  ## @param primary.containerPorts.mysqlx Container port for mysqlx
-  ##
-  containerPorts:
-    mysql: 3306
-    mysqlx: 33060
-  ## @param primary.updateStrategy.type Update strategy type for the MySQL primary statefulset
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param primary.podAnnotations Additional pod annotations for MySQL primary pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param primary.podAffinityPreset MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param primary.podAntiAffinityPreset MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## MySQL Primary node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param primary.nodeAffinityPreset.type MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param primary.nodeAffinityPreset.key MySQL primary node label key to match. Ignored if `primary.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param primary.nodeAffinityPreset.values MySQL primary node label values to match. Ignored if `primary.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param primary.affinity Affinity for MySQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param primary.nodeSelector Node labels for MySQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param primary.tolerations Tolerations for MySQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param primary.priorityClassName MySQL primary pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param primary.runtimeClassName MySQL primary pods' runtimeClassName
-  ##
-  runtimeClassName: ""
-  ## @param primary.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param primary.terminationGracePeriodSeconds Time (in seconds) given to the MySQL primary pod to terminate gracefully
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-  ##
-  terminationGracePeriodSeconds: ""
-  ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
-  ## @param primary.podManagementPolicy podManagementPolicy to manage scaling operation of MySQL primary pods
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
-  ##
-  podManagementPolicy: ""
-  ## MySQL primary Pod security context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param primary.podSecurityContext.enabled Enable security context for MySQL primary pods
-  ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## MySQL primary container security context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param primary.containerSecurityContext.enabled MySQL primary container securityContext
-  ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param primary.containerSecurityContext.runAsUser User ID for the MySQL primary container
-  ## @param primary.containerSecurityContext.runAsGroup Group ID for the MySQL primary container
-  ## @param primary.containerSecurityContext.runAsNonRoot Set MySQL primary container's Security Context runAsNonRoot
-  ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation
-  ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-    readOnlyRootFilesystem: true
-  ## MySQL primary container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## We usually recommend not to specify default resources and to leave this as a conscious
-  ## choice for the user. This also increases chances charts run on environments with little
-  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure extra options for liveness probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param primary.livenessProbe.enabled Enable livenessProbe
-  ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## Configure extra options for readiness probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param primary.readinessProbe.enabled Enable readinessProbe
-  ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## Configure extra options for startupProbe probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param primary.startupProbe.enabled Enable startupProbe
-  ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param primary.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: true
-    initialDelaySeconds: 15
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 10
-    successThreshold: 1
-  ## @param primary.customLivenessProbe Override default liveness probe for MySQL primary containers
-  ##
-  customLivenessProbe: {}
-  ## @param primary.customReadinessProbe Override default readiness probe for MySQL primary containers
-  ##
-  customReadinessProbe: {}
-  ## @param primary.customStartupProbe Override default startup probe for MySQL primary containers
-  ##
-  customStartupProbe: {}
-  ## @param primary.extraFlags MySQL primary additional command line flags
-  ## Can be used to specify command line flags, e.g.:
-  ## extraFlags: "--max-connect-errors=1000 --max_connections=155"
-  ##
-  extraFlags: ""
-  ## @param primary.extraEnvVars Extra environment variables to be set on MySQL primary containers
-  ## E.g.
-  ## extraEnvVars:
-  ##  - name: TZ
-  ##    value: "Europe/Paris"
-  ##
-  extraEnvVars: []
-  ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL primary containers
-  ##
-  extraEnvVarsCM: ""
-  ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL primary containers
-  ##
-  extraEnvVarsSecret: ""
-  ## @param primary.extraPodSpec Optionally specify extra PodSpec for the MySQL Primary pod(s)
-  ##
-  extraPodSpec: {}
-  ## @param primary.extraPorts Extra ports to expose
-  ##
-  extraPorts: []
-  ## Enable persistence using Persistent Volume Claims
-  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-  ##
-  persistence:
-    ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir
-    ##
-    enabled: true
-    ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas
-    ## NOTE: When it's set, the rest of the persistence parameters are ignored
-    ##
-    existingClaim: ""
-    ## @param primary.persistence.subPath The name of a volume's sub path to mount for persistence
-    ##
-    subPath: ""
-    ## @param primary.persistence.storageClass MySQL primary persistent volume storage Class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    storageClass: ""
-    ## @param primary.persistence.annotations MySQL primary persistent volume claim annotations
-    ##
-    annotations: {}
-    ## @param primary.persistence.accessModes MySQL primary persistent volume access Modes
-    ##
-    accessModes:
-      - ReadWriteOnce
-    ## @param primary.persistence.size MySQL primary persistent volume size
-    ##
-    size: 8Gi
-    ## @param primary.persistence.selector Selector to match an existing Persistent Volume
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-  ## Primary Persistent Volume Claim Retention Policy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary StatefulSet
-    ##
-    enabled: false
-    ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-    ##
-    whenScaled: Retain
-    ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-    ##
-    whenDeleted: Retain
-  ## @param primary.extraVolumes Optionally specify extra list of additional volumes to the MySQL Primary pod(s)
-  ##
-  extraVolumes: []
-  ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param primary.initContainers Add additional init containers for the MySQL Primary pod(s)
-  ##
-  initContainers: []
-  ## @param primary.sidecars Add additional sidecar containers for the MySQL Primary pod(s)
-  ##
-  sidecars: []
-  ## MySQL Primary Service parameters
-  ##
-  service:
-    ## @param primary.service.type MySQL Primary K8s service type
-    ##
-    type: ClusterIP
-    ## @param primary.service.ports.mysql MySQL Primary K8s service port
-    ## @param primary.service.ports.mysqlx MySQL Primary K8s service mysqlx port
-    ##
-    ports:
-      mysql: 3306
-      mysqlx: 33060
-    ## @param primary.service.nodePorts.mysql MySQL Primary K8s service node port
-    ## @param primary.service.nodePorts.mysqlx MySQL Primary K8s service node port mysqlx
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ##
-    nodePorts:
-      mysql: ""
-      mysqlx: ""
-    ## @param primary.service.clusterIP MySQL Primary K8s service clusterIP IP
-    ## e.g:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param primary.service.loadBalancerIP MySQL Primary loadBalancerIP if service type is `LoadBalancer`
-    ## Set the LoadBalancer service type to internal only
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param primary.service.externalTrafficPolicy Enable client source IP preservation
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
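-    ## E.g., set to "Local" to preserve the client source IP when the service
-    ## type is LoadBalancer or NodePort (may spread load unevenly across nodes):
-    ## externalTrafficPolicy: Local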
-    ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when MySQL Primary service is LoadBalancer
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## E.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param primary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param primary.service.annotations Additional custom annotations for MySQL primary service
-    ##
-    annotations: {}
-    ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-    ## Headless service properties
-    ##
-    headless:
-      ## @param primary.service.headless.annotations Additional custom annotations for headless MySQL primary service.
-      ##
-      annotations: {}
-  ## MySQL primary Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-  ##
-  pdb:
-    ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation for MySQL primary pods
-    ##
-    create: true
-    ## @param primary.pdb.minAvailable Minimum number/percentage of MySQL primary pods that should remain scheduled
-    ##
-    minAvailable: ""
-    ## @param primary.pdb.maxUnavailable Maximum number/percentage of MySQL primary pods that may be made unavailable. Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty.
-    ##
-    maxUnavailable: ""
-  ## @param primary.podLabels MySQL Primary pod labels. If a label here is also set in commonLabels, this value takes precedence
-  ##
-  podLabels: {}
-## @section MySQL Secondary parameters
-##
-secondary:
-  ## @param secondary.name Name of the secondary database (e.g. secondary, slave, ...)
-  ##
-  name: secondary
-  ## @param secondary.replicaCount Number of MySQL secondary replicas
-  ##
-  replicaCount: 1
-  ## @param secondary.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param secondary.hostAliases Deployment pod host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param secondary.command Override default container command on MySQL Secondary container(s) (useful when using custom images)
-  ##
-  command: []
-  ## @param secondary.args Override default container args on MySQL Secondary container(s) (useful when using custom images)
-  ##
-  args: []
-  ## @param secondary.lifecycleHooks for the MySQL Secondary container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param secondary.enableMySQLX Enable mysqlx port
-  ## ref: https://dev.mysql.com/doc/dev/mysql-server/latest/mysqlx_protocol_xplugin.html
-  ##
-  enableMySQLX: false
-  ## @param secondary.configuration [string] Configure MySQL Secondary with a custom my.cnf file
-  ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
-  ##
-  configuration: |-
-    [mysqld]
-    authentication_policy='{{ `{{- .Values.auth.authenticationPolicy | default "* ,," }}` }}'
-    skip-name-resolve
-    explicit_defaults_for_timestamp
-    basedir=/opt/bitnami/mysql
-    plugin_dir=/opt/bitnami/mysql/lib/plugin
-    port={{ `{{ .Values.secondary.containerPorts.mysql }}` }}
-    mysqlx={{ `{{ ternary 1 0 .Values.secondary.enableMySQLX }}` }}
-    mysqlx_port={{ `{{ .Values.secondary.containerPorts.mysqlx }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    datadir=/bitnami/mysql/data
-    tmpdir=/opt/bitnami/mysql/tmp
-    max_allowed_packet=16M
-    bind-address=*
-    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
-    log-error=/opt/bitnami/mysql/logs/mysqld.log
-    character-set-server=UTF8
-    slow_query_log=0
-    long_query_time=10.0
-    {{ `{{- if .Values.tls.enabled }}` }}
-    ssl_cert=/opt/bitnami/mysql/certs/{{ `{{ .Values.tls.certFilename }}` }}
-    ssl_key=/opt/bitnami/mysql/certs/{{ `{{ .Values.tls.certKeyFilename }}` }}
-    {{ `{{- if (include "mysql.tlsCACert" .) }}` }}
-    ssl_ca={{ `{{ include "mysql.tlsCACert" . }}` }}
-    {{ `{{- end }}` }}
-    {{ `{{- end }}` }}
-
-    [client]
-    port={{ `{{ .Values.secondary.containerPorts.mysql }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    default-character-set=UTF8
-    plugin_dir=/opt/bitnami/mysql/lib/plugin
-
-    [manager]
-    port={{ `{{ .Values.secondary.containerPorts.mysql }}` }}
-    socket=/opt/bitnami/mysql/tmp/mysql.sock
-    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
-  ## @param secondary.existingConfigmap Name of existing ConfigMap with MySQL Secondary configuration.
-  ## NOTE: When it's set, the 'configuration' parameter is ignored
-  ##
-  existingConfigmap: ""
-  ## @param secondary.containerPorts.mysql Container port for mysql
-  ## @param secondary.containerPorts.mysqlx Container port for mysqlx
-  ##
-  containerPorts:
-    mysql: 3306
-    mysqlx: 33060
-  ## @param secondary.updateStrategy.type Update strategy type for the MySQL secondary statefulset
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    type: RollingUpdate
-  ## @param secondary.podAnnotations Additional pod annotations for MySQL secondary pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param secondary.podAffinityPreset MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param secondary.podAntiAffinityPreset MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## MySQL Secondary node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param secondary.nodeAffinityPreset.type MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param secondary.nodeAffinityPreset.key MySQL secondary node label key to match. Ignored if `secondary.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param secondary.nodeAffinityPreset.values MySQL secondary node label values to match. Ignored if `secondary.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param secondary.affinity Affinity for MySQL secondary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param secondary.nodeSelector Node labels for MySQL secondary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param secondary.tolerations Tolerations for MySQL secondary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param secondary.priorityClassName MySQL secondary pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param secondary.runtimeClassName MySQL secondary pods' runtimeClassName
-  ##
-  runtimeClassName: ""
-  ## @param secondary.schedulerName Name of the k8s scheduler (other than default)
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param secondary.terminationGracePeriodSeconds Time (in seconds) given to the MySQL secondary pod to terminate gracefully
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-  ##
-  terminationGracePeriodSeconds: ""
-  ## @param secondary.topologySpreadConstraints Topology Spread Constraints for pod assignment
-  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## The value is evaluated as a template
-  ##
-  topologySpreadConstraints: []
-  ## @param secondary.podManagementPolicy podManagementPolicy to manage scaling operation of MySQL secondary pods
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
-  ##
-  podManagementPolicy: ""
-  ## MySQL secondary Pod security context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param secondary.podSecurityContext.enabled Enable security context for MySQL secondary pods
-  ## @param secondary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param secondary.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param secondary.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## MySQL secondary container security context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param secondary.containerSecurityContext.enabled MySQL secondary container securityContext
-  ## @param secondary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param secondary.containerSecurityContext.runAsUser User ID for the MySQL secondary container
-  ## @param secondary.containerSecurityContext.runAsGroup Group ID for the MySQL secondary container
-  ## @param secondary.containerSecurityContext.runAsNonRoot Set MySQL secondary container's Security Context runAsNonRoot
-  ## @param secondary.containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation
-  ## @param secondary.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param secondary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ## @param secondary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-    readOnlyRootFilesystem: true
-  ## MySQL secondary container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## We usually recommend not to specify default resources and to leave this as a conscious
-  ## choice for the user. This also increases chances charts run on environments with little
-  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param secondary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if secondary.resources is set (secondary.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "small"
-  ## @param secondary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure extra options for liveness probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param secondary.livenessProbe.enabled Enable livenessProbe
-  ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## Configure extra options for readiness probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param secondary.readinessProbe.enabled Enable readinessProbe
-  ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 3
-    successThreshold: 1
-  ## Configure extra options for startupProbe probe
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param secondary.startupProbe.enabled Enable startupProbe
-  ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: true
-    initialDelaySeconds: 15
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param secondary.customLivenessProbe Override default liveness probe for MySQL secondary containers
-  ##
-  customLivenessProbe: {}
-  ## @param secondary.customReadinessProbe Override default readiness probe for MySQL secondary containers
-  ##
-  customReadinessProbe: {}
-  ## @param secondary.customStartupProbe Override default startup probe for MySQL secondary containers
-  ##
-  customStartupProbe: {}
-  ## @param secondary.extraFlags MySQL secondary additional command line flags
-  ## Can be used to specify command line flags, e.g.:
-  ## extraFlags: "--max-connect-errors=1000 --max_connections=155"
-  ##
-  extraFlags: ""
-  ## @param secondary.extraEnvVars Extra environment variables to be set on MySQL secondary containers
-  ## E.g.
-  ## extraEnvVars:
-  ##  - name: TZ
-  ##    value: "Europe/Paris"
-  ##
-  extraEnvVars: []
-  ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL secondary containers
-  ##
-  extraEnvVarsCM: ""
-  ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL secondary containers
-  ##
-  extraEnvVarsSecret: ""
-  ## @param secondary.extraPodSpec Optionally specify extra PodSpec for the MySQL Secondary pod(s)
-  ##
-  extraPodSpec: {}
-  ## @param secondary.extraPorts Extra ports to expose
-  ##
-  extraPorts: []
-  ## Enable persistence using Persistent Volume Claims
-  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-  ##
-  persistence:
-    ## @param secondary.persistence.enabled Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim`
-    ##
-    enabled: true
-    ## @param secondary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL secondary replicas
-    ## NOTE: When it's set, the rest of the persistence parameters are ignored
-    ##
-    existingClaim: ""
-    ## @param secondary.persistence.subPath The name of a volume's sub path to mount for persistence
-    ##
-    subPath: ""
-    ## @param secondary.persistence.storageClass MySQL secondary persistent volume storage Class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    storageClass: ""
-    ## @param secondary.persistence.annotations MySQL secondary persistent volume claim annotations
-    ##
-    annotations: {}
-    ## @param secondary.persistence.accessModes MySQL secondary persistent volume access Modes
-    ##
-    accessModes:
-      - ReadWriteOnce
-    ## @param secondary.persistence.size MySQL secondary persistent volume size
-    ##
-    size: 8Gi
-    ## @param secondary.persistence.selector Selector to match an existing Persistent Volume
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-  ## Secondary Persistent Volume Claim Retention Policy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    ## @param secondary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only StatefulSet
-    ##
-    enabled: false
-    ## @param secondary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-    ##
-    whenScaled: Retain
-    ## @param secondary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-    ##
-    whenDeleted: Retain
-  ## @param secondary.extraVolumes Optionally specify extra list of additional volumes to the MySQL secondary pod(s)
-  ##
-  extraVolumes: []
-  ## @param secondary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param secondary.initContainers Add additional init containers for the MySQL secondary pod(s)
-  ##
-  initContainers: []
-  ## @param secondary.sidecars Add additional sidecar containers for the MySQL secondary pod(s)
-  ##
-  sidecars: []
-  ## MySQL Secondary Service parameters
-  ##
-  service:
-    ## @param secondary.service.type MySQL secondary Kubernetes service type
-    ##
-    type: ClusterIP
-    ## @param secondary.service.ports.mysql MySQL secondary Kubernetes service port
-    ## @param secondary.service.ports.mysqlx MySQL secondary Kubernetes service port mysqlx
-    ##
-    ports:
-      mysql: 3306
-      mysqlx: 33060
-    ## @param secondary.service.nodePorts.mysql MySQL secondary Kubernetes service node port
-    ## @param secondary.service.nodePorts.mysqlx MySQL secondary Kubernetes service node port mysqlx
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ##
-    nodePorts:
-      mysql: ""
-      mysqlx: ""
-    ## @param secondary.service.clusterIP MySQL secondary Kubernetes service clusterIP IP
-    ## e.g:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param secondary.service.loadBalancerIP MySQL secondary loadBalancerIP if service type is `LoadBalancer`
-    ## Set the LoadBalancer service type to internal only
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param secondary.service.loadBalancerSourceRanges Addresses that are allowed when MySQL secondary service is LoadBalancer
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## E.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param secondary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param secondary.service.annotations Additional custom annotations for MySQL secondary service
-    ##
-    annotations: {}
-    ## @param secondary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param secondary.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-    ## Headless service properties
-    ##
-    headless:
-      ## @param secondary.service.headless.annotations Additional custom annotations for headless MySQL secondary service.
-      ##
-      annotations: {}
-  ## MySQL secondary Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-  ##
-  pdb:
-    ## @param secondary.pdb.create Enable/disable a Pod Disruption Budget creation for MySQL secondary pods
-    ##
-    create: true
-    ## @param secondary.pdb.minAvailable Minimum number/percentage of MySQL secondary pods that should remain scheduled
-    ##
-    minAvailable: ""
-    ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MySQL secondary pods that may be made unavailable. Defaults to `1` if both `secondary.pdb.minAvailable` and `secondary.pdb.maxUnavailable` are empty.
-    ##
-    maxUnavailable: ""
-  ## @param secondary.podLabels Additional pod labels for MySQL secondary pods
-  ##
-  podLabels: {}
-## @section RBAC parameters
-##
-
-## MySQL pods ServiceAccount
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  ## @param serviceAccount.create Enable the creation of a ServiceAccount for MySQL pods
-  ##
-  create: true
-  ## @param serviceAccount.name Name of the created ServiceAccount
-  ## If not set and create is true, a name is generated using the mysql.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.annotations Annotations for MySQL Service Account
-  ##
-  annotations: {}
-  ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
-  ##
-  automountServiceAccountToken: false
-## Role Based Access
-## ref: https://kubernetes.io/docs/admin/authorization/rbac/
-##
-rbac:
-  ## @param rbac.create Whether to create & use RBAC resources or not
-  ##
-  create: false
-  ## @param rbac.rules Custom RBAC rules to set
-  ## e.g:
-  ## rules:
-  ##   - apiGroups:
-  ##       - ""
-  ##     resources:
-  ##       - pods
-  ##     verbs:
-  ##       - get
-  ##       - list
-  ##
-  rules: []
-## @section Network Policy
-##
-
-## Network Policy configuration
-## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-##
-networkPolicy:
-  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources
-  ##
-  enabled: true
-  ## @param networkPolicy.allowExternal The Policy model to apply
-  ## When set to false, only pods with the correct client label will have network access to the ports MySQL is
-  ## listening on. When true, MySQL will accept connections from any source (with the correct destination port).
-  ##
-  allowExternal: true
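-  ## A sketch of the restrictive mode (assumes the usual Bitnami convention of a
-  ## "<release-fullname>-client: true" label on pods that must reach MySQL):
-  ## allowExternal: false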
-  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of ports and all destinations.
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraIngress: []
-  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
-
-## @section Password update job
-##
-passwordUpdateJob:
-  ## @param passwordUpdateJob.enabled Enable password update job
-  ##
-  enabled: false
-  ## @param passwordUpdateJob.backoffLimit set backoff limit of the job
-  ##
-  backoffLimit: 10
-  ## @param passwordUpdateJob.command Override default container command on the password update job container(s) (useful when using custom images)
-  ##
-  command: []
-  ## @param passwordUpdateJob.args Override default container args on the password update job container(s) (useful when using custom images)
-  ##
-  args: []
-  ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job
-  ##
-  extraCommands: ""
-  ## @param passwordUpdateJob.previousPasswords.rootPassword Previous root password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed)
-  previousPasswords:
-    rootPassword: ""
-    password: ""
-    replicationPassword: ""
-    existingSecret: ""
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param passwordUpdateJob.containerSecurityContext.enabled Enable containers' Security Context
-  ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param passwordUpdateJob.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param passwordUpdateJob.podSecurityContext.enabled Enable credential init job pods' Security Context
-  ## @param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job
-  ## For example:
-  ## extraEnvVars:
-  ##  - name: GF_DEFAULT_INSTANCE_NAME
-  ##    value: my-instance
-  ##
-  extraEnvVars: []
-  ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job
-  ##
-  extraEnvVarsCM: ""
-  ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data)
-  ##
-  extraEnvVarsSecret: ""
-  ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job
-  ##
-  extraVolumes: []
-  ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the job container (evaluated as template). Normally used with `extraVolumes`.
-  ##
-  extraVolumeMounts: []
-  ## @param passwordUpdateJob.initContainers Add additional init containers for the password update job pod(s)
-  ##
-  initContainers: []
-  ## Container resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "micro"
-  ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param passwordUpdateJob.hostAliases Add deployment host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param passwordUpdateJob.annotations [object] Add annotations to the job
-  ##
-  annotations: {}
-  ## @param passwordUpdateJob.podLabels Additional pod labels
-  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param passwordUpdateJob.podAnnotations Additional pod annotations
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-
-## @section Volume Permissions parameters
-##
-
-## Init containers parameters:
-## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup`
-  ##
-  enabled: false
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
-  ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the form sha256:aa.... Please note that this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
-  ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-## @section Metrics parameters
-##
-
-## Mysqld Prometheus exporter parameters
-##
-metrics:
-  ## @param metrics.enabled Start a side-car prometheus exporter
-  ##
-  enabled: true
-  ## @param metrics.image.registry [default: REGISTRY_NAME] Exporter image registry
-  ## @param metrics.image.repository [default: REPOSITORY_NAME/mysqld-exporter] Exporter image repository
-  ## @skip metrics.image.tag Exporter image tag (immutable tags are recommended)
-  ## @param metrics.image.digest Exporter image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param metrics.image.pullPolicy Exporter image pull policy
-  ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/mysqld-exporter
-    tag: 0.16.0-debian-12-r5
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## MySQL metrics container security context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param metrics.containerSecurityContext.enabled Enable MySQL metrics container's Security Context
-  ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param metrics.containerSecurityContext.runAsUser User ID for the MySQL metrics container
-  ## @param metrics.containerSecurityContext.runAsGroup Group ID for the MySQL metrics container
-  ## @param metrics.containerSecurityContext.runAsNonRoot Set MySQL metrics container's Security Context runAsNonRoot
-  ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation
-  ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-    readOnlyRootFilesystem: true
-  ## @param metrics.containerPorts.http Container port for http
-  ##
-  containerPorts:
-    http: 9104
-  ## MySQL Prometheus exporter service parameters
-  ## @param metrics.service.type Kubernetes service type for MySQL Prometheus Exporter
-  ## @param metrics.service.clusterIP Kubernetes service clusterIP for MySQL Prometheus Exporter
-  ## @param metrics.service.port MySQL Prometheus Exporter service port
-  ## @param metrics.service.annotations [object] Prometheus exporter service annotations
-  ##
-  service:
-    type: ClusterIP
-    port: 9104
-    clusterIP: ""
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/port: "{{ `{{ .Values.metrics.service.port }}` }}"
-  ## @param metrics.extraArgs.primary Extra args to be passed to mysqld_exporter on Primary pods
-  ## @param metrics.extraArgs.secondary Extra args to be passed to mysqld_exporter on Secondary pods
-  ## ref: https://github.com/prometheus/mysqld_exporter/
-  ## E.g.
-  ## - --collect.auto_increment.columns
-  ## - --collect.binlog_size
-  ## - --collect.engine_innodb_status
-  ## - --collect.engine_tokudb_status
-  ## - --collect.global_status
-  ## - --collect.global_variables
-  ## - --collect.info_schema.clientstats
-  ## - --collect.info_schema.innodb_metrics
-  ## - --collect.info_schema.innodb_tablespaces
-  ## - --collect.info_schema.innodb_cmp
-  ## - --collect.info_schema.innodb_cmpmem
-  ## - --collect.info_schema.processlist
-  ## - --collect.info_schema.processlist.min_time
-  ## - --collect.info_schema.query_response_time
-  ## - --collect.info_schema.tables
-  ## - --collect.info_schema.tables.databases
-  ## - --collect.info_schema.tablestats
-  ## - --collect.info_schema.userstats
-  ## - --collect.perf_schema.eventsstatements
-  ## - --collect.perf_schema.eventsstatements.digest_text_limit
-  ## - --collect.perf_schema.eventsstatements.limit
-  ## - --collect.perf_schema.eventsstatements.timelimit
-  ## - --collect.perf_schema.eventswaits
-  ## - --collect.perf_schema.file_events
-  ## - --collect.perf_schema.file_instances
-  ## - --collect.perf_schema.indexiowaits
-  ## - --collect.perf_schema.tableiowaits
-  ## - --collect.perf_schema.tablelocks
-  ## - --collect.perf_schema.replication_group_member_stats
-  ## - --collect.slave_status
-  ## - --collect.slave_hosts
-  ## - --collect.heartbeat
-  ## - --collect.heartbeat.database
-  ## - --collect.heartbeat.table
-  ##
-  extraArgs:
-    primary: []
-    secondary: []
-  ## Mysqld Prometheus exporter resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## We usually recommend not specifying default resources, leaving this as a conscious
-  ## choice for the user. This also increases the chances the chart runs in environments with
-  ## limited resources, such as Minikube. If you do want to specify resources, uncomment the
-  ## following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Mysqld Prometheus exporter liveness probe
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-  ## @param metrics.livenessProbe.enabled Enable livenessProbe
-  ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 120
-    periodSeconds: 10
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 3
-  ## Mysqld Prometheus exporter readiness probe
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-  ## @param metrics.readinessProbe.enabled Enable readinessProbe
-  ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 30
-    periodSeconds: 10
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 3
-  ## Prometheus Service Monitor
-  ## ref: https://github.com/coreos/prometheus-operator
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-    ##
-    enabled: false
-    ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-    ##
-    jobLabel: ""
-    ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
-    ##
-    interval: 30s
-    ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
-    ## e.g:
-    ## scrapeTimeout: 30s
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
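-    ## e.g. (a sketch; the relabeling below is illustrative, not required):
-    ## relabelings:
-    ##   - sourceLabels: [__meta_kubernetes_pod_node_name]
-    ##     targetLabel: instance
-    ##     action: replace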
-    ##
-    relabelings: []
-    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels
-    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-    ##
-    ## selector:
-    ##   prometheus: my-prometheus
-    ##
-    selector: {}
-    ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.labels Labels used by the Prometheus installed in your cluster to select the ServiceMonitors to work with
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
-    ##
-    labels: {}
-    ## @param metrics.serviceMonitor.annotations ServiceMonitor annotations
-    ##
-    annotations: {}
-  ## Prometheus Operator prometheusRule configuration
-  ##
-  prometheusRule:
-    ## @param metrics.prometheusRule.enabled Creates a Prometheus Operator prometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`)
-    ##
-    enabled: false
-    ## @param metrics.prometheusRule.namespace Namespace for the prometheusRule Resource (defaults to the Release Namespace)
-    ##
-    namespace: ""
-    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRule will be discovered by Prometheus
-    ##
-    additionalLabels: {}
-    ## @param metrics.prometheusRule.rules Prometheus Rule definitions
-    ##  - alert: Mysql-Down
-    ##    expr: absent(up{job="mysql"} == 1)
-    ##    for: 5m
-    ##    labels:
-    ##      severity: warning
-    ##      service: mysql
-    ##    annotations:
-    ##      message: 'mysql instance {{`{{`}} $labels.instance {{`}}`}}  is down'
-    ##      summary: mysql instance is down
-    ##
-    rules: []
-
diff --git a/k8s/helmfile.d/values/nfs-subdir-external-provisioner/values.yaml.gotmpl b/k8s/helmfile.d/values/nfs-subdir-external-provisioner/values.yaml.gotmpl
deleted file mode 100644
index 3473ef2..0000000
--- a/k8s/helmfile.d/values/nfs-subdir-external-provisioner/values.yaml.gotmpl
+++ /dev/null
@@ -1,115 +0,0 @@
-replicaCount: {{ .Values.globals.nfsSubdirExternalProvisioner.replicaCount }}
-strategyType: Recreate
-
-image:
-  repository: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
-  tag: v4.0.2
-  pullPolicy: IfNotPresent
-imagePullSecrets: []
-
-nfs:
-  server: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.server }}
-  path: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.path }}
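-  # e.g. (a sketch; adjust the options to your NFS server's capabilities):
-  # mountOptions:
-  #   - nfsvers=4.1
-  #   - noatime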
-  mountOptions:
-  volumeName: nfs-subdir-external-provisioner-root
-  # Reclaim policy for the main nfs volume
-  reclaimPolicy: Retain
-
-# For creating the StorageClass automatically:
-storageClass:
-  create: true
-
-  # Set a provisioner name. If unset, a name will be generated.
-  # provisionerName:
-
-  # Set StorageClass as the default StorageClass
-  # Ignored if storageClass.create is false
-  defaultClass: true
-
-  # Set a StorageClass name
-  # Ignored if storageClass.create is false
-  name: {{ .Values.globals.nfsSubdirExternalProvisioner.storageClass }}
-
-  # Allow volume to be expanded dynamically
-  allowVolumeExpansion: true
-
-  # Method used to reclaim an obsoleted volume
-  reclaimPolicy: Delete
-
-  # When set to false, your PVs will not be archived by the provisioner upon deletion of the PVC.
-  archiveOnDelete: true
-
-  # If set to 'delete', the directory is deleted when the PVC is removed; if set to 'retain', it is kept.
-  # Overrides archiveOnDelete.
-  # Ignored if value not set.
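-  # e.g. onDelete: retain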
-  onDelete:
-
-  # Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
-  # Ignored if value not set.
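-  # e.g. one directory per namespace/claim, using the provisioner's PVC metadata
-  # placeholders (a sketch):
-  # pathPattern: "${.PVC.namespace}/${.PVC.name}"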
-  pathPattern:
-
-  # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
-  accessModes: {{ .Values.globals.nfsSubdirExternalProvisioner.accessModes }}
-
-  # Set volume binding mode - Immediate or WaitForFirstConsumer
-  volumeBindingMode: Immediate
-
-  # Storage class annotations
-  annotations: {}
-
-leaderElection:
-  # When set to false, leader election will be disabled
-  enabled: true
-
-## For RBAC support:
-rbac:
-  # Specifies whether RBAC resources should be created
-  create: true
-
-# If true, create & use Pod Security Policy resources
-# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-podSecurityPolicy:
-  enabled: false
-
-# Deployment pod annotations
-podAnnotations: {}
-
-## Set pod priorityClassName
-# priorityClassName: ""
-
-podSecurityContext: {}
-
-securityContext: {}
-
-serviceAccount:
-  # Specifies whether a ServiceAccount should be created
-  create: true
-
-  # Annotations to add to the service account
-  annotations: {}
-
-  # The name of the ServiceAccount to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name:
-
-resources: {}
-  # limits:
-  #  cpu: 100m
-  #  memory: 128Mi
-  # requests:
-  #  cpu: 100m
-  #  memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-# Additional labels for any resource created
-labels: {}
-
-podDisruptionBudget:
-  enabled: false
-  maxUnavailable: 1
-
diff --git a/k8s/helmfile.d/values/pgadmin4/values.yaml.gotmpl b/k8s/helmfile.d/values/pgadmin4/values.yaml.gotmpl
deleted file mode 100644
index 161066f..0000000
--- a/k8s/helmfile.d/values/pgadmin4/values.yaml.gotmpl
+++ /dev/null
@@ -1,420 +0,0 @@
-# Default values for pgAdmin4.
-
-replicaCount: 1
-
-## pgAdmin4 container image
-##
-image:
-  registry: docker.io
-  repository: dpage/pgadmin4
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-  pullPolicy: IfNotPresent
-
-## Deployment annotations
-annotations: {}
-
-## commonLabels Add labels to all the deployed resources
-commonLabels: {}
-
-## priorityClassName
-priorityClassName: ""
-
-## Deployment entrypoint override
-## Useful when there's a requirement to modify the container's default command:
-## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example
-## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206
-# command: "['/bin/sh', '-c', 'source /vault/secrets/config && <entrypoint script>']"
-
-service:
-  type: ClusterIP
-  clusterIP: ""
-  loadBalancerIP: ""
-  port: 80
-  targetPort: 80
-  # targetPort: 4181 To be used with a proxy extraContainer
-  portName: http
-
-  annotations: {}
-    ## Special annotations at the service level, e.g.
-    ## this will set VNet-internal IPs rather than public IPs:
-    ## service.beta.kubernetes.io/azure-load-balancer-internal: "true"
-
-  ## Specify the nodePort value for the service types.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  ##
-  # nodePort:
-
-## Pod Service Account
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: false
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-  # Opt out of API credential automounting.
-  # If you don't want the kubelet to automatically mount a ServiceAccount's API credentials,
-  # you can opt out of the default behavior
-  automountServiceAccountToken: false
-
-## Pod HostAliases
-## ref: https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
-##
-hostAliases:
-  # - ip: "127.0.0.1"
-  #   hostnames:
-  #   - "pgadmin4.local"
-
-## Strategy used to replace old Pods by new ones
-## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
-##
-strategy: {}
-  # type: RollingUpdate
-  # rollingUpdate:
-  #   maxSurge: 0
-  #   maxUnavailable: 1
-
-## Server definitions will be loaded at launch time. This allows connection
-## information to be pre-loaded into the instance of pgAdmin4 in the container.
-## Note that server definitions are only loaded on first launch,
-## i.e. when the configuration database is created, and not on subsequent launches using the same configuration database.
-## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html
-##
-serverDefinitions:
-  ## If true, server definitions will be created
-  ##
-  enabled: true
-
-  ## The resource type to use for deploying server definitions.
-  ## Can either be ConfigMap or Secret
-  resourceType: ConfigMap
-
-  # If resource type is set to ConfigMap, specify existingConfigmap containing definitions
-  existingConfigmap: ""
-
-  # If resource type is set to Secret, specify existingSecret containing definitions
-  existingSecret: ""
-
-  servers:
-   postgres:
-     Name: "main"
-     Group: "Servers"
-     Port: 5432
-     Username: "postgres"
-     Host: "postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local"
-     SSLMode: "prefer"
-     MaintenanceDB: "postgres"
-
-networkPolicy:
-  enabled: true
-
-## Ingress
-## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
-ingress:
-  enabled: true
-  annotations:
-    kubernetes.io/ingress.class: {{ .Values.globals.pgadmin4.ingressClass }}
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-  ingressClassName: {{ .Values.globals.pgadmin4.ingressClass }}
-  hosts:
-    - host: {{ .Values.globals.pgadmin4.hostname }}
-      paths:
-        - path: /
-          pathType: Prefix
-  tls:
-   - secretName: pg-hl-mnke-org-tls
-     hosts:
-       - {{ .Values.globals.pgadmin4.hostname }}
-
-# Additional config maps to be mounted inside a container
-# Can be used to map config maps for sidecar as well
-extraConfigmapMounts: []
-  # - name: certs-configmap
-  #   mountPath: /etc/ssl/certs
-  #   subPath: ""
-  #   configMap: certs-configmap
-  #   readOnly: true
-
-extraSecretMounts: []
-  # - name: pgpassfile
-  #   secret: pgpassfile
-  #   subPath: pgpassfile
-  #   mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass"
-  #   readOnly: true
-
-## Additional volumes to be mounted inside a container
-##
-extraVolumeMounts: []
-
-## Specify additional containers in extraContainers.
-## For example, to add an authentication proxy to a pgadmin4 pod.
-extraContainers: |
-# - name: proxy
-#   image: quay.io/gambol99/keycloak-proxy:latest
-#   args:
-#   - -provider=github
-#   - -client-id=
-#   - -client-secret=
-#   - -github-org=<ORG_NAME>
-#   - -email-domain=*
-#   - -cookie-secret=
-#   - -http-address=http://0.0.0.0:4181
-#   - -upstream-url=http://127.0.0.1:3000
-#   ports:
-#     - name: proxy-web
-#       containerPort: 4181
-
-## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret.
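-## e.g. (a sketch; the secret name is hypothetical and must be created out-of-band
-## with a `password` key):
-## existingSecret: "pgadmin-credentials"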
-##
-existingSecret: ""
-## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set.
-##
-secretKeys:
-  pgadminPasswordKey: password
-
-## pgAdmin4 startup configuration
-## Values in here get injected as environment variables
-## A chart reinstall is needed to apply changes
-env:
-  # can be email or nickname
-  email: {{ .Values.globals.pgadmin4.email }}
-  password: {{ .Values.globals.pgadmin4.password }}
-  # pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass
-
-  # set context path for application (e.g. /pgadmin4/*)
-  # contextPath: /pgadmin4
-
-  ## If True, allows pgAdmin4 to create session cookies based on IP address
-  ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html
-  ##
-  enhanced_cookie_protection: "False"
-
-  ## Add custom environment variables that will be injected to deployment
-  ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
-  ##
-  variables: []
-  # - name: PGADMIN_LISTEN_ADDRESS
-  #   value: "0.0.0.0"
-  # - name: PGADMIN_LISTEN_PORT
-  #   value: "8080"
-
-## Additional environment variables from ConfigMaps
-envVarsFromConfigMaps: []
-  # - array-of
-  # - config-map-names
-
-## Additional environment variables from Secrets
-envVarsFromSecrets: []
-  # - array-of
-  # - secret-names
-
-## Additional environment variables
-envVarsExtra: []
-  # - name: POSTGRES_USERNAME
-  #   valueFrom:
-  #     secretKeyRef:
-  #       name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
-  #       key: username
-  # - name: POSTGRES_PASSWORD
-  #   valueFrom:
-  #     secretKeyRef:
-  #       name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
-  #       key: password
-
-persistentVolume:
-  ## If true, pgAdmin4 will create/use a Persistent Volume Claim
-  ## If false, use emptyDir
-  ##
-  enabled: true
-
-  ## pgAdmin4 Persistent Volume Claim annotations
-  ##
-  annotations: {}
-
-  ## pgAdmin4 Persistent Volume access modes
-  ## Must match those of existing PV or dynamic provisioner
-  ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
-  accessModes:
-    - {{ .Values.globals.pgadmin4.accessMode }}
-
-  ## pgAdmin4 Persistent Volume Size
-  ##
-  size: {{ .Values.globals.pgadmin4.storageSize }}
-
-  ## pgAdmin4 Persistent Volume Storage Class
-  ## If defined, storageClassName: <storageClass>
-  ## If set to "-", storageClassName: "", which disables dynamic provisioning
-  ## If undefined (the default) or set to null, no storageClassName spec is
-  ##   set, choosing the default provisioner (gp2 on AWS, standard on
-  ##   GKE, AWS & OpenStack).
-  ##
-  storageClass: {{ .Values.globals.pgadmin4.storageClass }}
-  # existingClaim: ""
-
-  ## Subdirectory of pgAdmin4 Persistent Volume to mount
-  ## Useful if the volume's root directory is not empty
-  ##
-  subPath: ""
-
-## Additional volumes to be added to the deployment
-##
-extraVolumes: []
-
-## Security context to be added to pgAdmin4 pods
-##
-securityContext:
-  runAsUser: 5050
-  runAsGroup: 5050
-  fsGroup: 5050
-
-containerSecurityContext:
-  enabled: false
-  allowPrivilegeEscalation: false
-
-## pgAdmin4 readiness and liveness probe initial delay and timeout
-## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
-##
-livenessProbe:
-  initialDelaySeconds: 30
-  periodSeconds: 60
-  timeoutSeconds: 15
-  successThreshold: 1
-  failureThreshold: 3
-
-readinessProbe:
-  initialDelaySeconds: 30
-  periodSeconds: 60
-  timeoutSeconds: 15
-  successThreshold: 1
-  failureThreshold: 3
-
-## Required to be enabled for pgAdmin4 releases prior to 4.16, to set the ACL on /var/lib/pgadmin.
-## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-##
-VolumePermissions:
-  ## If true, enables an InitContainer to set permissions on /var/lib/pgadmin.
-  ##
-  enabled: false
-
-## @param extraDeploy list of extra manifests to deploy
-##
-extraDeploy: []
-
-## Additional InitContainers to initialize the pod
-##
-extraInitContainers: |
-#   - name: add-folder-for-pgpass
-#     image: "dpage/pgadmin4:latest"
-#     command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"]
-#     volumeMounts:
-#       - name: pgadmin-data
-#         mountPath: /var/lib/pgadmin
-#     securityContext:
-#       runAsUser: 5050
-
-containerPorts:
-  http: 80
-
-resources: {}
-  # We usually recommend not specifying default resources, leaving this as a conscious
-  # choice for the user. This also increases the chances the chart runs in environments with
-  # limited resources, such as Minikube. If you do want to specify resources, uncomment the
-  # following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
-
-## Horizontal Pod Autoscaling
-## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
-#
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-
-## Node labels for pgAdmin4 pod assignment
-## Ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
-nodeSelector: {}
-
-## Node tolerations for server scheduling to nodes with taints
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-##
-tolerations: []
-
-## Pod affinity
-##
-affinity: {}
-
-## Pod DNS Policy
-## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
-
-dnsPolicy: ""
-
-## Update pod DNS Config
-## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
-
-dnsConfig: {}
-#  nameservers:
-#    - 192.0.2.1
-#  searches:
-#    - ns1.svc.cluster-domain.example
-#    - my.dns.search.suffix
-#  options:
-#    - name: ndots
-#      value: "2"
-#    - name: edns0
-
-## Pod annotations
-##
-podAnnotations: {}
-templatedPodAnnotations: |-
-#   checksum/configmap-oauth2: "{{ `{{ include "<parent-chart-name>/templates/configmap-oauth2.yaml" $ | sha256sum }}` }}"
-#   checksum/secret-oauth2: "{{ `{{ include "<parent-chart-name>/templates/secret-oauth2.yaml" $ | sha256sum }}` }}"
-#   checksum/secret-pgpass: "{{ `{{ include "<parent-chart-name>/templates/secret-pgpass.yaml" $ | sha256sum }}` }}"
-
-## Pod labels
-##
-podLabels: {}
-  # key1: value1
-  # key2: value2
-
-# -- The name of the Namespace to deploy to
-# If not set, `.Release.Namespace` is used
-namespace: null
-
-init:
-  ## Init container resources
-  ##
-  resources: {}
-
-## Define values for chart tests
-test:
-  ## Container image for test-connection.yaml
-  image:
-    registry: docker.io
-    repository: busybox
-    tag: latest
-  ## Resources request/limit for test-connection Pod
-  resources: {}
-    # limits:
-    #   cpu: 50m
-    #   memory: 32Mi
-    # requests:
-    #   cpu: 25m
-    #   memory: 16Mi
-  ## Security context for test-connection Pod
-  securityContext:
-    runAsUser: 5051
-    runAsGroup: 5051
-    fsGroup: 5051
-
diff --git a/k8s/helmfile.d/values/phpmyadmin/values.yaml.gotmpl b/k8s/helmfile.d/values/phpmyadmin/values.yaml.gotmpl
deleted file mode 100644
index 2a2dd59..0000000
--- a/k8s/helmfile.d/values/phpmyadmin/values.yaml.gotmpl
+++ /dev/null
@@ -1,811 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please note that this will override the image parameters, including dependencies, configured to use the global value
-## Currently available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.phpmyadmin.storageClass }}
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: disabled
-## @section Common parameters
-
-## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname template
-##
-fullnameOverride: ""
-## @param commonLabels Add labels to all the deployed resources
-##
-commonLabels: {}
-## @param commonAnnotations Add annotations to all the deployed resources
-##
-commonAnnotations: {}
-## @param clusterDomain Kubernetes Cluster Domain
-##
-clusterDomain: cluster.local
-## @param extraDeploy Array of extra objects to deploy with the release
-##
-extraDeploy: []
-## @section phpMyAdmin parameters
-
-## Bitnami PhpMyAdmin image version
-## ref: https://hub.docker.com/r/bitnami/phpmyadmin/tags/
-## @param image.registry [default: REGISTRY_NAME] phpMyAdmin image registry
-## @param image.repository [default: REPOSITORY_NAME/phpmyadmin] phpMyAdmin image repository
-## @skip image.tag phpMyAdmin image tag (immutable tags are recommended)
-## @param image.digest phpMyAdmin image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy Image pull policy
-## @param image.pullSecrets Specify docker-registry secret names as an array
-## @param image.debug Enable phpmyadmin image debug mode
-##
-image:
-  registry: docker.io
-  repository: bitnami/phpmyadmin
-  tag: 5.2.2-debian-12-r0
-  digest: ""
-  ## Specify an imagePullPolicy
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## Example:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Enable debug mode
-  ##
-  debug: false
-## @param command Override default container command (useful when using custom images)
-##
-command: []
-## @param args Override default container args (useful when using custom images)
-##
-args: []
-## @param lifecycleHooks for the phpmyadmin container(s) to automate configuration before or after startup
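-## e.g. a preStop delay to let in-flight requests drain (a sketch):
-## lifecycleHooks:
-##   preStop:
-##     exec:
-##       command: ["/bin/sh", "-c", "sleep 5"]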
-##
-lifecycleHooks: {}
-## @param extraEnvVars Extra environment variables to be set on PhpMyAdmin container
-## For example:
-## extraEnvVars:
-## - name: PHP_UPLOAD_MAX_FILESIZE
-##   value: "80M"
-##
-extraEnvVars: []
-## @param extraEnvVarsCM Name of an existing ConfigMap containing extra env vars
-##
-extraEnvVarsCM: ""
-## @param extraEnvVarsSecret Name of an existing Secret containing extra env vars
-##
-extraEnvVarsSecret: ""
-## @section phpMyAdmin deployment parameters
-
-## @param automountServiceAccountToken Mount Service Account token in pod
-##
-automountServiceAccountToken: false
-## @param hostAliases [array] Deployment pod host aliases
-## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-##
-hostAliases:
-  ## Necessary for apache-exporter to work
-  ##
-  - ip: "127.0.0.1"
-    hostnames:
-      - "status.localhost"
-## phpMyAdmin container ports to open
-## @param containerPorts.http HTTP port to expose at container level
-## @param containerPorts.https HTTPS port to expose at container level
-##
-containerPorts:
-  http: 8080
-  https: 8443
-## @param extraContainerPorts Optionally specify extra list of additional ports for phpMyAdmin container(s)
-## e.g:
-## extraContainerPorts:
-##   - name: myservice
-##     containerPort: 9090
-##
-extraContainerPorts: []
-## @param updateStrategy.type Strategy to use to update Pods
-##
-updateStrategy:
-  ## StrategyType
-  ## Can be set to RollingUpdate or OnDelete
-  ##
-  type: RollingUpdate
-## phpMyAdmin pods' Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-## @param podSecurityContext.enabled Enable phpMyAdmin pods' Security Context
-## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
-## @param podSecurityContext.supplementalGroups Set filesystem extra groups
-## @param podSecurityContext.fsGroup User ID for the container
-##
-podSecurityContext:
-  enabled: true
-  fsGroupChangePolicy: Always
-  sysctls: []
-  supplementalGroups: []
-  fsGroup: 1001
-## phpMyAdmin containers' Security Context (only main container)
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-## @param containerSecurityContext.enabled Enabled containers' Security Context
-## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-## @param containerSecurityContext.privileged Set container's Security Context privileged
-## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
-## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-##
-containerSecurityContext:
-  enabled: true
-  seLinuxOptions: {}
-  runAsUser: 1001
-  runAsGroup: 1001
-  runAsNonRoot: true
-  privileged: false
-  readOnlyRootFilesystem: true
-  allowPrivilegeEscalation: false
-  capabilities:
-    drop: ["ALL"]
-  seccompProfile:
-    type: "RuntimeDefault"
-## phpMyAdmin number of pod replicas
-## @param replicas Number of replicas
-replicas: 1
-## phpMyAdmin containers' resource requests and limits
-## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-## We usually recommend not specifying default resources, leaving this as a conscious
-## choice for the user. This also increases the chances the chart runs in environments with
-## limited resources, such as Minikube. If you do want to specify resources, uncomment the
-## following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-##
-resourcesPreset: "micro"
-## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-## Example:
-## resources:
-##   requests:
-##     cpu: 2
-##     memory: 512Mi
-##   limits:
-##     cpu: 3
-##     memory: 1024Mi
-##
-resources: {}
-## phpMyAdmin containers' startup probe. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-## @param startupProbe.enabled Enable startupProbe
-## @param startupProbe.httpGet.path Request path for startupProbe
-## @param startupProbe.httpGet.port Port for startupProbe
-## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-## @param startupProbe.periodSeconds Period seconds for startupProbe
-## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
-## @param startupProbe.failureThreshold Failure threshold for startupProbe
-## @param startupProbe.successThreshold Success threshold for startupProbe
-##
-startupProbe:
-  enabled: false
-  initialDelaySeconds: 30
-  timeoutSeconds: 30
-  periodSeconds: 10
-  successThreshold: 1
-  failureThreshold: 6
-  httpGet:
-    path: /
-    port: http
-## phpMyAdmin containers' liveness probe. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-## @param livenessProbe.enabled Enable livenessProbe
-## @param livenessProbe.tcpSocket.port Port for livenessProbe
-## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-## @param livenessProbe.periodSeconds Period seconds for livenessProbe
-## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
-## @param livenessProbe.successThreshold Success threshold for livenessProbe
-##
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 30
-  timeoutSeconds: 30
-  periodSeconds: 10
-  successThreshold: 1
-  failureThreshold: 6
-  tcpSocket:
-    port: http
-## phpMyAdmin containers' readiness probes. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-## @param readinessProbe.enabled Enable readinessProbe
-## @param readinessProbe.httpGet.path Request path for readinessProbe
-## @param readinessProbe.httpGet.port Port for readinessProbe
-## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-## @param readinessProbe.periodSeconds Period seconds for readinessProbe
-## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
-## @param readinessProbe.successThreshold Success threshold for readinessProbe
-##
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 30
-  timeoutSeconds: 30
-  periodSeconds: 10
-  successThreshold: 1
-  failureThreshold: 6
-  httpGet:
-    path: /
-    port: http
-## @param customStartupProbe Override default startup probe
-##
-customStartupProbe: {}
-## @param customLivenessProbe Override default liveness probe
-##
-customLivenessProbe: {}
-## @param customReadinessProbe Override default readiness probe
-##
-customReadinessProbe: {}
-## @param podLabels Extra labels for PhpMyAdmin pods
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-##
-podLabels: {}
-## @param podAnnotations Annotations for PhpMyAdmin pods
-## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-##
-podAnnotations: {}
-## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAffinityPreset: ""
-## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-##
-podAntiAffinityPreset: soft
-## Node affinity preset
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-##
-nodeAffinityPreset:
-  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
-  ##
-  type: ""
-  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
-  ## E.g.
-  ## key: "kubernetes.io/e2e-az-name"
-  ##
-  key: ""
-  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
-  ## E.g.
-  ## values:
-  ##   - e2e-az1
-  ##   - e2e-az2
-  ##
-  values: []
-## @param affinity Affinity for pod assignment. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
-##
-affinity: {}
-## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-##
-nodeSelector: {}
-## @param tolerations Tolerations for pod assignment. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
-tolerations: []
-## @param priorityClassName phpmyadmin pods' priorityClassName
-##
-priorityClassName: ""
-## @param schedulerName Name of the k8s scheduler (other than default)
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-schedulerName: ""
-## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
-## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-## The value is evaluated as a template
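-## e.g. spread replicas across nodes (a sketch; the selector labels are assumptions):
-## topologySpreadConstraints:
-##   - maxSkew: 1
-##     topologyKey: kubernetes.io/hostname
-##     whenUnsatisfiable: ScheduleAnyway
-##     labelSelector:
-##       matchLabels:
-##         app.kubernetes.io/name: phpmyadmin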
-##
-topologySpreadConstraints: []
-## @param extraVolumes Optionally specify extra list of additional volumes for PhpMyAdmin pods
-##
-extraVolumes: []
-## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for PhpMyAdmin container(s)
-##
-extraVolumeMounts: []
-## @param initContainers Add init containers to the PhpMyAdmin pods
-## Example:
-## initContainers:
-##   - name: your-image-name
-##     image: your-image
-##     imagePullPolicy: Always
-##     ports:
-##       - name: portname
-##         containerPort: 1234
-##
-initContainers: []
-## Pod Disruption Budget configuration
-## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-## @param pdb.create Enable/disable a Pod Disruption Budget creation
-## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
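-## e.g. keep at least one pod available during voluntary disruptions (a sketch):
-## pdb:
-##   create: true
-##   minAvailable: 1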
-##
-pdb:
-  create: true
-  minAvailable: ""
-  maxUnavailable: ""
-## @param sidecars Add sidecar containers to the PhpMyAdmin pods
-## Example:
-## sidecars:
-##   - name: your-image-name
-##     image: your-image
-##     imagePullPolicy: Always
-##     ports:
-##       - name: portname
-##         containerPort: 1234
-##
-sidecars: []
-## @section Traffic Exposure parameters
-
-## Service configuration
-##
-service:
-  ## @param service.type Kubernetes Service type
-  ##
-  type: ClusterIP
-  ## @param service.ports.http Service HTTP port
-  ## @param service.ports.https Service HTTPS port
-  ##
-  ports:
-    http: 80
-    https: 443
-  ## Specify the nodePort values for the LoadBalancer and NodePort service types
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  ## @param service.nodePorts.http Kubernetes http node port
-  ## @param service.nodePorts.https Kubernetes https node port
-  ##
-  nodePorts:
-    http: ""
-    https: ""
-  ## @param service.clusterIP PhpMyAdmin service clusterIP IP
-  ## e.g:
-  ## clusterIP: None
-  ##
-  clusterIP: ""
-  ## @param service.loadBalancerIP Load balancer IP for the phpMyAdmin Service (optional, cloud specific)
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-  ##
-  loadBalancerIP: ""
-  ## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
-  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ## Example:
-  ## loadBalancerSourceRanges:
-  ##   - 10.10.10.0/24
-  ##
-  loadBalancerSourceRanges: []
-  ## @param service.externalTrafficPolicy Enable client source IP preservation
-  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-  ##
-  externalTrafficPolicy: Cluster
-  ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-  ##
-  extraPorts: []
-  ## @param service.annotations Provide any additional annotations that may be required for the PhpMyAdmin service
-  ##
-  annotations: {}
-  ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-  ## If "ClientIP", consecutive client requests will be directed to the same Pod
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-  ##
-  sessionAffinity: None
-  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
-  ## sessionAffinityConfig:
-  ##   clientIP:
-  ##     timeoutSeconds: 300
-  ##
-  sessionAffinityConfig: {}
-## Ingress configuration
-##
-ingress:
-  ## @param ingress.enabled Set to true to enable ingress record generation
-  ##
-  enabled: true
-  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
-  ##
-  apiVersion: ""
-  ## DEPRECATED: Use ingress.annotations instead of ingress.certManager
-  ## certManager: false
-  ##
-
-  ## @param ingress.hostname When the ingress is enabled, a host pointing to this will be created
-  ##
-  hostname: {{ .Values.globals.phpmyadmin.hostname }}
-  ## @param ingress.pathType Ingress path type
-  ##
-  pathType: ImplementationSpecific
-  ## @param ingress.path Default path for the ingress record
-  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
-  ##
-  path: /
-  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
-  ## e.g:
-  ## extraPaths:
-  ## - path: /*
-  ##   backend:
-  ##     serviceName: ssl-redirect
-  ##     servicePort: use-annotation
-  ##
-  extraPaths: []
-  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
-  ## For a full list of possible ingress annotations, please see
-  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-  ## Use this parameter to set the required annotations for cert-manager, see
-  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-  ##
-  ## e.g:
-  ## annotations:
-  ##   kubernetes.io/ingress.class: nginx
-  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-  ##
-  annotations:
-    kubernetes.io/ingress.class: {{ .Values.globals.phpmyadmin.ingressClass }}
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-  ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter
-  ## TLS certificates will be retrieved from a TLS secret with name: \{\{- printf "%s-tls" .Values.ingress.hostname \}\}
-  ## You can use the ingress.secrets parameter to create this TLS secret, relay on cert-manager to create it, or
-  ## let the chart create self-signed certificates for you
-  ##
-  tls: true
-  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
-  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
-  ## Example:
-  ## extraHosts:
-  ##   - name: phpmyadmin.local
-  ##     path: /
-  ##
-  extraHosts: []
-  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
-  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-  ## Example:
-  ## extraTls:
-  ## - hosts:
-  ##     - phpmyadmin.local
-  ##   secretName: phpmyadmin.local-tls
-  ##
-  extraTls: []
-  ## @param ingress.secrets If you're providing your own certificates and want to manage the secret via helm,
-  ## please use this to add the certificates as secrets. key and certificate should start with
-  ## -----BEGIN CERTIFICATE----- or REDACTED, and
-  ## name should line up with a secretName set further up
-  ##
-  ## If it is not set and you're using cert-manager, this is unneeded, as it will create the secret for you
-  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  ##
-  ## Example
-  ## secrets:
-  ##   - name: phpmyadmin.local-tls
-  ##     key: ""
-  ##     certificate: ""
-  ##
-  secrets: []
-  ## @param ingress.existingSecretName If you're providing your own certificate and want to manage the secret yourself,
-  ## please provide the name of the secret with this parameter. This secret will then be used for tls termination.
-  ## It has higher priority than the cert-manager or the generation of the certificate from the chart.
-  ##
-  ## Example:
-  ## existingSecretName: "byo-phpmyadmin-tls"
-  ##
-  existingSecretName: ""
-  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
-  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-  ##
-  ingressClassName: ""
-  ## @param ingress.extraRules Additional rules to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-  ## e.g:
-  ## extraRules:
-  ## - host: phpmyadmin.local
-  ##     http:
-  ##       path: /
-  ##       backend:
-  ##         service:
-  ##           name: phpmyadmin-svc
-  ##           port:
-  ##             name: http
-  ##
-  extraRules: []
-## @section Database parameters
-
-## Database configuration
-##
-db:
-  ## @param db.allowArbitraryServer Enable connection to arbitrary MySQL server
-  ## If you do not want the user to be able to specify an arbitrary MySQL server at login time, set this to false
-  ##
-  allowArbitraryServer: true
-  ## @param db.port Database port to use to connect
-  ##
-  port: 3306
-  ## @param db.chartName Database suffix if included in the same release
-  ## If you are deploying phpMyAdmin as part of a release and the database is part
-  ## of the release, you can pass a suffix that will be used to find the database
-  ## in releasename-dbSuffix. Please note that this setting precedes db.host
-  ## e.g:
-  ## chartName: mariadb
-  ##
-  chartName: ""
-  ## @param db.host Database Hostname. Ignored when `db.chartName` is set.
-  ## e.g:
-  ## host: foo
-  ##
-  host: "mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local"
-  ## @param db.bundleTestDB Deploy a MariaDB instance for testing purposes
-  ##
-  bundleTestDB: false
-  ## @param db.enableSsl Enable SSL for the connection between phpMyAdmin and the database
-  ##
-  enableSsl: false
-  ssl:
-    ## @param db.ssl.clientKey Client key file when using SSL
-    ##
-    clientKey: ""
-    ## @param db.ssl.clientCertificate Client certificate file when using SSL
-    ##
-    clientCertificate: ""
-    ## @param db.ssl.caCertificate CA file when using SSL
-    ##
-    caCertificate: ""
-    ## @param db.ssl.ciphers List of allowable ciphers for connections when using SSL
-    ##
-    ciphers: []
-    ## @param db.ssl.verify Enable SSL certificate validation
-    ##
-    verify: true
-## @param mariadb MariaDB chart configuration
-## https://github.com/bitnami/charts/blob/main/bitnami/mariadb/values.yaml
-##
-mariadb: {}
-## @section Other Parameters
-
-## Service account for PhpMyAdmin to use.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  ## @param serviceAccount.create Enable creation of ServiceAccount for PhpMyAdmin pod
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the ServiceAccount to use.
-  ## If not set and create is true, a name is generated using the common.names.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
-  ##
-  automountServiceAccountToken: false
-  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
-  ##
-  annotations: {}
-## @section Metrics parameters
-
-## Prometheus Exporter / Metrics
-##
-metrics:
-  ## @param metrics.enabled Start a side-car prometheus exporter
-  ##
-  enabled: true
-  ## @param metrics.image.registry [default: REGISTRY_NAME] Apache exporter image registry
-  ## @param metrics.image.repository [default: REPOSITORY_NAME/apache-exporter] Apache exporter image repository
-  ## @skip metrics.image.tag Apache exporter image tag (immutable tags are recommended)
-  ## @param metrics.image.digest Apache exporter image digest in the form sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param metrics.image.pullPolicy Image pull policy
-  ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/apache-exporter
-    tag: 1.0.9-debian-12-r8
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## Example:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ##
-  resources: {}
-  ## Prometheus Exporter service configuration
-  ##
-  service:
-    ## @param metrics.service.type Prometheus metrics service type
-    ##
-    type: ClusterIP
-    ## @param metrics.service.port Prometheus metrics service port
-    ##
-    port: 9117
-    ## @param metrics.service.annotations [object] Annotations for Prometheus metrics service
-    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/port: "{{ `{{ .Values.metrics.service.port }}` }}"
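-    ## NOTE: the backticks are helmfile gotmpl escaping: helmfile renders this
-    ## file first and emits the literal string "{{ .Values.metrics.service.port }}",
-    ## which the chart can then resolve itself when it templates the
-    ## annotation values.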
-    ## @param metrics.service.clusterIP phpmyadmin service Cluster IP
-    ## e.g.:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param metrics.service.loadBalancerIP Load Balancer IP if the Prometheus metrics service type is `LoadBalancer`
-    ## Set this to request a specific static IP, otherwise leave blank
-    ##
-    loadBalancerIP: ""
-    ## @param metrics.service.loadBalancerSourceRanges phpmyadmin service Load Balancer sources
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param metrics.service.externalTrafficPolicy phpmyadmin service external traffic policy
-    ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-  ## Prometheus Service Monitor
-  ## ref: https://github.com/coreos/prometheus-operator
-  ##      https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
-    ##
-    enabled: false
-    ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-    ##
-    jobLabel: ""
-    ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
-    ##
-    interval: 30s
-    ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
-    ## e.g:
-    ## scrapeTimeout: 30s
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-    ##
-    relabelings: []
-    ## @param metrics.serviceMonitor.metricRelabelings Specify Metric Relabelings to add to the scrape endpoint
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
-    ##
-    labels: {}
-    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter to add to the scrape endpoint
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels
-    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-    ##
-    ## selector:
-    ##   prometheus: my-prometheus
-    ##
-    selector: {}
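-    ## A minimal sketch for scraping via the Prometheus Operator; the
-    ## `release` label value is hypothetical and must match whatever label
-    ## selector your Prometheus instance is configured with:
-    ## enabled: true
-    ## labels:
-    ##   release: kube-prometheus-stack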
-## @section NetworkPolicy parameters
-
-## Network Policy configuration
-## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-##
-networkPolicy:
-  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-  ##
-  enabled: true
-  ## @param networkPolicy.allowExternal Don't require server label for connections
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## server label will have network access to the ports server is listening
-  ## on. When true, server will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  extraIngress: []
-  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
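-  ## e.g., when `allowExternal: false`, traffic from another namespace can be
-  ## allowed via namespace labels (label key/value below are hypothetical):
-  ## ingressNSMatchLabels:
-  ##   name: monitoring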
-
diff --git a/k8s/helmfile.d/values/postgres/values.yaml.gotmpl b/k8s/helmfile.d/values/postgres/values.yaml.gotmpl
deleted file mode 100644
index 06c1261..0000000
--- a/k8s/helmfile.d/values/postgres/values.yaml.gotmpl
+++ /dev/null
@@ -1,1936 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Please note that this will override the parameters configured to use the global value, including dependencies
-##
-global:
-  ## @param global.imageRegistry Global Docker image registry
-  ##
-  imageRegistry: ""
-  ## @param global.imagePullSecrets Global Docker registry secret names as an array
-  ## e.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-  ## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-  ##
-  defaultStorageClass: {{ .Values.globals.postgres.storageClass }}
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  postgresql:
-    ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`)
-    ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
-    ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`)
-    ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
-    ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`).
-    ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
-    ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
-    ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set.
-    ##
-    auth:
-      postgresPassword: {{ .Values.globals.postgres.postgresPassword }}
-      username: {{ .Values.globals.postgres.username }}
-      password: {{ .Values.globals.postgres.password }}
-      database: auth
-      existingSecret: ""
-      secretKeys:
-        adminPasswordKey: ""
-        userPasswordKey: ""
-        replicationPasswordKey: ""
-    ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
-    ##
-    service:
-      ports:
-        postgresql: ""
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-##
-
-## @param kubeVersion Override Kubernetes version
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname template
-##
-fullnameOverride: ""
-## @param namespaceOverride String to fully override common.names.namespace
-##
-namespaceOverride: ""
-## @param clusterDomain Kubernetes Cluster Domain
-##
-clusterDomain: cluster.local
-## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template)
-##
-extraDeploy: []
-## @param commonLabels Add labels to all the deployed resources
-##
-commonLabels: {}
-## @param commonAnnotations Add annotations to all the deployed resources
-##
-commonAnnotations: {}
-## Enable diagnostic mode in the statefulset
-##
-diagnosticMode:
-  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
-  ##
-  enabled: false
-  ## @param diagnosticMode.command Command to override all containers in the statefulset
-  ##
-  command:
-    - sleep
-  ## @param diagnosticMode.args Args to override all containers in the statefulset
-  ##
-  args:
-    - infinity
-## @section PostgreSQL common parameters
-##
-
-## Bitnami PostgreSQL image version
-## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
-## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry
-## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository
-## @skip image.tag PostgreSQL image tag (immutable tags are recommended)
-## @param image.digest PostgreSQL image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy PostgreSQL image pull policy
-## @param image.pullSecrets Specify image pull secrets
-## @param image.debug Specify if debug values should be set
-##
-image:
-  registry: docker.io
-  repository: bitnami/postgresql
-  tag: 17.2.0-debian-12-r8
-  digest: ""
-  ## Specify an imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## Example:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Set to true if you would like to see extra information on logs
-  ##
-  debug: false
-## Authentication parameters
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run
-##
-auth:
-  ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user
-  ##
-  enablePostgresUser: true
-  ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided
-  ##
-  postgresPassword: ""
-  ## @param auth.username Name for a custom user to create
-  ##
-  username: ""
-  ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided
-  ##
-  password: ""
-  ## @param auth.database Name for a custom database to create
-  ##
-  database: ""
-  ## @param auth.replicationUsername Name of the replication user
-  ##
-  replicationUsername: repl_user
-  ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided
-  ##
-  replicationPassword: ""
-  ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked up from this secret in this case.
-  ##
-  existingSecret: ""
-  ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
-  ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
-  ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
-  ##
-  secretKeys:
-    adminPasswordKey: postgres-password
-    userPasswordKey: password
-    replicationPasswordKey: replication-password
-  ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables
-  ##
-  usePasswordFiles: false
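-  ## Instead of templating passwords into the rendered manifests (as the
-  ## global.postgresql.auth values above do), an existing secret can be
-  ## referenced; a hypothetical sketch (name and literals are placeholders,
-  ## the key names match the secretKeys defaults above):
-  ##   kubectl create secret generic postgres-credentials \
-  ##     --from-literal=postgres-password='<admin-pass>' \
-  ##     --from-literal=password='<user-pass>' \
-  ##     --from-literal=replication-password='<repl-pass>'
-  ## ...and then set: existingSecret: postgres-credentials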
-## @param architecture PostgreSQL architecture (`standalone` or `replication`)
-##
-architecture: standalone
-## Replication configuration
-## Ignored if `architecture` is `standalone`
-##
-replication:
-  ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off`
-  ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`.
-  ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT
-  ##
-  synchronousCommit: "off"
-  numSynchronousReplicas: 0
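-  ## e.g., a sketch of synchronous replication (assumes `architecture: replication`
-  ## and `readReplicas.replicaCount` >= 1):
-  ## synchronousCommit: "on"
-  ## numSynchronousReplicas: 1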
-  ## @param replication.applicationName Cluster application name. Useful for advanced replication settings
-  ##
-  applicationName: my_application
-## @param containerPorts.postgresql PostgreSQL container port
-##
-containerPorts:
-  postgresql: 5432
-## Audit settings
-## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing
-## @param audit.logHostname Log client hostnames
-## @param audit.logConnections Add client log-in operations to the log file
-## @param audit.logDisconnections Add client log-outs operations to the log file
-## @param audit.pgAuditLog Add operations to log using the pgAudit extension
-## @param audit.pgAuditLogCatalog Log catalog using pgAudit
-## @param audit.clientMinMessages Message log level to share with the user
-## @param audit.logLinePrefix Template for log line prefix (default if not set)
-## @param audit.logTimezone Timezone for the log timestamps
-##
-audit:
-  logHostname: false
-  logConnections: false
-  logDisconnections: false
-  pgAuditLog: ""
-  pgAuditLogCatalog: "off"
-  clientMinMessages: error
-  logLinePrefix: ""
-  logTimezone: ""
-## LDAP configuration
-## @param ldap.enabled Enable LDAP support
-## @param ldap.server IP address or name of the LDAP server.
-## @param ldap.port Port number on the LDAP server to connect to
-## @param ldap.prefix String to prepend to the user name when forming the DN to bind
-## @param ldap.suffix String to append to the user name when forming the DN to bind
-## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead
-## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead
-## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead
-## @param ldap.basedn Root DN to begin the search for the user in
-## @param ldap.binddn DN of user to bind to LDAP
-## @param ldap.bindpw Password for the user to bind to LDAP
-## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead
-## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead
-## @param ldap.searchAttribute Attribute to match against the user name in the search
-## @param ldap.searchFilter The search filter to use when doing search+bind authentication
-## @param ldap.scheme Set to `ldaps` to use LDAPS
-## DEPRECATED ldap.tls as a string is deprecated, please use 'ldap.tls.enabled' instead
-## @param ldap.tls.enabled Set to true to enable TLS encryption
-##
-ldap:
-  enabled: false
-  server: ""
-  port: ""
-  prefix: ""
-  suffix: ""
-  basedn: ""
-  binddn: ""
-  bindpw: ""
-  searchAttribute: ""
-  searchFilter: ""
-  scheme: ""
-  tls:
-    enabled: false
-  ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.
-  ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html
-  ##
-  uri: ""
-## @param postgresqlDataDir PostgreSQL data dir folder
-##
-postgresqlDataDir: /bitnami/postgresql/data
-## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list)
-##
-postgresqlSharedPreloadLibraries: "pgaudit"
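-## e.g., to also preload pg_stat_statements (assuming the extension is present
-## in the image):
-## postgresqlSharedPreloadLibraries: "pgaudit,pg_stat_statements"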
-## Start PostgreSQL pod(s) without limitations on shm memory.
-## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M`
-## ref: https://github.com/docker-library/postgres/issues/416
-## ref: https://github.com/containerd/containerd/issues/3654
-##
-shmVolume:
-  ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)
-  ##
-  enabled: true
-  ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs
-  ## Note: the size of the tmpfs counts against container's memory limit
-  ## e.g:
-  ## sizeLimit: 1Gi
-  ##
-  sizeLimit: ""
-## TLS configuration
-##
-tls:
-  ## @param tls.enabled Enable TLS traffic support
-  ##
-  enabled: false
-  ## @param tls.autoGenerated Generate automatically self-signed TLS certificates
-  ##
-  autoGenerated: false
-  ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's
-  ##
-  preferServerCiphers: true
-  ## @param tls.certificatesSecret Name of an existing secret that contains the certificates
-  ##
-  certificatesSecret: ""
-  ## @param tls.certFilename Certificate filename
-  ##
-  certFilename: ""
-  ## @param tls.certKeyFilename Certificate key filename
-  ##
-  certKeyFilename: ""
-  ## @param tls.certCAFilename CA Certificate filename
-  ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting a certificate from them
-  ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html
-  ##
-  certCAFilename: ""
-  ## @param tls.crlFilename File containing a Certificate Revocation List
-  ##
-  crlFilename: ""
-## @section PostgreSQL Primary parameters
-##
-primary:
-  ## @param primary.name Name of the primary database (e.g. primary, master, leader, ...)
-  ##
-  name: primary
-  ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap
-  ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
-  ##
-  configuration: ""
-  ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration
-  ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html
-  ## e.g:
-  ## pgHbaConfiguration: |-
-  ##   local all all trust
-  ##   host all all localhost trust
-  ##   host mydatabase mysuser 192.168.0.0/24 md5
-  ##
-  pgHbaConfiguration: ""
-  ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration
-  ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored
-  ##
-  existingConfigmap: ""
-  ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration)
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
-  ##
-  extendedConfiguration: ""
-  ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration
-  ## NOTE: `primary.extendedConfiguration` will be ignored
-  ##
-  existingExtendedConfigmap: ""
-  ## Initdb configuration
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments
-  ##
-  initdb:
-    ## @param primary.initdb.args PostgreSQL initdb extra arguments
-    ##
-    args: ""
-    ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log
-    ##
-    postgresqlWalDir: ""
-    ## @param primary.initdb.scripts Dictionary of initdb scripts
-    ## Specify dictionary of scripts to be run at first boot
-    ## e.g:
-    ## scripts:
-    ##   my_init_script.sh: |
-    ##      #!/bin/sh
-    ##      echo "Do something."
-    ##
-    scripts:
-      # NOTE: there is a `scriptsSecret` field at the bottom of this block
-      # (unlike for MySQL), but using it would mean creating a Secret first.
-      # Since the database information is stored in the helmfile
-      # configuration, that would require a chart just for the secret, which
-      # is not worth doing here.
-      # Unfortunately, there is also no `startDbScripts` equivalent, so these
-      # scripts only run on the first install.
-      #
-      # The templated variant below could not be made to work, so it is left
-      # commented out for reference:
-      # {{- range .Values.globals.postgres.databases }}
-      # create_{{ .database }}_database.sql: |
-      #   CREATE USER {{ .username }} WITH ENCRYPTED PASSWORD '{{ .password }}';
-      #   CREATE DATABASE {{ .database }} WITH OWNER = {{ .username }};
-      # {{- end }}
-    ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot
-    ## NOTE: This will override `primary.initdb.scripts`
-    ##
-    scriptsConfigMap: ""
-    ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information)
-    ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap`
-    ##
-    scriptsSecret: ""
-    ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts
-    ##
-    user: ""
-    ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts
-    ##
-    password: ""
-  ## Pre-init configuration
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start
-  preInitDb:
-    ## @param primary.preInitDb.scripts Dictionary of pre-init scripts
-    ## Specify dictionary of shell scripts to be run before db boot
-    ## e.g:
-    ## scripts:
-    ##   my_pre_init_script.sh: |
-    ##      #!/bin/sh
-    ##      echo "Do something."
-    scripts: {}
-    ## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run
-    ## NOTE: This will override `primary.preInitDb.scripts`
-    scriptsConfigMap: ""
-    ## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run
-    ## NOTE: This can work along `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap`
-    scriptsSecret: ""
-  ## Configure current cluster's primary server to be the standby server in other cluster.
-  ## This will allow cross cluster replication and provide cross cluster high availability.
-  ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled.
-  ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not
-  ## @param primary.standby.primaryHost The Host of replication primary in the other cluster
-  ## @param primary.standby.primaryPort The Port of replication primary in the other cluster
-  ##
-  standby:
-    enabled: false
-    primaryHost: ""
-    primaryPort: ""
-  ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param primary.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param primary.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers
-  ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 30
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers
-  ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers
-  ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param primary.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 30
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param primary.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## PostgreSQL Primary resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Pod Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ## @param primary.podSecurityContext.enabled Enable security context
-  ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param primary.podSecurityContext.fsGroup Group ID for the pod
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param primary.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param primary.hostAliases PostgreSQL primary pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary)
-  ##
-  hostNetwork: false
-  ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary)
-  ##
-  hostIPC: false
-  ## @param primary.labels Map of labels to add to the statefulset (postgresql primary)
-  ##
-  labels: {}
-  ## @param primary.annotations Annotations for PostgreSQL primary pods
-  ##
-  annotations: {}
-  ## @param primary.podLabels Map of labels to add to the pods (postgresql primary)
-  ##
-  podLabels: {}
-  ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary)
-  ##
-  podAnnotations: {}
-  ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## PostgreSQL Primary node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param primary.affinity Affinity for PostgreSQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
-  ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary)
-  ##
-  priorityClassName: ""
-  ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-  ##
-  terminationGracePeriodSeconds: ""
-  ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type
-  ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    type: RollingUpdate
-    rollingUpdate: {}
-  ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s)
-  ##
-  extraVolumes: []
-  ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s)
-  ## For example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s)
-  ## Example
-  ##
-  ## initContainers:
-  ##   - name: do-something
-  ##     image: busybox
-  ##     command: ['do', 'something']
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s)
-  ##
-  extraPodSpec: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param primary.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    extraIngress: []
-    ## @param primary.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           - matchLabels:
-    ##               - role: frontend
-    ##       - podSelector:
-    ##           - matchExpressions:
-    ##               - key: role
-    ##                 operator: In
-    ##                 values:
-    ##                   - frontend
-    ##
-    extraEgress: []
-    ## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## PostgreSQL Primary service configuration
-  ##
-  service:
-    ## @param primary.service.type Kubernetes Service type
-    ##
-    type: ClusterIP
-    ## @param primary.service.ports.postgresql PostgreSQL service port
-    ##
-    ports:
-      postgresql: 5432
-    ## Node ports to expose
-    ## NOTE: choose port between <30000-32767>
-    ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ##
-    nodePorts:
-      postgresql: ""
-    ## @param primary.service.clusterIP Static clusterIP or None for headless services
-    ## e.g:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param primary.service.annotations Annotations for PostgreSQL primary service
-    ##
-    annotations: {}
-    ## @param primary.service.loadBalancerClass Load balancer class if service type is `LoadBalancer`
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
-    ##
-    loadBalancerClass: ""
-    ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
-    ## Set the LoadBalancer service type to internal only
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param primary.service.externalTrafficPolicy Enable client source IP preservation
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ##
-    ## loadBalancerSourceRanges:
-    ## - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service
-    ##
-    extraPorts: []
-    ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-    ## Headless service properties
-    ##
-    headless:
-      ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service
-      ##
-      annotations: {}
-  ## PostgreSQL Primary persistence configuration
-  ##
-  persistence:
-    ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
-    ##
-    enabled: true
-    ## @param primary.persistence.volumeName Name to assign the volume
-    ##
-    volumeName: "data"
-    ## @param primary.persistence.existingClaim Name of an existing PVC to use
-    ##
-    existingClaim: ""
-    ## @param primary.persistence.mountPath The path the volume will be mounted at
-    ## Note: useful when using custom PostgreSQL images
-    ##
-    mountPath: /bitnami/postgresql
-    ## @param primary.persistence.subPath The subdirectory of the volume to mount to
-    ## Useful in dev environments or when sharing one PV across multiple services
-    ##
-    subPath: ""
-    ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    storageClass: ""
-    ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume
-    ##
-    accessModes:
-      - ReadWriteOnce
-    ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume
-    ##
-    size: 8Gi
-    ## @param primary.persistence.annotations Annotations for the PVC
-    ##
-    annotations: {}
-    ## @param primary.persistence.labels Labels for the PVC
-    ##
-    labels: {}
-    ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-    ## @param primary.persistence.dataSource Custom PVC data source
-    ##
-    dataSource: {}
-  ## PostgreSQL Primary Persistent Volume Claim Retention Policy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset
-    ##
-    enabled: false
-    ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-    ##
-    whenScaled: Retain
-    ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-    ##
-    whenDeleted: Retain
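-    ## e.g., to keep the PVC when scaling in but delete it together with the
-    ## StatefulSet:
-    ## enabled: true
-    ## whenScaled: Retain
-    ## whenDeleted: Delete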
-## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
-##
-readReplicas:
-  ## @param readReplicas.name Name of the read replicas database (e.g. secondary, slave, ...)
-  ##
-  name: read
-  ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas
-  ##
-  replicaCount: 1
-  ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration)
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
-  ##
-  extendedConfiguration: ""
-  ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param readReplicas.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param readReplicas.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers
-  ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 30
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers
-  ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers
-  ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 30
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## PostgreSQL read only resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Pod Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ## @param readReplicas.podSecurityContext.enabled Enable security context
-  ## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-  ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param readReplicas.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only)
-  ##
-  hostNetwork: false
-  ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only)
-  ##
-  hostIPC: false
-  ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only)
-  ##
-  labels: {}
-  ## @param readReplicas.annotations Annotations for PostgreSQL read only pods
-  ##
-  annotations: {}
-  ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only)
-  ##
-  podLabels: {}
-  ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only)
-  ##
-  podAnnotations: {}
-  ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## PostgreSQL read only node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set.
-    ## E.g.
-    ## key: "kubernetes.io/e2e-az-name"
-    ##
-    key: ""
-    ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set.
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
-  ##
-  topologySpreadConstraints: []
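-  ## A minimal illustrative constraint that spreads read replicas across nodes
-  ## (the label selector below is an assumption; adjust it to the release labels):
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: kubernetes.io/hostname
-  ##     whenUnsatisfiable: ScheduleAnyway
-  ##     labelSelector:
-  ##       matchLabels:
-  ##         app.kubernetes.io/component: read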
-  ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only)
-  ##
-  priorityClassName: ""
-  ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork".
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
-  ##
-  terminationGracePeriodSeconds: ""
-  ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type
-  ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    type: RollingUpdate
-    rollingUpdate: {}
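-  ## As an illustrative alternative, pods can be updated only when manually deleted:
-  ## updateStrategy:
-  ##   type: OnDelete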
-  ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s)
-  ##
-  extraVolumes: []
-  ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s)
-  ## For example:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s)
-  ## Example
-  ##
-  ## initContainers:
-  ##   - name: do-something
-  ##     image: busybox
-  ##     command: ['do', 'something']
-  ##
-  initContainers: []
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s)
-  ##
-  extraPodSpec: {}
-  ## Network Policies
-  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-  ##
-  networkPolicy:
-    ## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-    ##
-    enabled: true
-    ## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections
-    ## The Policy model to apply. When set to false, only pods with the correct
-    ## server label will have network access to the ports server is listening
-    ## on. When true, server will accept connections from any source
-    ## (with the correct destination port).
-    ##
-    allowExternal: true
-    ## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-    ##
-    allowExternalEgress: true
-    ## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-    ## e.g:
-    ## extraIngress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     from:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    extraIngress: []
-    ## @param readReplicas.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-    ## e.g:
-    ## extraEgress:
-    ##   - ports:
-    ##       - port: 1234
-    ##     to:
-    ##       - podSelector:
-    ##           matchLabels:
-    ##             role: frontend
-    ##       - podSelector:
-    ##           matchExpressions:
-    ##             - key: role
-    ##               operator: In
-    ##               values:
-    ##                 - frontend
-    ##
-    extraEgress: []
-    ## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-    ## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-  ## PostgreSQL read only service configuration
-  ##
-  service:
-    ## @param readReplicas.service.type Kubernetes Service type
-    ##
-    type: ClusterIP
-    ## @param readReplicas.service.ports.postgresql PostgreSQL service port
-    ##
-    ports:
-      postgresql: 5432
-    ## Node ports to expose
-    ## NOTE: choose port between <30000-32767>
-    ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ##
-    nodePorts:
-      postgresql: ""
-    ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services
-    ## e.g:
-    ## clusterIP: None
-    ##
-    clusterIP: ""
-    ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service
-    ##
-    annotations: {}
-    ## @param readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer`
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
-    ##
-    loadBalancerClass: ""
-    ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer`
-    ## Set the LoadBalancer service type to internal only
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation
-    ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ##
-    ## loadBalancerSourceRanges:
-    ## - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service
-    ##
-    extraPorts: []
-    ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-    ## Headless service properties
-    ##
-    headless:
-      ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service
-      ##
-      annotations: {}
-  ## PostgreSQL read only persistence configuration
-  ##
-  persistence:
-    ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC
-    ##
-    enabled: true
-    ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use
-    ##
-    existingClaim: ""
-    ## @param readReplicas.persistence.mountPath The path the volume will be mounted at
-    ## Note: useful when using custom PostgreSQL images
-    ##
-    mountPath: /bitnami/postgresql
-    ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to
-    ## Useful in dev environments and when sharing one PV across multiple services
-    ##
-    subPath: ""
-    ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    storageClass: ""
-    ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume
-    ##
-    accessModes:
-      - ReadWriteOnce
-    ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume
-    ##
-    size: 8Gi
-    ## @param readReplicas.persistence.annotations Annotations for the PVC
-    ##
-    annotations: {}
-    ## @param readReplicas.persistence.labels Labels for the PVC
-    ##
-    labels: {}
-    ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-    ## @param readReplicas.persistence.dataSource Custom PVC data source
-    ##
-    dataSource: {}
-  ## PostgreSQL Read only Persistent Volume Claim Retention Policy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    ## @param readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset
-    ##
-    enabled: false
-    ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-    ##
-    whenScaled: Retain
-    ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-    ##
-    whenDeleted: Retain
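-  ## For example, to drop replica PVCs automatically on scale-down while keeping
-  ## them when the StatefulSet is deleted (illustrative):
-  ## persistentVolumeClaimRetentionPolicy:
-  ##   enabled: true
-  ##   whenScaled: Delete
-  ##   whenDeleted: Retain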
-## @section Backup parameters
-## This section implements a trivial logical dump cronjob of the database.
-## This only comes with the consistency guarantees of the dump program.
-## This is not a snapshot based roll forward/backward recovery backup.
-## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/
-backup:
-  ## @param backup.enabled Enable the logical dump of the database "regularly"
-  enabled: false
-  cronjob:
-    ## @param backup.cronjob.schedule Set the cronjob parameter schedule
-    schedule: "@daily"
-    ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone
-    timeZone: ""
-    ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy
-    concurrencyPolicy: Allow
-    ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit
-    failedJobsHistoryLimit: 1
-    ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit
-    successfulJobsHistoryLimit: 3
-    ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds
-    startingDeadlineSeconds: ""
-    ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished
-    ttlSecondsAfterFinished: ""
-    ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy
-    restartPolicy: OnFailure
-    ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup
-    ## @param backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-    ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-    ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups
-    ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob
-    podSecurityContext:
-      enabled: true
-      fsGroupChangePolicy: Always
-      sysctls: []
-      supplementalGroups: []
-      fsGroup: 1001
-    ## backup container's Security Context
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-    ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context
-    ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-    ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-    ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-    ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-    ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged
-    ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-    ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-    ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-    ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-    containerSecurityContext:
-      enabled: true
-      seLinuxOptions: {}
-      runAsUser: 1001
-      runAsGroup: 1001
-      runAsNonRoot: true
-      privileged: false
-      readOnlyRootFilesystem: true
-      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
-    ## @param backup.cronjob.command Set backup container's command to run
-    command:
-      - /bin/sh
-      - -c
-      - "pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump"
-    ## @param backup.cronjob.labels Set the cronjob labels
-    labels: {}
-    ## @param backup.cronjob.annotations Set the cronjob annotations
-    annotations: {}
-    ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/
-    ##
-    nodeSelector: {}
-    ## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment
-    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-    ##
-    tolerations: []
-    ## backup cronjob container resource requests and limits
-    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-    ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production).
-    ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-    ##
-    resourcesPreset: "nano"
-    ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory
-    ## Example:
-    resources: {}
-    ## resources:
-    ##   requests:
-    ##     cpu: 1
-    ##     memory: 512Mi
-    ##   limits:
-    ##     cpu: 2
-    ##     memory: 1024Mi
-    networkPolicy:
-      ## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
-      ##
-      enabled: true
-    storage:
-      ## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume 
-      ##
-      enabled: true
-      ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`)
-      ## If defined, PVC must be created manually before volume will be bound
-      ##
-      existingClaim: ""
-      ## @param backup.cronjob.storage.resourcePolicy Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted
-      ##
-      resourcePolicy: ""
-      ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume
-      ## If defined, storageClassName: <storageClass>
-      ## If set to "-", storageClassName: "", which disables dynamic provisioning
-      ## If undefined (the default) or set to null, no storageClassName spec is
-      ## set, choosing the default provisioner.
-      ##
-      storageClass: ""
-      ## @param backup.cronjob.storage.accessModes PV Access Mode
-      ##
-      accessModes:
-        - ReadWriteOnce
-      ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume
-      ##
-      size: 8Gi
-      ## @param backup.cronjob.storage.annotations PVC annotations
-      ##
-      annotations: {}
-      ## @param backup.cronjob.storage.mountPath Path to mount the volume at
-      ##
-      mountPath: /backup/pgdump
-      ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at
-      ## Useful in dev environments and when sharing one PV across multiple services.
-      ##
-      subPath: ""
-      ## Fine tuning for volumeClaimTemplates
-      ##
-      volumeClaimTemplates:
-        ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes)
-        ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details
-        ##
-        selector: {}
-    ## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container
-    ##
-    extraVolumeMounts: []
-    ## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container
-    ##
-    extraVolumes: []
-
-## @section Password update job
-##
-passwordUpdateJob:
-  ## @param passwordUpdateJob.enabled Enable password update job
-  ##
-  enabled: false
-  ## @param passwordUpdateJob.backoffLimit set backoff limit of the job
-  ##
-  backoffLimit: 10
-  ## @param passwordUpdateJob.command Override default container command on the password update job container(s) (useful when using custom images)
-  ##
-  command: []
-  ## @param passwordUpdateJob.args Override default container args on the password update job container(s) (useful when using custom images)
-  ##
-  args: []
-  ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job
-  ##
-  extraCommands: ""
-  ## @param passwordUpdateJob.previousPasswords.postgresPassword Previous postgres password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed)
-  ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed)
-  previousPasswords:
-    postgresPassword: ""
-    password: ""
-    replicationPassword: ""
-    existingSecret: ""
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param passwordUpdateJob.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param passwordUpdateJob.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param passwordUpdateJob.podSecurityContext.enabled Enabled credential init job pods' Security Context
-  ## @param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job
-  ## For example:
-  ## extraEnvVars:
-  ##  - name: GF_DEFAULT_INSTANCE_NAME
-  ##    value: my-instance
-  ##
-  extraEnvVars: []
-  ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job
-  ##
-  extraEnvVarsCM: ""
-  ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data)
-  ##
-  extraEnvVarsSecret: ""
-  ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job
-  ##
-  extraVolumes: []
-  ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the password update job container (evaluated as template). Normally used with `extraVolumes`.
-  ##
-  extraVolumeMounts: []
-  ## @param passwordUpdateJob.initContainers Add additional init containers for the password update job pod(s)
-  ##
-  initContainers: []
-  ## Container resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "micro"
-  ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param passwordUpdateJob.hostAliases Add deployment host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param passwordUpdateJob.annotations [object] Add annotations to the job
-  ##
-  annotations: {}
-  ## @param passwordUpdateJob.podLabels Additional pod labels
-  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param passwordUpdateJob.podAnnotations Additional pod annotations
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-
-## @section Volume Permissions parameters
-##
-
-## Init container parameters:
-## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
-  ##
-  enabled: false
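-  ## e.g. enable this when the storage backend (such as NFS) provisions volumes
-  ## owned by root, so the init container can chown them for UID 1001 (illustrative):
-  ## volumePermissions:
-  ##   enabled: true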
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository
-  ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
-  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r35
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## Example:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Init container's Security Context
-  ## Note: the data folder is chown'ed to the main container's containerSecurityContext.runAsUser,
-  ## not to the volumePermissions.containerSecurityContext.runAsUser below
-  ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
-  ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container
-  ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container
-  ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container
-  ##
-  containerSecurityContext:
-    seLinuxOptions: {}
-    runAsUser: 0
-    runAsGroup: 0
-    runAsNonRoot: false
-    seccompProfile:
-      type: RuntimeDefault
-## @section Other Parameters
-##
-
-## @param serviceBindings.enabled Create secret for service binding (Experimental)
-## Ref: https://servicebinding.io/service-provider/
-##
-serviceBindings:
-  enabled: false
-## Service account for PostgreSQL to use.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the ServiceAccount to use.
-  ## If not set and create is true, a name is generated using the common.names.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
-  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
-  ##
-  automountServiceAccountToken: false
-  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
-  ##
-  annotations: {}
-## Creates role for ServiceAccount
-## @param rbac.create Create Role and RoleBinding (required for PSP to work)
-##
-rbac:
-  create: false
-  ## @param rbac.rules Custom RBAC rules to set
-  ## e.g:
-  ## rules:
-  ##   - apiGroups:
-  ##       - ""
-  ##     resources:
-  ##       - pods
-  ##     verbs:
-  ##       - get
-  ##       - list
-  ##
-  rules: []
-## Pod Security Policy
-## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
-##
-psp:
-  create: false
-## @section Metrics Parameters
-##
-metrics:
-  ## @param metrics.enabled Start a prometheus exporter
-  ##
-  enabled: false
-  ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry
-  ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository
-  ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended)
-  ## @param metrics.image.digest PostgreSQL Prometheus Exporter image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy
-  ## @param metrics.image.pullSecrets Specify image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/postgres-exporter
-    tag: 0.16.0-debian-12-r4
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## Example:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param metrics.collectors Control enabled collectors
-  ## ref: https://github.com/prometheus-community/postgres_exporter#flags
-  ## Example:
-  ## collectors:
-  ##   wal: false
-  collectors: {}
-  ## @param metrics.customMetrics Define additional custom metrics
-  ## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated
-  ## customMetrics:
-  ##   pg_database:
-  ##     query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
-  ##     metrics:
-  ##       - name:
-  ##           usage: "LABEL"
-  ##           description: "Name of the database"
-  ##       - size_bytes:
-  ##           usage: "GAUGE"
-  ##           description: "Size of the database in bytes"
-  ##
-  customMetrics: {}
-  ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter
-  ## see: https://github.com/prometheus-community/postgres_exporter#environment-variables
-  ## For example:
-  ##  extraEnvVars:
-  ##  - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
-  ##    value: "true"
-  ##
-  extraEnvVars: []
-  ## PostgreSQL Prometheus exporter containers' Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context
-  ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
-  ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
-  ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
-  ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged
-  ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
-  ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
-  ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped
-  ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    privileged: false
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
-  ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
-  ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers
-  ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers
-  ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 5
-    failureThreshold: 6
-    successThreshold: 1
-  ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers
-  ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 1
-    failureThreshold: 15
-    successThreshold: 1
-  ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port
-  ##
-  containerPorts:
-    metrics: 9187
-  ## PostgreSQL Prometheus exporter resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Service configuration
-  ##
-  service:
-    ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port
-    ##
-    ports:
-      metrics: 9187
-    ## @param metrics.service.clusterIP Static clusterIP or None for headless services
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
-    ##
-    clusterIP: ""
-    ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin
-    ## Values: ClientIP or None
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/
-    ##
-    sessionAffinity: None
-    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
-    ##
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/port: "{{ `{{ .Values.metrics.service.ports.metrics }}` }}"
-  ## Prometheus Operator ServiceMonitor configuration
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
-    ##
-    enabled: true
-    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ##
-    interval: ""
-    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
-    ##
-    labels: {}
-    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
-    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-    ##
-    selector: {}
-    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
-    ##
-    relabelings: []
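-    ## e.g. to stamp a static label onto every scraped sample (illustrative values):
-    ## relabelings:
-    ##   - targetLabel: cluster
-    ##     replacement: staging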
-    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter for the scrape endpoint
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
-    ##
-    jobLabel: ""
-  ## Custom PrometheusRule to be defined
-  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
-  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
-  ##
-  prometheusRule:
-    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
-    ##
-    enabled: false
-    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
-    ##
-    namespace: ""
-    ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
-    ##
-    labels: {}
-    ## @param metrics.prometheusRule.rules PrometheusRule definitions
-    ## Make sure to constrain the rules to the current postgresql service.
-    ## rules:
-    ##   - alert: HugeReplicationLag
-    ##     expr: pg_replication_lag{service="{{ `{{ printf "%s-metrics" (include "common.names.fullname" .) }}` }}"} / 3600 > 1
-    ##     for: 1m
-    ##     labels:
-    ##       severity: critical
-    ##     annotations:
-    ##       description: replication for {{ `{{ include "common.names.fullname" . }}` }} PostgreSQL is lagging by {{ `{{ "{{ $value }}" }}` }} hour(s).
-    ##       summary: PostgreSQL replication is lagging by {{ `{{ "{{ $value }}" }}` }} hour(s).
-    ##
-    rules: []
-
diff --git a/k8s/helmfile.d/values/rancher/values.yaml.gotmpl b/k8s/helmfile.d/values/rancher/values.yaml.gotmpl
deleted file mode 100644
index 1c153d9..0000000
--- a/k8s/helmfile.d/values/rancher/values.yaml.gotmpl
+++ /dev/null
@@ -1,132 +0,0 @@
-# Additional Trusted CAs.
-# Enable this flag and add your CA certs as a secret named tls-ca-additional in the namespace.
-# See README.md for details.
-additionalTrustedCAs: false
-
-antiAffinity: preferred
-topologyKey: kubernetes.io/hostname
-
-# Audit Logs
-# Source: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log
-# The audit log is piped to the console of the rancher-audit-log container in the rancher pod.
-# level: Verbosity of logs, 0 to 3. 0 is off, 3 most verbose.
-# Docs: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log#audit-log-levels
-auditLog:
-  destination: sidecar
-  hostPath: /var/log/rancher/audit/
-  level: 0
-  maxAge: 1
-  maxBackup: 1
-  maxSize: 100
-
-  # Image for collecting rancher audit logs.
-  # Important: update pkg/image/export/resolve.go when this default image is changed, so that it's reflected accordingly in rancher-images.txt generated for air-gapped setups.
-  image:
-    repository: "rancher/mirrored-bci-micro"
-    tag: 15.6.24.2
-    # Override imagePullPolicy image
-    # options: Always, Never, IfNotPresent
-    pullPolicy: "IfNotPresent"
-
-# As of Rancher v2.5.0 this flag is deprecated and must be set to 'true' in order for Rancher to start
-addLocal: "true"
-
-# Add debug flag to Rancher server
-debug: false
-
-# When starting Rancher for the first time, bootstrap the admin as restricted-admin
-restrictedAdmin: false
-
-# Control how the Rancher agents validate TLS connections
-# Valid options: strict, or system-store
-# Note, for new installations empty will default to strict on 2.9+, or system-store on 2.8 or older
-agentTLSMode: ""
-
-# Extra environment variables passed to the rancher pods.
-# extraEnv:
-# - name: CATTLE_TLS_MIN_VERSION
-#   value: "1.0"
-
-# Fully qualified name to reach your Rancher server
-hostname: {{ .Values.globals.rancher.hostname }}
-
-### ingress ###
-# See the README for details and instructions on adding TLS secrets.
-ingress:
-  # If set to false, ingress will not be created
-  # Defaults to true
-  # options: true, false
-  enabled: true
-  includeDefaultExtraAnnotations: true
-  extraAnnotations:
-    kubernetes.io/ingress.class: {{ .Values.globals.rancher.ingressClass }}
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-  ingressClassName: {{ .Values.globals.rancher.ingressClass }}
-  # backend port number
-  servicePort: 80
-
-  tls:
-    # options: rancher, letsEncrypt, secret
-    source: secret
-    secretName: rancher-tls
-
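-# An illustrative alternative to the secret source above: let the chart request
-# certificates from Let's Encrypt instead (see the letsEncrypt section below).
-# ingress:
-#   tls:
-#     source: letsEncrypt
-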
-### service ###
-# Override to use NodePort or LoadBalancer service type - default is ClusterIP
-service:
-  type: ""
-  annotations: {}
-
-### LetsEncrypt config ###
-# ProTip: The production environment only allows you to register a name 5 times a week.
-#         Use staging until you have your config right.
-letsEncrypt:
-  # email: none@example.com
-  environment: {{ .Values.globals.certs.certIssuerMode }}
-  ingress:
-    # options: traefik, nginx
-    class: {{ .Values.globals.rancher.ingressClass }}
-# If you are using certs signed by a private CA, set this to 'true' and create the 'tls-ca'
-# secret in the 'rancher-system' namespace. See the README.md for details
-privateCA: false
-
-# http[s] proxy server passed into rancher server.
-# proxy: http://<username>:<password>@<url>:<port>
-
-# comma separated list of domains or ip addresses that will not use the proxy
-noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
-
-# Override rancher image location for Air Gap installs
-rancherImage: rancher/rancher
-# rancher/rancher image tag. https://hub.docker.com/r/rancher/rancher/tags/
-# Defaults to .Chart.appVersion
-# rancherImageTag: v2.0.7
-
-# Override imagePullPolicy for rancher server images
-# options: Always, Never, IfNotPresent
-# Defaults to IfNotPresent
-# rancherImagePullPolicy: <pullPolicy>
-
-# Number of Rancher server replicas. Setting this to a negative number will
-# dynamically scale between 0 and abs(replicas) based on the number of
-# available nodes in the cluster.
-replicas: {{ .Values.globals.rancher.replicas }}
-
-# Set priorityClassName to avoid eviction
-priorityClassName: rancher-critical
-
-# Set pod resource requests/limits for Rancher.
-resources: {}
-
-#
-# tls
-#   Where to offload the TLS/SSL encryption
-# - ingress (default)
-# - external
-tls: ingress
-
-systemDefaultRegistry: ""
-
-# Set to use the packaged system charts
-useBundledSystemChart: false
-
-# Set a bootstrap password. If left empty, a random password will be generated.
-bootstrapPassword: {{ .Values.globals.rancher.bootstrapPassword }}
diff --git a/k8s/helmfile.d/values/redis/values.yaml.gotmpl b/k8s/helmfile.d/values/redis/values.yaml.gotmpl
deleted file mode 100644
index d99ff59..0000000
--- a/k8s/helmfile.d/values/redis/values.yaml.gotmpl
+++ /dev/null
@@ -1,2236 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please, note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-##
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-## @param global.redis.password Global Redis&reg; password (overrides `auth.password`)
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: {{ .Values.globals.redis.storageClass }}
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  redis:
-    password: {{ .Values.globals.redis.password }}
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
-      ##
-      adaptSecurityContext: auto
-## @section Common parameters
-##
-
-## @param kubeVersion Override Kubernetes version
-##
-kubeVersion: ""
-## @param nameOverride String to partially override common.names.fullname
-##
-nameOverride: ""
-## @param fullnameOverride String to fully override common.names.fullname
-##
-fullnameOverride: ""
-## @param namespaceOverride String to fully override common.names.namespace
-##
-namespaceOverride: ""
-## @param commonLabels Labels to add to all deployed objects
-##
-commonLabels: {}
-## @param commonAnnotations Annotations to add to all deployed objects
-##
-commonAnnotations: {}
-## @param secretAnnotations Annotations to add to secret
-##
-secretAnnotations: {}
-## @param clusterDomain Kubernetes cluster domain name
-##
-clusterDomain: cluster.local
-## @param extraDeploy Array of extra objects to deploy with the release
-##
-extraDeploy: []
-## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address
-##
-useHostnames: true
-## @param nameResolutionThreshold Failure threshold for internal hostnames resolution
-##
-nameResolutionThreshold: 5
-## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution
-##
-nameResolutionTimeout: 5
-## Enable diagnostic mode in the deployment
-##
-diagnosticMode:
-  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
-  ##
-  enabled: false
-  ## @param diagnosticMode.command Command to override all containers in the deployment
-  ##
-  command:
-    - sleep
-  ## @param diagnosticMode.args Args to override all containers in the deployment
-  ##
-  args:
-    - infinity
-## @section Redis&reg; Image parameters
-##
-
-## Bitnami Redis&reg; image
-## ref: https://hub.docker.com/r/bitnami/redis/tags/
-## @param image.registry [default: REGISTRY_NAME] Redis&reg; image registry
-## @param image.repository [default: REPOSITORY_NAME/redis] Redis&reg; image repository
-## @skip image.tag Redis&reg; image tag (immutable tags are recommended)
-## @param image.digest Redis&reg; image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
-## @param image.pullPolicy Redis&reg; image pull policy
-## @param image.pullSecrets Redis&reg; image pull secrets
-## @param image.debug Enable image debug mode
-##
-image:
-  registry: docker.io
-  repository: bitnami/redis
-  tag: 7.4.2-debian-12-r0
-  digest: ""
-  ## Specify a imagePullPolicy
-  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-  ##
-  pullPolicy: IfNotPresent
-  ## Optionally specify an array of imagePullSecrets.
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## e.g:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Enable debug mode
-  ##
-  debug: false
-## @section Redis&reg; common configuration parameters
-## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration
-##
-
-## @param architecture Redis&reg; architecture. Allowed values: `standalone` or `replication`
-##
-architecture: replication
-## Redis&reg; Authentication parameters
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run
-##
-auth:
-  ## @param auth.enabled Enable password authentication
-  ##
-  enabled: true
-  ## @param auth.sentinel Enable password authentication on sentinels too
-  ##
-  sentinel: true
-  ## @param auth.password Redis&reg; password
-  ## Defaults to a random 10-character alphanumeric string if not set
-  ##
-  password: ""
-  ## @param auth.existingSecret The name of an existing secret with Redis&reg; credentials
-  ## NOTE: When it's set, the previous `auth.password` parameter is ignored
-  ##
-  existingSecret: ""
-  ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret
-  ## NOTE: ignored unless `auth.existingSecret` parameter is set
-  ##
-  existingSecretPasswordKey: ""
-  ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable
-  ##
-  usePasswordFiles: false
-  ## @param auth.usePasswordFileFromSecret Mount password file from secret
-  ##
-  usePasswordFileFromSecret: true
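-## For example, to read the password from a pre-created secret instead of an
-## inline value (names are illustrative):
-## auth:
-##   existingSecret: "redis-credentials"
-##   existingSecretPasswordKey: "redis-password"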
-## @param commonConfiguration [string] Common configuration to be added into the ConfigMap
-## ref: https://redis.io/topics/config
-##
-commonConfiguration: |-
-  # Enable AOF https://redis.io/topics/persistence#append-only-file
-  appendonly yes
-  # Disable RDB persistence, AOF persistence already enabled.
-  save ""
-## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis&reg; nodes
-##
-existingConfigmap: ""
-## @section Redis&reg; master configuration parameters
-##
-master:
-  ## @param master.count Number of Redis&reg; master instances to deploy (experimental, requires additional configuration)
-  ##
-  count: 1
-  ## @param master.revisionHistoryLimit The number of old revisions to retain to allow rollback
-  ## NOTE: Explicitly setting this field to 0 will result in cleaning up all the history, breaking the ability to roll back
-  revisionHistoryLimit: 10
-  ## @param master.configuration Configuration for Redis&reg; master nodes
-  ## ref: https://redis.io/topics/config
-  ##
-  configuration: ""
-  ## @param master.disableCommands Array with Redis&reg; commands to disable on master nodes
-  ## Commands will be completely disabled by renaming each to an empty string.
-  ## ref: https://redis.io/topics/security#disabling-of-specific-commands
-  ##
-  disableCommands:
-    - FLUSHDB
-    - FLUSHALL
-  ## @param master.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param master.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param master.enableServiceLinks Whether information about services should be injected into the pod's environment variables
-  ##
-  enableServiceLinks: true
-  ## @param master.preExecCmds Additional commands to run prior to starting Redis&reg; master
-  ##
-  preExecCmds: []
-  ## @param master.extraFlags Array with additional command line flags for Redis&reg; master
-  ## e.g:
-  ## extraFlags:
-  ##  - "--maxmemory-policy volatile-ttl"
-  ##  - "--repl-backlog-size 1024mb"
-  ##
-  extraFlags: []
-  ## @param master.extraEnvVars Array with extra environment variables to add to Redis&reg; master nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis&reg; master nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis&reg; master nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param master.containerPorts.redis Container port to open on Redis&reg; master nodes
-  ##
-  containerPorts:
-    redis: 6379
-  ## Configure extra options for Redis&reg; containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param master.startupProbe.enabled Enable startupProbe on Redis&reg; master nodes
-  ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param master.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param master.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param master.livenessProbe.enabled Enable livenessProbe on Redis&reg; master nodes
-  ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param master.readinessProbe.enabled Enable readinessProbe on Redis&reg; master nodes
-  ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param master.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Redis&reg; master resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param master.podSecurityContext.enabled Enable Redis&reg; master pods' Security Context
-  ## @param master.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param master.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param master.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param master.podSecurityContext.fsGroup Set Redis&reg; master pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param master.containerSecurityContext.enabled Enable Redis&reg; master containers' Security Context
-  ## @param master.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param master.containerSecurityContext.runAsUser Set Redis&reg; master containers' Security Context runAsUser
-  ## @param master.containerSecurityContext.runAsGroup Set Redis&reg; master containers' Security Context runAsGroup
-  ## @param master.containerSecurityContext.runAsNonRoot Set Redis&reg; master containers' Security Context runAsNonRoot
-  ## @param master.containerSecurityContext.allowPrivilegeEscalation Whether Redis&reg; pod(s) can escalate privileges
-  ## @param master.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ## @param master.containerSecurityContext.seccompProfile.type Set Redis&reg; master containers' Security Context seccompProfile
-  ## @param master.containerSecurityContext.capabilities.drop Set Redis&reg; master containers' Security Context capabilities to drop
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    seccompProfile:
-      type: RuntimeDefault
-    capabilities:
-      drop: ["ALL"]
-  ## @param master.kind Use Deployment, StatefulSet (default), or DaemonSet
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
-  ##
-  kind: StatefulSet
-  ## @param master.schedulerName Alternate scheduler for Redis&reg; master pods
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param master.updateStrategy.type Redis&reg; master statefulset strategy type
-  ## @skip master.updateStrategy.rollingUpdate
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
-    ##
-    type: RollingUpdate
-  ## @param master.minReadySeconds How many seconds a pod must be ready before the next pod is killed during a rolling update
-  ##
-  minReadySeconds: 0
-  ## @param master.priorityClassName Redis&reg; master pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param master.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param master.hostAliases Redis&reg; master pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param master.podLabels Extra labels for Redis&reg; master pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param master.podAnnotations Annotations for Redis&reg; master pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis&reg; master pods
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set
-    ##
-    key: ""
-    ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param master.affinity Affinity for Redis&reg; master pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param master.nodeSelector Node labels for Redis&reg; master pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param master.tolerations Tolerations for Redis&reg; master pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param master.topologySpreadConstraints Spread Constraints for Redis&reg; master pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## E.g.
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: node
-  ##     whenUnsatisfiable: DoNotSchedule
-  ##
-  topologySpreadConstraints: []
-  ## @param master.dnsPolicy DNS Policy for Redis&reg; master pod
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
-  ## E.g.
-  ## dnsPolicy: ClusterFirst
-  ##
-  dnsPolicy: ""
-  ## @param master.dnsConfig DNS Configuration for Redis&reg; master pod
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
-  ## E.g.
-  ## dnsConfig:
-  ##   options:
-  ##   - name: ndots
-  ##     value: "4"
-  ##   - name: single-request-reopen
-  ##
-  dnsConfig: {}
-  ## @param master.lifecycleHooks Lifecycle hooks for the Redis&reg; master container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
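-  ## For illustration only (a sketch, not a chart default): a preStop hook that asks
-  ## Redis&reg; to flush its dataset to disk before the container stops, e.g:
-  ## lifecycleHooks:
-  ##   preStop:
-  ##     exec:
-  ##       command: ["/bin/sh", "-c", "redis-cli save"]
-  ##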
-  ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis&reg; master pod(s)
-  ##
-  extraVolumes: []
-  ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis&reg; master container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param master.sidecars Add additional sidecar containers to the Redis&reg; master pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param master.initContainers Add additional init containers to the Redis&reg; master pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## Persistence parameters
-  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-  ##
-  persistence:
-    ## @param master.persistence.enabled Enable persistence on Redis&reg; master nodes using Persistent Volume Claims
-    ##
-    enabled: true
-    ## @param master.persistence.medium Provide a medium for `emptyDir` volumes.
-    ##
-    medium: ""
-    ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
-    ##
-    sizeLimit: ""
-    ## @param master.persistence.path The path the volume will be mounted at on Redis&reg; master containers
-    ## NOTE: Useful when using different Redis&reg; images
-    ##
-    path: /data
-    ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis&reg; master containers
-    ## NOTE: Useful in dev environments
-    ##
-    subPath: ""
-    ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis&reg; master containers
-    ##
-    subPathExpr: ""
-    ## @param master.persistence.storageClass Persistent Volume storage class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
-    ##
-    storageClass: ""
-    ## @param master.persistence.accessModes Persistent Volume access modes
-    ##
-    accessModes:
-      - {{ .Values.globals.redis.accessMode }}
-    ## @param master.persistence.size Persistent Volume size
-    ##
-    size: {{ .Values.globals.redis.storageSize }}
-    ## @param master.persistence.annotations Additional custom annotations for the PVC
-    ##
-    annotations: {}
-    ## @param master.persistence.labels Additional custom labels for the PVC
-    ##
-    labels: {}
-    ## @param master.persistence.selector Additional labels to match for the PVC
-    ## e.g:
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-    ## @param master.persistence.dataSource Custom PVC data source
-    ##
-    dataSource: {}
-    ## @param master.persistence.existingClaim Use an existing PVC, which must be created manually before being bound
-    ## NOTE: requires master.persistence.enabled: true
-    ##
-    existingClaim: ""
-  ## persistentVolumeClaimRetentionPolicy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet
-  ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-  ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    enabled: false
-    whenScaled: Retain
-    whenDeleted: Retain
-  ## Redis&reg; master service parameters
-  ##
-  service:
-    ## @param master.service.type Redis&reg; master service type
-    ##
-    type: ClusterIP
-    ## @param master.service.portNames.redis Redis&reg; master service port name
-    ##
-    portNames:
-      redis: "tcp-redis"
-    ## @param master.service.ports.redis Redis&reg; master service port
-    ##
-    ports:
-      redis: 6379
-    ## @param master.service.nodePorts.redis Node port for Redis&reg; master
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      redis: ""
-    ## @param master.service.externalTrafficPolicy Redis&reg; master service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param master.service.internalTrafficPolicy Redis&reg; master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/
-    ##
-    internalTrafficPolicy: Cluster
-    ## @param master.service.clusterIP Redis&reg; master service Cluster IP
-    ##
-    clusterIP: ""
-    ## @param master.service.loadBalancerIP Redis&reg; master service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param master.service.loadBalancerClass master service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerClass: ""
-    ## @param master.service.loadBalancerSourceRanges Redis&reg; master service Load Balancer sources
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param master.service.externalIPs Redis&reg; master service External IPs
-    ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
-    ## e.g.
-    ## externalIPs:
-    ##   - 10.10.10.1
-    ##   - 201.22.30.1
-    ##
-    externalIPs: []
-    ## @param master.service.annotations Additional custom annotations for Redis&reg; master service
-    ##
-    annotations: {}
-    ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-  ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods
-  ##
-  terminationGracePeriodSeconds: 30
-  ## ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param master.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
-    ##
-    automountServiceAccountToken: false
-    ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param master.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param master.pdb.minAvailable [object] Minimum number/percentage of pods that should remain scheduled
-  ## @param master.pdb.maxUnavailable [object] Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `master.pdb.minAvailable` and `master.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param master.extraPodSpec Optionally specify extra PodSpec for the Redis&reg; master pod(s)
-  ##
-  extraPodSpec: {}
-  ## @param master.annotations Additional custom annotations for Redis&reg; Master resource
-  ##
-  annotations: {}
-## @section Redis&reg; replicas configuration parameters
-##
-replica:
-  ## @param replica.kind Use either DaemonSet or StatefulSet (default)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
-  ##
-  kind: StatefulSet
-  ## @param replica.replicaCount Number of Redis&reg; replicas to deploy
-  ##
-  replicaCount: 3
-  ## @param replica.revisionHistoryLimit The number of old revisions to retain to allow rollback
-  ## NOTE: Explicitly setting this field to 0 will clean up all revision history, breaking the ability to roll back
-  revisionHistoryLimit: 10
-  ## @param replica.configuration Configuration for Redis&reg; replicas nodes
-  ## ref: https://redis.io/topics/config
-  ##
-  configuration: ""
-  ## @param replica.disableCommands Array with Redis&reg; commands to disable on replicas nodes
-  ## Commands will be completely disabled by renaming each to an empty string.
-  ## ref: https://redis.io/topics/security#disabling-of-specific-commands
-  ##
-  disableCommands:
-    - FLUSHDB
-    - FLUSHALL
-  ## @param replica.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param replica.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param replica.enableServiceLinks Whether information about services should be injected into the pod's environment variables
-  ##
-  enableServiceLinks: true
-  ## @param replica.preExecCmds Additional commands to run prior to starting Redis&reg; replicas
-  ##
-  preExecCmds: []
-  ## @param replica.extraFlags Array with additional command line flags for Redis&reg; replicas
-  ## e.g:
-  ## extraFlags:
-  ##  - "--maxmemory-policy volatile-ttl"
-  ##  - "--repl-backlog-size 1024mb"
-  ##
-  extraFlags: []
-  ## @param replica.extraEnvVars Array with extra environment variables to add to Redis&reg; replicas nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis&reg; replicas nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis&reg; replicas nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param replica.externalMaster.enabled Use external master for bootstrapping
-  ## @param replica.externalMaster.host External master host to bootstrap from
-  ## @param replica.externalMaster.port Port for Redis service external master host
-  ##
-  externalMaster:
-    enabled: false
-    host: ""
-    port: 6379
-  ## @param replica.containerPorts.redis Container port to open on Redis&reg; replicas nodes
-  ##
-  containerPorts:
-    redis: 6379
-  ## Configure extra options for Redis&reg; containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param replica.startupProbe.enabled Enable startupProbe on Redis&reg; replicas nodes
-  ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param replica.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 22
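-  ## NOTE: with the defaults above, a replica has roughly
-  ## initialDelaySeconds + periodSeconds * failureThreshold = 10 + 10 * 22 = 230s
-  ## to become ready before the kubelet restarts the container.
-  ##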
-  ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis&reg; replicas nodes
-  ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis&reg; replicas nodes
-  ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param replica.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Redis&reg; replicas resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param replica.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if replica.resources is set (replica.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param replica.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Pods Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param replica.podSecurityContext.enabled Enable Redis&reg; replicas pods' Security Context
-  ## @param replica.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
-  ## @param replica.podSecurityContext.sysctls Set kernel settings using the sysctl interface
-  ## @param replica.podSecurityContext.supplementalGroups Set filesystem extra groups
-  ## @param replica.podSecurityContext.fsGroup Set Redis&reg; replicas pod's Security Context fsGroup
-  ##
-  podSecurityContext:
-    enabled: true
-    fsGroupChangePolicy: Always
-    sysctls: []
-    supplementalGroups: []
-    fsGroup: 1001
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param replica.containerSecurityContext.enabled Enable Redis&reg; replicas containers' Security Context
-  ## @param replica.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param replica.containerSecurityContext.runAsUser Set Redis&reg; replicas containers' Security Context runAsUser
-  ## @param replica.containerSecurityContext.runAsGroup Set Redis&reg; replicas containers' Security Context runAsGroup
-  ## @param replica.containerSecurityContext.runAsNonRoot Set Redis&reg; replicas containers' Security Context runAsNonRoot
-  ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis&reg; replicas pod's Security Context allowPrivilegeEscalation
-  ## @param replica.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ## @param replica.containerSecurityContext.seccompProfile.type Set Redis&reg; replicas containers' Security Context seccompProfile
-  ## @param replica.containerSecurityContext.capabilities.drop Set Redis&reg; replicas containers' Security Context capabilities to drop
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    seccompProfile:
-      type: RuntimeDefault
-    capabilities:
-      drop: ["ALL"]
-  ## @param replica.schedulerName Alternate scheduler for Redis&reg; replicas pods
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param replica.updateStrategy.type Redis&reg; replicas statefulset strategy type
-  ## @skip replica.updateStrategy.rollingUpdate
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  ##
-  updateStrategy:
-    ## StrategyType
-    ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
-    ##
-    type: RollingUpdate
-  ## @param replica.minReadySeconds How many seconds a pod must be ready before the next pod is killed during a rolling update
-  ##
-  minReadySeconds: 0
-  ## @param replica.priorityClassName Redis&reg; replicas pods' priorityClassName
-  ##
-  priorityClassName: ""
-  ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis&reg; replicas pods
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
-  ##
-  podManagementPolicy: ""
-  ## @param replica.automountServiceAccountToken Mount Service Account token in pod
-  ##
-  automountServiceAccountToken: false
-  ## @param replica.hostAliases Redis&reg; replicas pods host aliases
-  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
-  ##
-  hostAliases: []
-  ## @param replica.podLabels Extra labels for Redis&reg; replicas pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param replica.podAnnotations Annotations for Redis&reg; replicas pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations: {}
-  ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis&reg; replicas pods
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
-  ##
-  shareProcessNamespace: false
-  ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAffinityPreset: ""
-  ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
-  ##
-  podAntiAffinityPreset: soft
-  ## Node affinity preset
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
-  ##
-  nodeAffinityPreset:
-    ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard`
-    ##
-    type: ""
-    ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set
-    ##
-    key: ""
-    ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set
-    ## E.g.
-    ## values:
-    ##   - e2e-az1
-    ##   - e2e-az2
-    ##
-    values: []
-  ## @param replica.affinity Affinity for Redis&reg; replicas pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-  ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set
-  ##
-  affinity: {}
-  ## @param replica.nodeSelector Node labels for Redis&reg; replicas pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-  ##
-  nodeSelector: {}
-  ## @param replica.tolerations Tolerations for Redis&reg; replicas pods assignment
-  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  ##
-  tolerations: []
-  ## @param replica.topologySpreadConstraints Spread Constraints for Redis&reg; replicas pod assignment
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## E.g.
-  ## topologySpreadConstraints:
-  ##   - maxSkew: 1
-  ##     topologyKey: node
-  ##     whenUnsatisfiable: DoNotSchedule
-  ##
-  topologySpreadConstraints: []
-  ## @param replica.dnsPolicy DNS Policy for Redis&reg; replica pods
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
-  ## E.g.
-  ## dnsPolicy: ClusterFirst
-  ##
-  dnsPolicy: ""
-  ## @param replica.dnsConfig DNS Configuration for Redis&reg; replica pods
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
-  ## E.g.
-  ## dnsConfig:
-  ##   options:
-  ##   - name: ndots
-  ##     value: "4"
-  ##   - name: single-request-reopen
-  ##
-  dnsConfig: {}
-  ## @param replica.lifecycleHooks Lifecycle hooks for the Redis&reg; replica container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis&reg; replicas pod(s)
-  ##
-  extraVolumes: []
-  ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis&reg; replicas container(s)
-  ##
-  extraVolumeMounts: []
-  ## @param replica.sidecars Add additional sidecar containers to the Redis&reg; replicas pod(s)
-  ## e.g:
-  ## sidecars:
-  ##   - name: your-image-name
-  ##     image: your-image
-  ##     imagePullPolicy: Always
-  ##     ports:
-  ##       - name: portname
-  ##         containerPort: 1234
-  ##
-  sidecars: []
-  ## @param replica.initContainers Add additional init containers to the Redis&reg; replicas pod(s)
-  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-  ## e.g:
-  ## initContainers:
-  ##  - name: your-image-name
-  ##    image: your-image
-  ##    imagePullPolicy: Always
-  ##    command: ['sh', '-c', 'echo "hello world"']
-  ##
-  initContainers: []
-  ## Persistence Parameters
-  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-  ##
-  persistence:
-    ## @param replica.persistence.enabled Enable persistence on Redis&reg; replicas nodes using Persistent Volume Claims
-    ##
-    enabled: true
-    ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes.
-    ##
-    medium: ""
-    ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
-    ##
-    sizeLimit: ""
-    ## @param replica.persistence.path The path the volume will be mounted at on Redis&reg; replicas containers
-    ## NOTE: Useful when using different Redis&reg; images
-    ##
-    path: /data
-    ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis&reg; replicas containers
-    ## NOTE: Useful in dev environments
-    ##
-    subPath: ""
-    ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis&reg; replicas containers
-    ##
-    subPathExpr: ""
-    ## @param replica.persistence.storageClass Persistent Volume storage class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
-    ##
-    storageClass: ""
-    ## @param replica.persistence.accessModes Persistent Volume access modes
-    ##
-    accessModes:
-      - {{ .Values.globals.redis.accessMode }}
-    ## @param replica.persistence.size Persistent Volume size
-    ##
-    size: {{ .Values.globals.redis.storageSize }}
-    ## @param replica.persistence.annotations Additional custom annotations for the PVC
-    ##
-    annotations: {}
-    ## @param replica.persistence.labels Additional custom labels for the PVC
-    ##
-    labels: {}
-    ## @param replica.persistence.selector Additional labels to match for the PVC
-    ## e.g:
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-    ## @param replica.persistence.dataSource Custom PVC data source
-    ##
-    dataSource: {}
-    ## @param replica.persistence.existingClaim Use an existing PVC, which must be created manually before being bound
-    ## NOTE: requires replica.persistence.enabled: true
-    ##
-    existingClaim: ""
-  ## persistentVolumeClaimRetentionPolicy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ## @param replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet
-  ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-  ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    enabled: false
-    whenScaled: Retain
-    whenDeleted: Retain
-  ## Redis&reg; replicas service parameters
-  ##
-  service:
-    ## @param replica.service.type Redis&reg; replicas service type
-    ##
-    type: ClusterIP
-    ## @param replica.service.ports.redis Redis&reg; replicas service port
-    ##
-    ports:
-      redis: 6379
-    ## @param replica.service.nodePorts.redis Node port for Redis&reg; replicas
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ## NOTE: choose port between <30000-32767>
-    ##
-    nodePorts:
-      redis: ""
-    ## @param replica.service.externalTrafficPolicy Redis&reg; replicas service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param replica.service.internalTrafficPolicy Redis&reg; replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/
-    ##
-    internalTrafficPolicy: Cluster
-    ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param replica.service.clusterIP Redis&reg; replicas service Cluster IP
-    ##
-    clusterIP: ""
-    ## @param replica.service.loadBalancerIP Redis&reg; replicas service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param replica.service.loadBalancerClass replicas service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerClass: ""
-    ## @param replica.service.loadBalancerSourceRanges Redis&reg; replicas service Load Balancer sources
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param replica.service.annotations Additional custom annotations for Redis&reg; replicas service
-    ##
-    annotations: {}
-    ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-  ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods
-  ##
-  terminationGracePeriodSeconds: 30
-  ## Autoscaling configuration
-  ##
-  autoscaling:
-    ## @param replica.autoscaling.enabled Enable replica autoscaling settings
-    ##
-    enabled: false
-    ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling
-    ##
-    minReplicas: 1
-    ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling
-    ##
-    maxReplicas: 11
-    ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling
-    ##
-    targetCPU: ""
-    ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling
-    ##
-    targetMemory: ""
-  ## ServiceAccount configuration
-  ##
-  serviceAccount:
-    ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created
-    ##
-    create: true
-    ## @param replica.serviceAccount.name The name of the ServiceAccount to use.
-    ## If not set and create is true, a name is generated using the common.names.fullname template
-    ##
-    name: ""
-    ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
-    ##
-    automountServiceAccountToken: false
-    ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount
-    ##
-    annotations: {}
-  ## Pod Disruption Budget configuration
-  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
-  ## @param replica.pdb.create Enable/disable a Pod Disruption Budget creation
-  ## @param replica.pdb.minAvailable [object] Minimum number/percentage of pods that should remain scheduled
-  ## @param replica.pdb.maxUnavailable [object] Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `replica.pdb.minAvailable` and `replica.pdb.maxUnavailable` are empty.
-  ##
-  pdb:
-    create: true
-    minAvailable: ""
-    maxUnavailable: ""
-  ## @param replica.extraPodSpec Optionally specify extra PodSpec for the Redis&reg; replicas pod(s)
-  ##
-  extraPodSpec: {}
-  ## @param replica.annotations Additional custom annotations for Redis&reg; replicas resource
-  ##
-  annotations: {}
-## @section Redis&reg; Sentinel configuration parameters
-##
-
-sentinel:
-  ## @param sentinel.enabled Use Redis&reg; Sentinel on Redis&reg; pods.
-  ## IMPORTANT: this will disable the master and replicas services and
-  ## create a single Redis&reg; service exposing both the Redis and Sentinel ports
-  ##
-  enabled: false
-  ## Bitnami Redis&reg; Sentinel image version
-  ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
-  ## @param sentinel.image.registry [default: REGISTRY_NAME] Redis&reg; Sentinel image registry
-  ## @param sentinel.image.repository [default: REPOSITORY_NAME/redis-sentinel] Redis&reg; Sentinel image repository
-  ## @skip sentinel.image.tag Redis&reg; Sentinel image tag (immutable tags are recommended)
-  ## @param sentinel.image.digest Redis&reg; Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param sentinel.image.pullPolicy Redis&reg; Sentinel image pull policy
-  ## @param sentinel.image.pullSecrets Redis&reg; Sentinel image pull secrets
-  ## @param sentinel.image.debug Enable image debug mode
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/redis-sentinel
-    tag: 7.4.2-debian-12-r0
-    digest: ""
-    ## Specify an imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-    ## Enable debug mode
-    ##
-    debug: false
-  ## @param sentinel.annotations Additional custom annotations for Redis&reg; Sentinel resource
-  ##
-  annotations: {}
-  ## @param sentinel.masterSet Master set name
-  ##
-  masterSet: mymaster
-  ## @param sentinel.quorum Sentinel Quorum
-  ##
-  quorum: 2
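-  ## NOTE: the quorum is the number of Sentinels that must agree the master is
-  ## unreachable before a failover is attempted; 2 is a majority of the 3 Sentinels
-  ## deployed alongside the default replica.replicaCount=3.
-  ##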
-  ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out.
-  ##
-  getMasterTimeout: 90
-  ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically fail over to it.
-  ## This also prevents any new replica from starting until the last remaining replica is elected as master, guaranteeing that it is the one elected by Sentinel rather than a newly started replica with no data.
-  ## NOTE: This feature requires a "downAfterMilliseconds" value less than or equal to 2000.
-  ##
-  automateClusterRecovery: false
-  ## @param sentinel.redisShutdownWaitFailover Whether the Redis&reg; master container waits for the failover at shutdown (in addition to the Redis&reg; Sentinel container).
-  ##
-  redisShutdownWaitFailover: true
-  ## Sentinel timing restrictions
-  ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis&reg; node is down
-  ## @param sentinel.failoverTimeout Timeout for performing a failover
-  ##
-  downAfterMilliseconds: 60000
-  failoverTimeout: 180000
-  ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover
-  ##
-  parallelSyncs: 1
-  ## @param sentinel.configuration Configuration for Redis&reg; Sentinel nodes
-  ## ref: https://redis.io/topics/sentinel
-  ##
-  configuration: ""
-  ## @param sentinel.command Override default container command (useful when using custom images)
-  ##
-  command: []
-  ## @param sentinel.args Override default container args (useful when using custom images)
-  ##
-  args: []
-  ## @param sentinel.enableServiceLinks Whether information about services should be injected into the pod's environment variables
-  ##
-  enableServiceLinks: true
-  ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis&reg; Sentinel
-  ##
-  preExecCmds: []
-  ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis&reg; Sentinel nodes
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis&reg; Sentinel nodes
-  ##
-  extraEnvVarsCM: ""
-  ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis&reg; Sentinel nodes
-  ##
-  extraEnvVarsSecret: ""
-  ## @param sentinel.externalMaster.enabled Use external master for bootstrapping
-  ## @param sentinel.externalMaster.host External master host to bootstrap from
-  ## @param sentinel.externalMaster.port Port for Redis service external master host
-  ##
-  externalMaster:
-    enabled: false
-    host: ""
-    port: 6379
-  ## @param sentinel.containerPorts.sentinel Container port to open on Redis&reg; Sentinel nodes
-  ##
-  containerPorts:
-    sentinel: 26379
-  ## Configure extra options for Redis&reg; containers' liveness and readiness probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
-  ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis&reg; Sentinel nodes
-  ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 22
-  ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis&reg; Sentinel nodes
-  ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 10
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 6
-  ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis&reg; Sentinel nodes
-  ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 20
-    periodSeconds: 5
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 6
-  ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## Persistence parameters
-  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-  ##
-  persistence:
-    ## @param sentinel.persistence.enabled Enable persistence on Redis&reg; sentinel nodes using Persistent Volume Claims (Experimental)
-    ##
-    enabled: false
-    ## @param sentinel.persistence.storageClass Persistent Volume storage class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
-    ##
-    storageClass: ""
-    ## @param sentinel.persistence.accessModes Persistent Volume access modes
-    ##
-    accessModes:
-      - ReadWriteOnce
-    ## @param sentinel.persistence.size Persistent Volume size
-    ##
-    size: 100Mi
-    ## @param sentinel.persistence.annotations Additional custom annotations for the PVC
-    ##
-    annotations: {}
-    ## @param sentinel.persistence.labels Additional custom labels for the PVC
-    ##
-    labels: {}
-    ## @param sentinel.persistence.selector Additional labels to match for the PVC
-    ## e.g:
-    ## selector:
-    ##   matchLabels:
-    ##     app: my-app
-    ##
-    selector: {}
-    ## @param sentinel.persistence.dataSource Custom PVC data source
-    ##
-    dataSource: {}
-    ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes.
-    ##
-    medium: ""
-    ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
-    ##
-    sizeLimit: ""
-  ## persistentVolumeClaimRetentionPolicy
-  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-  ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet
-  ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
-  ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
-  ##
-  persistentVolumeClaimRetentionPolicy:
-    enabled: false
-    whenScaled: Retain
-    whenDeleted: Retain
-  ## Redis&reg; Sentinel resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param sentinel.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if sentinel.resources is set (sentinel.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param sentinel.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param sentinel.containerSecurityContext.enabled Enable Redis&reg; Sentinel containers' Security Context
-  ## @param sentinel.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param sentinel.containerSecurityContext.runAsUser Set Redis&reg; Sentinel containers' Security Context runAsUser
-  ## @param sentinel.containerSecurityContext.runAsGroup Set Redis&reg; Sentinel containers' Security Context runAsGroup
-  ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis&reg; Sentinel containers' Security Context runAsNonRoot
-  ## @param sentinel.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis&reg; Sentinel containers' Security Context allowPrivilegeEscalation
-  ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis&reg; Sentinel containers' Security Context seccompProfile
-  ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis&reg; Sentinel containers' Security Context capabilities to drop
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    seccompProfile:
-      type: RuntimeDefault
-    capabilities:
-      drop: ["ALL"]
-  ## @param sentinel.lifecycleHooks Lifecycle hooks for the Redis&reg; Sentinel container(s) to automate configuration before or after startup
-  ##
-  lifecycleHooks: {}
-  ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis&reg; Sentinel
-  ##
-  extraVolumes: []
-  ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis&reg; Sentinel container(s)
-  ##
-  extraVolumeMounts: []
-  ## Redis&reg; Sentinel service parameters
-  ## Note: values passed in this section also configure the master service, unless the sentinel.masterService is explicitly overridden.
-  service:
-    ## @param sentinel.service.type Redis&reg; Sentinel service type
-    ##
-    type: ClusterIP
-    ## @param sentinel.service.ports.redis Redis&reg; service port for Redis&reg;
-    ## @param sentinel.service.ports.sentinel Redis&reg; service port for Redis&reg; Sentinel
-    ##
-    ports:
-      redis: 6379
-      sentinel: 26379
-    ## @param sentinel.service.nodePorts.redis Node port for Redis&reg;
-    ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ## NOTE: choose port between <30000-32767>
-    ## NOTE: If these values are left blank, they will be generated by the ports-configmap
-    ##       If setting them manually, leave at least replica.replicaCount + 1 ports between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to account for the ports created by incrementing that base port
-    ##
-    nodePorts:
-      redis: ""
-      sentinel: ""
-    ## @param sentinel.service.externalTrafficPolicy Redis&reg; Sentinel service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param sentinel.service.clusterIP Redis&reg; Sentinel service Cluster IP
-    ##
-    clusterIP: ""
-    ## @param sentinel.service.createMaster Enable master service pointing to the current master (experimental)
-    ## NOTE: rbac.create needs to be set to true
-    ##
-    createMaster: false
-
-    ## @param sentinel.service.loadBalancerIP Redis&reg; Sentinel service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param sentinel.service.loadBalancerClass sentinel service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerClass: ""
-    ## @param sentinel.service.loadBalancerSourceRanges Redis&reg; Sentinel service Load Balancer sources
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param sentinel.service.annotations Additional custom annotations for Redis&reg; Sentinel service
-    ##
-    annotations: {}
-    ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-    ## Headless service properties
-    ##
-    headless:
-      ## @param sentinel.service.headless.annotations Annotations for the headless service.
-      ##
-      annotations: {}
-      ## @param sentinel.service.headless.extraPorts Optionally specify extra ports to expose for the headless service.
-      ## Example:
-      ## extraPorts:
-      ##   - name: my-custom-port
-      ##     port: 12345
-      ##     protocol: TCP
-      ##     targetPort: 12345
-      ##
-      extraPorts: []
-  ## Redis&reg; master service parameters
-  ##
-  masterService:
-    ## @param sentinel.masterService.enabled Enable master service pointing to the current master (experimental)
-    ## NOTE: rbac.create needs to be set to true
-    ##
-    enabled: false
-    ## @param sentinel.masterService.type Redis&reg; Sentinel master service type
-    ##
-    type: ClusterIP
-    ## @param sentinel.masterService.ports.redis Redis&reg; service port for Redis&reg;
-    ##
-    ports:
-      redis: 6379
-    ## @param sentinel.masterService.nodePorts.redis Node port for Redis&reg;
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-    ## NOTE: choose port between <30000-32767>
-    ## NOTE: If these values are left blank, they will be generated by the ports-configmap
-    ##       If setting them manually, leave at least replica.replicaCount + 1 ports between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to account for the ports created by incrementing that base port
-    ##
-    nodePorts:
-      redis: ""
-    ## @param sentinel.masterService.externalTrafficPolicy Redis&reg; master service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: ""
-    ## @param sentinel.masterService.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param sentinel.masterService.clusterIP Redis&reg; master service Cluster IP
-    ##
-    clusterIP: ""
-    ## @param sentinel.masterService.loadBalancerIP Redis&reg; master service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param sentinel.masterService.loadBalancerClass master service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerClass: ""
-    ## @param sentinel.masterService.loadBalancerSourceRanges Redis&reg; master service Load Balancer sources
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param sentinel.masterService.annotations Additional custom annotations for Redis&reg; master service
-    ##
-    annotations: {}
-    ## @param sentinel.masterService.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
-    ## If "ClientIP", consecutive client requests will be directed to the same Pod
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-    ##
-    sessionAffinity: None
-    ## @param sentinel.masterService.sessionAffinityConfig Additional settings for the sessionAffinity
-    ## sessionAffinityConfig:
-    ##   clientIP:
-    ##     timeoutSeconds: 300
-    ##
-    sessionAffinityConfig: {}
-  ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods
-  ##
-  terminationGracePeriodSeconds: 30
-  ## @param sentinel.extraPodSpec Optionally specify extra PodSpec for the Redis&reg; Sentinel pod(s)
-  ##
-  extraPodSpec: {}
-## @section Other Parameters
-##
-
-## @param serviceBindings.enabled Create secret for service binding (Experimental)
-## Ref: https://servicebinding.io/service-provider/
-##
-serviceBindings:
-  enabled: false
-## Network Policy configuration
-## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
-##
-networkPolicy:
-  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources
-  ##
-  enabled: true
-  ## @param networkPolicy.allowExternal Don't require client label for connections
-  ## When set to false, only pods with the correct client label will have network access to the ports
-  ## Redis&reg; is listening on. When true, Redis&reg; will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-  ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraIngress: []
-  ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
-  metrics:
-    ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint
-    ## When set to false, only pods with the correct client label will have network access to the metrics port
-    ##
-    allowExternal: true
-    ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint
-    ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint
-    ##
-    ingressNSMatchLabels: {}
-    ingressNSPodMatchLabels: {}
-## PodSecurityPolicy configuration
-## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-##
-podSecurityPolicy:
-  ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy was deprecated in Kubernetes v1.21 and removed in v1.25
-  ##
-  create: false
-  ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules
-  ##
-  enabled: false
-## RBAC configuration
-##
-rbac:
-  ## @param rbac.create Specifies whether RBAC resources should be created
-  ##
-  create: false
-  ## @param rbac.rules Custom RBAC rules to set
-  ## e.g:
-  ## rules:
-  ##   - apiGroups:
-  ##       - ""
-  ##     resources:
-  ##       - pods
-  ##     verbs:
-  ##       - get
-  ##       - list
-  ##
-  rules: []
-## ServiceAccount configuration
-##
-serviceAccount:
-  ## @param serviceAccount.create Specifies whether a ServiceAccount should be created
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the ServiceAccount to use.
-  ## If not set and create is true, a name is generated using the common.names.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
-  ##
-  automountServiceAccountToken: false
-  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
-  ##
-  annotations: {}
-## Redis&reg; Pod Disruption Budget configuration
-## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-## @param pdb DEPRECATED Please use `master.pdb` and `replica.pdb` values instead
-##
-pdb: {}
-## TLS configuration
-##
-tls:
-  ## @param tls.enabled Enable TLS traffic
-  ##
-  enabled: false
-  ## @param tls.authClients Require clients to authenticate
-  ##
-  authClients: true
-  ## @param tls.autoGenerated Enable autogenerated certificates
-  ##
-  autoGenerated: false
-  ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates
-  ##
-  existingSecret: ""
-  ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead.
-  ##
-  certificatesSecret: ""
-  ## @param tls.certFilename Certificate filename
-  ##
-  certFilename: ""
-  ## @param tls.certKeyFilename Certificate Key filename
-  ##
-  certKeyFilename: ""
-  ## @param tls.certCAFilename CA Certificate filename
-  ##
-  certCAFilename: ""
-  ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers)
-  ##
-  dhParamsFilename: ""
-## @section Metrics Parameters
-##
-metrics:
-  ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis&reg; metrics
-  ##
-  enabled: true
-  ## Bitnami Redis&reg; Exporter image
-  ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/
-  ## @param metrics.image.registry [default: REGISTRY_NAME] Redis&reg; Exporter image registry
-  ## @param metrics.image.repository [default: REPOSITORY_NAME/redis-exporter] Redis&reg; Exporter image repository
-  ## @skip metrics.image.tag Redis&reg; Exporter image tag (immutable tags are recommended)
-  ## @param metrics.image.digest Redis&reg; Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param metrics.image.pullPolicy Redis&reg; Exporter image pull policy
-  ## @param metrics.image.pullSecrets Redis&reg; Exporter image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/redis-exporter
-    tag: 1.67.0-debian-12-r0
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param metrics.containerPorts.http Metrics HTTP container port
-  ##
-  containerPorts:
-    http: 9121
-  ## Configure extra options for Redis&reg; containers' liveness, readiness & startup probes
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
-  ## @param metrics.startupProbe.enabled Enable startupProbe on Redis&reg; replicas nodes
-  ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
-  ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
-  ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
-  ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
-  ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
-  ##
-  startupProbe:
-    enabled: false
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis&reg; replicas nodes
-  ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-  ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
-  ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-  ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
-  ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
-  ##
-  livenessProbe:
-    enabled: true
-    initialDelaySeconds: 10
-    periodSeconds: 10
-    timeoutSeconds: 5
-    successThreshold: 1
-    failureThreshold: 5
-  ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis&reg; replicas nodes
-  ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-  ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
-  ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-  ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
-  ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
-  ##
-  readinessProbe:
-    enabled: true
-    initialDelaySeconds: 5
-    periodSeconds: 10
-    timeoutSeconds: 1
-    successThreshold: 1
-    failureThreshold: 3
-  ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
-  ##
-  customStartupProbe: {}
-  ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
-  ##
-  customLivenessProbe: {}
-  ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
-  ##
-  customReadinessProbe: {}
-  ## @param metrics.command Override default metrics container init command (useful when using custom images)
-  ##
-  command: []
-  ## @param metrics.redisTargetHost A way to specify an alternative Redis&reg; hostname
-  ## Useful for certificate CN/SAN matching
-  ##
-  redisTargetHost: "localhost"
-  ## @param metrics.extraArgs Extra arguments for Redis&reg; exporter, for example:
-  ## e.g.:
-  ## extraArgs:
-  ##   check-keys: myKey,myOtherKey
-  ##
-  extraArgs: {}
-  ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis&reg; exporter
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param metrics.containerSecurityContext.enabled Enable Redis&reg; exporter containers' Security Context
-  ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param metrics.containerSecurityContext.runAsUser Set Redis&reg; exporter containers' Security Context runAsUser
-  ## @param metrics.containerSecurityContext.runAsGroup Set Redis&reg; exporter containers' Security Context runAsGroup
-  ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis&reg; exporter containers' Security Context runAsNonRoot
-  ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis&reg; exporter containers' Security Context allowPrivilegeEscalation
-  ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis&reg; exporter containers' Security Context seccompProfile
-  ## @param metrics.containerSecurityContext.capabilities.drop Set Redis&reg; exporter containers' Security Context capabilities to drop
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    seccompProfile:
-      type: RuntimeDefault
-    capabilities:
-      drop: ["ALL"]
-  ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis&reg; metrics sidecar
-  ##
-  extraVolumes: []
-  ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis&reg; metrics sidecar
-  ##
-  extraVolumeMounts: []
-  ## Redis&reg; exporter resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## @param metrics.podLabels Extra labels for Redis&reg; exporter pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  ##
-  podLabels: {}
-  ## @param metrics.podAnnotations [object] Annotations for Redis&reg; exporter pods
-  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-  ##
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "9121"
-  ## Redis&reg; exporter service parameters
-  ##
-  service:
-    ## @param metrics.service.enabled Create Service resource(s) for scraping metrics using PrometheusOperator ServiceMonitor, can be disabled when using a PodMonitor
-    ##
-    enabled: true
-    ## @param metrics.service.type Redis&reg; exporter service type
-    ##
-    type: ClusterIP
-    ## @param metrics.service.ports.http Redis&reg; exporter service port
-    ##
-    ports:
-      http: 9121
-    ## @param metrics.service.externalTrafficPolicy Redis&reg; exporter service external traffic policy
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-    ##
-    externalTrafficPolicy: Cluster
-    ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
-    ##
-    extraPorts: []
-    ## @param metrics.service.loadBalancerIP Redis&reg; exporter service Load Balancer IP
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-    ##
-    loadBalancerIP: ""
-    ## @param metrics.service.loadBalancerClass exporter service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific)
-    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-    ##
-    loadBalancerClass: ""
-    ## @param metrics.service.loadBalancerSourceRanges Redis&reg; exporter service Load Balancer sources
-    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g.
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param metrics.service.annotations Additional custom annotations for Redis&reg; exporter service
-    ##
-    annotations: {}
-    ## @param metrics.service.clusterIP Redis&reg; exporter service Cluster IP
-    ##
-    clusterIP: ""
-  ## Prometheus Service Monitor
-  ## ref: https://github.com/coreos/prometheus-operator
-  ##      https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.port the service port to scrape metrics from
-    ##
-    port: http-metrics
-    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator
-    ##
-    enabled: false
-    ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped
-    ##
-    interval: 30s
-    ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.relabelings Metrics RelabelConfigs to apply to samples before scraping.
-    ##
-    relabelings: []
-    ## @skip metrics.serviceMonitor.relabellings DEPRECATED: Use `metrics.serviceMonitor.relabelings` instead.
-    ##
-    relabellings: []
-    ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
-    ##
-    additionalLabels: {}
-    ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics
-    ##
-    podTargetLabels: []
-    ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod
-    ##
-    sampleLimit: false
-    ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped
-    ##
-    targetLimit: false
-    ## @param metrics.serviceMonitor.additionalEndpoints Additional endpoints to scrape (e.g. sentinel)
-    ##
-    additionalEndpoints: []
-    # Uncomment the following to scrape Sentinel metrics. To distinguish Sentinel
-    # metrics from Redis container metrics, also add a metricRelabelings entry
-    # with a label like app=redis to the main Redis pod-monitor port.
-    # - interval: "30s"
-    #   path: "/scrape"
-    #   port: "http-metrics"
-    #   params:
-    #     target: ["localhost:26379"]
-    #   metricRelabelings:
-    #     - targetLabel: "app"
-    #       replacement: "sentinel"
-  ## Prometheus Pod Monitor
-  ## ref: https://github.com/coreos/prometheus-operator
-  ##      https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor
-  ##
-  podMonitor:
-    ## @param metrics.podMonitor.port the pod port to scrape metrics from
-    ##
-    port: metrics
-    ## @param metrics.podMonitor.enabled Create PodMonitor resource(s) for scraping metrics using PrometheusOperator
-    ##
-    enabled: false
-    ## @param metrics.podMonitor.namespace The namespace in which the PodMonitor will be created
-    ##
-    namespace: ""
-    ## @param metrics.podMonitor.interval The interval at which metrics should be scraped
-    ##
-    interval: 30s
-    ## @param metrics.podMonitor.scrapeTimeout The timeout after which the scrape is ended
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.podMonitor.relabelings Metrics RelabelConfigs to apply to samples before scraping.
-    ##
-    relabelings: []
-    ## @skip metrics.podMonitor.relabellings DEPRECATED: Use `metrics.podMonitor.relabelings` instead.
-    ##
-    relabellings: []
-    ## @param metrics.podMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - targetLabel: "app"
-    #   replacement: "redis"
-    ## @param metrics.podMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
-    ##
-    honorLabels: false
-    ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitor resource(s) can be discovered by Prometheus
-    ##
-    additionalLabels: {}
-    ## @param metrics.podMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics
-    ##
-    podTargetLabels: []
-    ## @param metrics.podMonitor.sampleLimit Limit of how many samples should be scraped from every Pod
-    ##
-    sampleLimit: false
-    ## @param metrics.podMonitor.targetLimit Limit of how many targets should be scraped
-    ##
-    targetLimit: false
-    ## @param metrics.podMonitor.additionalEndpoints Additional endpoints to scrape (e.g. sentinel)
-    ##
-    additionalEndpoints: []
-    # - interval: "30s"
-    #   path: "/scrape"
-    #   port: "metrics"
-    #   params:
-    #     target: ["localhost:26379"]
-    #   metricRelabelings:
-    #     - targetLabel: "app"
-    #       replacement: "sentinel"
-  ## Custom PrometheusRule to be defined
-  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
-  ##
-  prometheusRule:
-    ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator
-    ##
-    enabled: false
-    ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created
-    ##
-    namespace: ""
-    ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule
-    ##
-    additionalLabels: {}
-    rules: []
-## @section Init Container Parameters
-##
-
-## 'volumePermissions' init container parameters
-## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
-##   based on the *podSecurityContext/*containerSecurityContext parameters
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
-  ##
-  enabled: false
-  ## OS Shell + Utility image
-  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
-  ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
-  ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r34
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-  ## Init container Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
-  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
-  ##   data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
-  ##   "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
-  ##
-  containerSecurityContext:
-    seLinuxOptions: {}
-    runAsUser: 0
-
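-  ## Illustrative example (not a default): on OpenShift, let the init container
-  ## auto-detect its uid/gid instead of running as root:
-  ## containerSecurityContext:
-  ##   runAsUser: "auto"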
-  ## @param volumePermissions.extraEnvVars Array with extra environment variables to add to volume permissions init container.
-  ## e.g:
-  ## extraEnvVars:
-  ##   - name: FOO
-  ##     value: "bar"
-  ##
-  extraEnvVars: []
-
-## Kubectl InitContainer
-## used by Sentinel to update the isMaster label on the Redis(TM) pods
-##
-kubectl:
-  ## Bitnami Kubectl image version
-  ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
-  ## @param kubectl.image.registry [default: REGISTRY_NAME] Kubectl image registry
-  ## @param kubectl.image.repository [default: REPOSITORY_NAME/kubectl] Kubectl image repository
-  ## @skip kubectl.image.tag Kubectl image tag (immutable tags are recommended), by default, using the current version
-  ## @param kubectl.image.digest Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param kubectl.image.pullPolicy Kubectl image pull policy
-  ## @param kubectl.image.pullSecrets Kubectl pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/kubectl
-    tag: 1.32.0-debian-12-r0
-    digest: ""
-    ## Specify a imagePullPolicy
-    ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
-    ##
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param kubectl.command kubectl command to execute
-  ##
-  command: ["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"]
-  ## Configure Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  ## @param kubectl.containerSecurityContext.enabled Enable kubectl containers' Security Context
-  ## @param kubectl.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
-  ## @param kubectl.containerSecurityContext.runAsUser Set kubectl containers' Security Context runAsUser
-  ## @param kubectl.containerSecurityContext.runAsGroup Set kubectl containers' Security Context runAsGroup
-  ## @param kubectl.containerSecurityContext.runAsNonRoot Set kubectl containers' Security Context runAsNonRoot
-  ## @param kubectl.containerSecurityContext.allowPrivilegeEscalation Set kubectl containers' Security Context allowPrivilegeEscalation
-  ## @param kubectl.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
-  ## @param kubectl.containerSecurityContext.seccompProfile.type Set kubectl containers' Security Context seccompProfile
-  ## @param kubectl.containerSecurityContext.capabilities.drop Set kubectl containers' Security Context capabilities to drop
-  ##
-  containerSecurityContext:
-    enabled: true
-    seLinuxOptions: {}
-    runAsUser: 1001
-    runAsGroup: 1001
-    runAsNonRoot: true
-    allowPrivilegeEscalation: false
-    readOnlyRootFilesystem: true
-    seccompProfile:
-      type: RuntimeDefault
-    capabilities:
-      drop: ["ALL"]
-  ## Bitnami Kubectl resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param kubectl.resources.limits The resources limits for the kubectl containers
-  ## @param kubectl.resources.requests The requested resources for the kubectl containers
-  ##
-  resources:
-    limits: {}
-    requests: {}
-
-## init-sysctl container parameters
-## used to perform sysctl operations that modify kernel settings (sometimes needed to avoid warnings)
-##
-sysctl:
-  ## @param sysctl.enabled Enable init container to modify Kernel settings
-  ##
-  enabled: false
-  ## OS Shell + Utility image
-  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
-  ## @param sysctl.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
-  ## @param sysctl.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
-  ## @skip sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended)
-  ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
-  ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy
-  ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 12-debian-12-r34
-    digest: ""
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## @param sysctl.command Override default init-sysctl container command (useful when using custom images)
-  ##
-  command: []
-  ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys`
-  ##
-  mountHostSys: false
-  ## Init container's resource requests and limits
-  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
-  ## @param sysctl.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if sysctl.resources is set (sysctl.resources is recommended for production).
-  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
-  ##
-  resourcesPreset: "nano"
-  ## @param sysctl.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
-  ## Example:
-  ## resources:
-  ##   requests:
-  ##     cpu: 2
-  ##     memory: 512Mi
-  ##   limits:
-  ##     cpu: 3
-  ##     memory: 1024Mi
-  ##
-  resources: {}
-## @section useExternalDNS Parameters
-##
-## @param useExternalDNS.enabled Enable the annotations required for `external-dns` to pick up the services. Note this requires a working installation of `external-dns` to be usable.
-## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled.
-## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations.
-## @param useExternalDNS.suffix The DNS suffix used when `external-dns` is enabled. Note that the full name of the release is prepended to this suffix.
-##
-useExternalDNS:
-  enabled: false
-  suffix: ""
-  annotationKey: external-dns.alpha.kubernetes.io/
-  additionalAnnotations: {}
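-## Illustrative example (not a default): with enabled=true and suffix
-## "example.com", a release with full name "redis" would get an annotation
-## roughly like:
-##   external-dns.alpha.kubernetes.io/hostname: redis.example.com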
-
diff --git a/k8s/helmfile.d/values/traefik/values.yaml.gotmpl b/k8s/helmfile.d/values/traefik/values.yaml.gotmpl
deleted file mode 100644
index 030cfa9..0000000
--- a/k8s/helmfile.d/values/traefik/values.yaml.gotmpl
+++ /dev/null
@@ -1,61 +0,0 @@
-globalArguments:
-  - "--global.sendanonymoususage=false"
-  - "--global.checknewversion=false"
-
-additionalArguments:
-  - "--serversTransport.insecureSkipVerify=true"
-  - "--log.level=INFO"
-
-deployment:
-  enabled: true
-  replicas: 3
-  annotations: {}
-  podAnnotations: {}
-  additionalContainers: []
-  initContainers: []
-
-ports:
-  web:
-    redirections:
-      entrypoint:
-        to: websecure
-        scheme: https
-        permanent: true
-  websecure:
-    http3:
-      enabled: true
-    advertisedPort: 4443
-    tls:
-      enabled: true
-
-ingressRoute:
-  dashboard:
-    enabled: false
-
-ingressClass:
-  name: {{ .Values.globals.traefik.ingressClass }}
-providers:
-  kubernetesCRD:
-    enabled: true
-    ingressClass: {{ .Values.globals.traefik.ingressClass }}
-    allowExternalNameServices: true
-  kubernetesIngress:
-    enabled: true
-    ingressClass: {{ .Values.globals.traefik.ingressClass }}
-    allowExternalNameServices: true
-    publishedService:
-      enabled: false
-
-rbac:
-  enabled: true
-
-service:
-  enabled: true
-  type: LoadBalancer
-  annotations: {}
-  labels: {}
-  spec:
-    loadBalancerIP: {{ .Values.globals.traefik.loadBalancerIP }}
-  loadBalancerSourceRanges: []
-  externalIPs: []
-
diff --git a/k8s/helmfile.d/values/uptime-kuma/values.yaml.gotmpl b/k8s/helmfile.d/values/uptime-kuma/values.yaml.gotmpl
deleted file mode 100644
index b295081..0000000
--- a/k8s/helmfile.d/values/uptime-kuma/values.yaml.gotmpl
+++ /dev/null
@@ -1,234 +0,0 @@
-# Default values for uptime-kuma.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-image:
-  repository: louislam/uptime-kuma
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: "1.23.13-debian"
-
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-# -- A custom namespace to override the default namespace for the deployed resources.
-namespaceOverride: ""
-
-# If this option is set to false, a StatefulSet is used instead of a Deployment
-useDeploy: true
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: false
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-
-podAnnotations: {}
-podLabels:
-  {}
-  # app: uptime-kuma
-podEnv: []
-  # optional additional environment variables
-  # - name: "A_VARIABLE"
-  #   value: "a-value"
-
-podSecurityContext:
-  {}
-  # fsGroup: 2000
-
-securityContext:
-  {}
-  # capabilities:
-  #   drop:
-  #   - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-
-service:
-  type: ClusterIP
-  port: 3001
-  nodePort:
-  annotations: {}
-
-ingress:
-  enabled: true
-  className: {{ .Values.globals.uptimeKuma.ingressClass }}
-  extraLabels:
-    {}
-    # vhost: uptime-kuma.company.corp
-  annotations:
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-    kubernetes.io/ingress.class: {{ .Values.globals.uptimeKuma.ingressClass }}
-    # nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
-    # nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
-    # nginx.ingress.kubernetes.io/server-snippets: |
-      # location / {
-        # proxy_set_header Upgrade $http_upgrade;
-        # proxy_http_version 1.1;
-        # proxy_set_header X-Forwarded-Host $http_host;
-        # proxy_set_header X-Forwarded-Proto $scheme;
-        # proxy_set_header X-Forwarded-For $remote_addr;
-        # proxy_set_header Host $host;
-        # proxy_set_header Connection "upgrade";
-        # proxy_set_header X-Real-IP $remote_addr;
-        # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        # proxy_set_header   Upgrade $http_upgrade;
-        # proxy_cache_bypass $http_upgrade;
-      # }
-  hosts:
-    {{- range .Values.globals.uptimeKuma.hosts }}
-    - host: {{ . }}
-      paths:
-        - path: /
-          pathType: ImplementationSpecific
-    {{- end}}
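-  # Illustrative: with globals.uptimeKuma.hosts set to ["uptime.example.com"],
-  # the range above renders to:
-  #   - host: uptime.example.com
-  #     paths:
-  #       - path: /
-  #         pathType: ImplementationSpecific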
-
-  tls:
-    []
-    # - secretName: chart-example-tls
-    #   hosts:
-    #     - chart-example.local
-
-resources:
-  # The chart usually recommends leaving resources unset as a conscious choice
-  # for the user (it also helps on resource-constrained environments such as
-  # Minikube), but modest requests and limits are specified here.
-  limits:
-    cpu: 200m
-    memory: 256Mi
-  requests:
-    cpu: 100m
-    memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-livenessProbe:
-  enabled: true
-  failureThreshold: 3
-  # Uptime-Kuma recommends configuring a delay of 180 seconds so the server has fully started.
-  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.go#L3
-  initialDelaySeconds: 180
-  periodSeconds: 10
-  successThreshold: 1
-  timeoutSeconds: 2
-  # The Node.js version of this healthcheck is no longer supported, so we don't specify a node command.
-  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.js#L6
-  exec:
-    command:
-      - "extra/healthcheck"
-
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 10
-  periodSeconds: 10
-  timeoutSeconds: 1
-  failureThreshold: 3
-  successThreshold: 1
-  exec:
-    command: []
-  httpGet:
-    path: /
-    port: 3001
-    scheme: HTTP
-    httpHeaders: []
-
-volume:
-  enabled: true
-  accessMode: ReadWriteMany
-  size: 4Gi
-  # The storage class is set from globals here; drop storageClassName to fall
-  # back to the cluster default.
-  storageClassName: {{ .Values.globals.uptimeKuma.storageClass }}
-  # Reuse your own pre-existing PVC.
-  existingClaim: ""
-
-# -- A list of additional volumes to be added to the pod
-additionalVolumes:
-  []
-  # - name: "additional-certificates"
-  #   configMap:
-  #     name: "additional-certificates"
-  #     optional: true
-  #     defaultMode: 420
-
-# -- A list of additional volumeMounts to be added to the pod
-additionalVolumeMounts:
-  []
-  # - name: "additional-certificates"
-  #   mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
-  #   readOnly: true
-  #   subPath: "additional-ca.pem"
-
-strategy:
-  type: Recreate
-
-# Prometheus ServiceMonitor configuration
-serviceMonitor:
-  enabled: false
-  # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
-  interval: 60s
-  # -- Timeout if metrics can't be retrieved in given time interval
-  scrapeTimeout: 10s
-  # -- Scheme to use when scraping, e.g. http (default) or https.
-  scheme: ~
-  # -- TLS configuration to use when scraping, only applicable for scheme https.
-  tlsConfig: {}
-  # -- Prometheus [RelabelConfigs] to apply to samples before scraping
-  relabelings: []
-  # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
-  metricRelabelings: []
-  # -- Prometheus ServiceMonitor selector, only select Prometheus's with these
-  # labels (if not set, select any Prometheus)
-  selector: {}
-
-  # -- Namespace where the ServiceMonitor resource should be created, default is
-  # the same as the release namespace
-  namespace: ~
-  # -- Additional labels to add to the ServiceMonitor
-  additionalLabels: {}
-  # -- Additional annotations to add to the ServiceMonitor
-  annotations: {}
-
-  # -- BasicAuth credentials for scraping metrics, use API token and any string for username
-  # basicAuth:
-  #   username: "metrics"
-  #   password: ""
-
-# -- Use this option to set a custom DNS policy to the created deployment
-dnsPolicy: ""
-
-# -- Use this option to set custom DNS configurations to the created deployment
-dnsConfig: {}
-
-# -- Use this option to set custom PriorityClass to the created deployment
-# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
-priorityClassName: ""
-
-# -- Create a NetworkPolicy
-networkPolicy:
-  # -- Enable/disable Network Policy
-  enabled: false
-  # -- Enable/disable Ingress policy type
-  ingress: true
-  # -- Enable/disable Egress policy type
-  egress: true
-  # -- Allow incoming connections only from specific Pods
-  # When set to true, uptime-kuma will accept connections from any source.
-  # When false, only Pods with the label \{\{ include "uptime-kuma.fullname" . \}\}-client=true will have network access
-  allowExternal: true
-  # -- Selects particular namespaces for which all Pods are allowed as ingress sources
-  namespaceSelector: {}
-  #  matchLabels:
-  #    role: frontend
-  #  matchExpressions:
-  #   - {key: role, operator: In, values: [frontend]}
-
diff --git a/k8s/infrastructure/traefik/traefik.yaml b/k8s/infrastructure/traefik/traefik.yaml
index df919f4..e129fa8 100644
--- a/k8s/infrastructure/traefik/traefik.yaml
+++ b/k8s/infrastructure/traefik/traefik.yaml
@@ -64,21 +64,29 @@ spec:
         tls:
           enabled: true
 
+    # Not publicly accessible though. To view it, run
+    # `kubectl port-forward -n traefik $(kubectl get pods -n traefik --selector "app.kubernetes.io/name=traefik" --output=name | head -n1) 8080:8080`
+    # and then visit http://127.0.0.1:8080. Use 127.0.0.1 rather than
+    # localhost: the dashboard doesn't answer on localhost, possibly because it
+    # resolves to ::1 first.
     ingressRoute:
       dashboard:
         enabled: true
 
     ingressClass:
       name: traefik
+
     providers:
       kubernetesCRD:
         enabled: true
-        ingressClass: traefik
+        # Specifying an ingressClass here seems like it should be correct, but
+        # doing so breaks proxying of external services for some reason, so it
+        # is left unset.
+        # ingressClass: traefik
         allowExternalNameServices: true
         allowCrossNamespace: true
       kubernetesIngress:
         enabled: true
-        ingressClass: traefik
+        # Same as above: left unset so proxying external services keeps working.
+        # ingressClass: traefik
         allowExternalNameServices: true
         publishedService:
           enabled: false
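+
+    # For reference, proxying an external service is done with an ExternalName
+    # Service plus an Ingress/IngressRoute pointing at it. Illustrative sketch
+    # (names made up):
+    #   apiVersion: v1
+    #   kind: Service
+    #   metadata:
+    #     name: external-app
+    #   spec:
+    #     type: ExternalName
+    #     externalName: app.internal.example.com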
@@ -95,7 +103,44 @@ spec:
         loadBalancerIP: 10.0.185.128
       loadBalancerSourceRanges: []
       externalIPs: []
+
     tlsStore:
       default:
         defaultCertificate:
           secretName: wildcard-mnke-org-tls
+
+    metrics:
+      prometheus:
+        service:
+          enabled: true
+        disableAPICheck: false
+        serviceMonitor:
+          enabled: true
+          metricRelabelings:
+            - sourceLabels: [__name__]
+              separator: ;
+              regex: ^fluentd_output_status_buffer_(oldest|newest)_.+
+              replacement: $1
+              action: drop
+          relabelings:
+            - sourceLabels: [__meta_kubernetes_pod_node_name]
+              separator: ;
+              regex: ^()$
+              targetLabel: nodename
+              replacement: $1
+              action: replace
+          jobLabel: traefik
+          interval: 30s
+          honorLabels: true
+        prometheusRule:
+          enabled: true
+          rules:
+            - alert: TraefikDown
+              expr: up{job="traefik"} == 0
+              for: 5m
+              labels:
+                context: traefik
+                severity: warning
+              annotations:
+                summary: "Traefik Down"
+                description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
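+
+    # To spot-check the exporter locally (illustrative; assumes the chart's
+    # default "metrics" entryPoint on port 9100):
+    #   kubectl port-forward -n traefik deploy/traefik 9100:9100
+    #   curl -s http://127.0.0.1:9100/metrics | head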