From f8be48ead20962b655ac33609c76a3e38f33b3b4 Mon Sep 17 00:00:00 2001
From: Tony Du
Date: Sat, 8 Feb 2025 23:48:15 -0800
Subject: [PATCH] feat: Enable monitoring on traefik

---
 .gitignore                                    |    2 +
 .../templates/traefik/docker-stack.yml.j2     |    1 -
 k8s/helmfile.d/00-core.yaml                   |   83 -
 k8s/helmfile.d/01-databases.yaml              |   67 -
 k8s/helmfile.d/02-applications.yaml           |   64 -
 k8s/helmfile.d/charts/certs/.helmignore       |   23 -
 k8s/helmfile.d/charts/certs/Chart.yaml        |   24 -
 .../charts/certs/templates/NOTES.txt          |    0
 .../charts/certs/templates/_helpers.tpl       |   62 -
 .../charts/certs/templates/certificate.yml    |   17 -
 .../charts/certs/templates/cf-secret.yml      |   10 -
 .../charts/certs/templates/clusterissuer.yml  |   24 -
 k8s/helmfile.d/charts/certs/values.yaml       |   28 -
 k8s/helmfile.d/charts/init-dbs/.helmignore    |   23 -
 k8s/helmfile.d/charts/init-dbs/Chart.yaml     |   24 -
 .../charts/init-dbs/templates/NOTES.txt       |    0
 .../charts/init-dbs/templates/_helpers.tpl    |   51 -
 .../charts/init-dbs/templates/jobs.yaml       |  112 -
 .../charts/init-dbs/templates/secrets.yaml    |   33 -
 k8s/helmfile.d/charts/init-dbs/values.yaml    |   36 -
 .../values/argo-cd/values.yaml.gotmpl         | 4190 --------
 .../values/authentik/values.yaml.gotmpl       | 1036 ----
 k8s/helmfile.d/values/cert-manager/values.yml |   11 -
 .../values/certs/values.yaml.gotmpl           |   28 -
 .../values/ghost/values.yaml.gotmpl           |  876 ---
 .../values/gitea/values.yaml.gotmpl           |  782 ---
 .../values/gitlab/values.yaml.gotmpl          | 1398 -----
 k8s/helmfile.d/values/globals/.gitignore      |    1 -
 .../values/globals/staging.yaml.gotmpl        |  216 -
 .../values/harbor/values.yaml.gotmpl          | 3815 ------
 .../values/init-dbs/values.yaml.gotmpl        |   34 -
 .../kube-prometheus-stack/values.yaml.gotmpl  | 5142 -----------------
 .../values/longhorn/values.yaml.gotmpl        |  539 --
 .../values/mysql/values.yaml.gotmpl           | 1614 ------
 .../values.yaml.gotmpl                        |  115 -
 .../values/pgadmin4/values.yaml.gotmpl        |  420 --
 .../values/phpmyadmin/values.yaml.gotmpl      |  811 ---
 .../values/postgres/values.yaml.gotmpl        | 1936 -------
 .../values/rancher/values.yaml.gotmpl         |  132 -
 .../values/redis/values.yaml.gotmpl           | 2236 -------
 .../values/traefik/values.yaml.gotmpl         |   61 -
 .../values/uptime-kuma/values.yaml.gotmpl     |  234 -
 k8s/infrastructure/traefik/traefik.yaml       |   49 +-
 43 files changed, 49 insertions(+), 26311 deletions(-)
 delete mode 100644 k8s/helmfile.d/00-core.yaml
 delete mode 100644 k8s/helmfile.d/01-databases.yaml
 delete mode 100644 k8s/helmfile.d/02-applications.yaml
 delete mode 100644 k8s/helmfile.d/charts/certs/.helmignore
 delete mode 100644 k8s/helmfile.d/charts/certs/Chart.yaml
 delete mode 100644 k8s/helmfile.d/charts/certs/templates/NOTES.txt
 delete mode 100644 k8s/helmfile.d/charts/certs/templates/_helpers.tpl
 delete mode 100644 k8s/helmfile.d/charts/certs/templates/certificate.yml
 delete mode 100644 k8s/helmfile.d/charts/certs/templates/cf-secret.yml
 delete mode 100644 k8s/helmfile.d/charts/certs/templates/clusterissuer.yml
 delete mode 100644 k8s/helmfile.d/charts/certs/values.yaml
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/.helmignore
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/Chart.yaml
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/templates/NOTES.txt
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml
 delete mode 100644 k8s/helmfile.d/charts/init-dbs/values.yaml
 delete mode 100644 k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/authentik/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/cert-manager/values.yml
 delete mode 100644 k8s/helmfile.d/values/certs/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/ghost/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/gitea/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/gitlab/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/globals/.gitignore
 delete mode 100644 k8s/helmfile.d/values/globals/staging.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/harbor/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/longhorn/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/mysql/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/nfs-subdir-external-provisioner/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/pgadmin4/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/phpmyadmin/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/postgres/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/rancher/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/redis/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/traefik/values.yaml.gotmpl
 delete mode 100644 k8s/helmfile.d/values/uptime-kuma/values.yaml.gotmpl

diff --git a/.gitignore b/.gitignore
index 2217a41..7e36af7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 .env
+secrets.yaml
+secrets.yml
 venv
diff --git a/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2 b/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
index 8c7a61a..fd5cd9f 100644
--- a/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
+++ b/ansible/roles/swarm-bootstrap/templates/traefik/docker-stack.yml.j2
@@ -1,6 +1,5 @@
 networks:
   traefik:
-    driver: overlay
     attachable: true
     name: traefik

diff --git a/k8s/helmfile.d/00-core.yaml b/k8s/helmfile.d/00-core.yaml
deleted file mode 100644
index 537f5d2..0000000
--- a/k8s/helmfile.d/00-core.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-repositories:
-  - name: traefik
-    url: https://helm.traefik.io/traefik
-  - name: jetstack
-    url: https://charts.jetstack.io
-  - name: rancher-stable
-    url: https://releases.rancher.com/server-charts/stable
-  - name: longhorn
-    url: https://charts.longhorn.io
-  - name: bitnami
-    url: https://charts.bitnami.com/bitnami
-  - name: nfs-subdir-external-provisioner
-    url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
-  - name: prometheus-community
-    url: https://prometheus-community.github.io/helm-charts
-
-environments:
-  staging: &staging
-    values:
-      - ./values/globals/staging.yaml.gotmpl
-  production:
-  default: *staging
-
----
-releases:
-  - name: traefik
-    namespace: {{ .Values.globals.traefik.namespace }}
-    createNamespace: true
-    chart: traefik/traefik
-    values:
-      - ./values/traefik/values.yaml.gotmpl
-
-  - name: cert-manager
-    namespace: {{ .Values.globals.certManager.namespace }}
-    createNamespace: true
-    chart: jetstack/cert-manager
-    values:
-      - ./values/cert-manager/values.yml
-
-  - name: certs
-    chart: ./charts/certs
-    needs:
-      - {{ .Values.globals.certManager.namespace }}/cert-manager
-    values:
-      - ./values/certs/values.yaml.gotmpl
-
-  - name: nfs-subdir-external-provisioner
-    namespace: {{ .Values.globals.nfsSubdirExternalProvisioner.namespace }}
-    createNamespace: true
-    chart: {{
.Values.globals.nfsSubdirExternalProvisioner.namespace }}/nfs-subdir-external-provisioner - values: - - ./values/nfs-subdir-external-provisioner/values.yaml.gotmpl - - - name: rancher - namespace: {{ .Values.globals.rancher.namespace }} - createNamespace: true - chart: rancher-stable/rancher - needs: - - {{ .Values.globals.certManager.namespace }}/cert-manager - values: - - ./values/rancher/values.yaml.gotmpl - - - name: longhorn - namespace: {{ .Values.globals.longhorn.namespace }} - createNamespace: true - chart: longhorn/longhorn - values: - - ./values/longhorn/values.yaml.gotmpl - needs: - - {{ .Values.globals.rancher.namespace }}/rancher - - - - name: kube-prometheus-stack - namespace: {{ .Values.globals.kubePrometheusStack.namespace }} - createNamespace: true - chart: prometheus-community/kube-prometheus-stack - needs: - - {{ .Values.globals.certManager.namespace }}/cert-manager - - {{ .Values.globals.longhorn.namespace }}/longhorn - values: - - ./values/kube-prometheus-stack/values.yaml.gotmpl - diff --git a/k8s/helmfile.d/01-databases.yaml b/k8s/helmfile.d/01-databases.yaml deleted file mode 100644 index bcff3f4..0000000 --- a/k8s/helmfile.d/01-databases.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -repositories: - - name: bitnami - url: https://charts.bitnami.com/bitnami - - name: runix - url: https://helm.runix.net - -environments: - staging: &staging - values: - - ./values/globals/staging.yaml.gotmpl - production: - default: *staging - ---- -releases: - - name: mysql - namespace: {{ .Values.globals.mysql.namespace }} - createNamespace: true - chart: bitnami/mysql - values: - - ./values/mysql/values.yaml.gotmpl - - - name: phpmyadmin - namespace: {{ .Values.globals.phpmyadmin.namespace }} - createNamespace: true - chart: bitnami/phpmyadmin - values: - - ./values/phpmyadmin/values.yaml.gotmpl - needs: - - {{ .Values.globals.mysql.namespace }}/mysql - - - name: postgres - namespace: {{ .Values.globals.postgres.namespace }} - createNamespace: true - chart: bitnami/postgresql - values: - - ./values/postgres/values.yaml.gotmpl - - - name: pgadmin4 - namespace: {{ .Values.globals.pgadmin4.namespace }} - createNamespace: true - chart: runix/pgadmin4 - values: - - ./values/pgadmin4/values.yaml.gotmpl - needs: - - {{ .Values.globals.postgres.namespace }}/postgres - - - name: init-dbs - # It doesn't really matter where we put this, but I don't want it polluting - # the default namespace - namespace: init-dbs - createNamespace: true - chart: ./charts/init-dbs - values: - - ./values/init-dbs/values.yaml.gotmpl - needs: - - {{ .Values.globals.postgres.namespace }}/postgres - - {{ .Values.globals.mysql.namespace }}/mysql - - - name: redis - namespace: {{ .Values.globals.redis.namespace }} - createNamespace: true - chart: bitnami/redis - values: - - ./values/redis/values.yaml.gotmpl - diff --git a/k8s/helmfile.d/02-applications.yaml b/k8s/helmfile.d/02-applications.yaml deleted file mode 100644 index 83d0de7..0000000 --- a/k8s/helmfile.d/02-applications.yaml +++ /dev/null @@ -1,64 +0,0 @@ ---- -repositories: - - name: gitlab - url: https://charts.gitlab.io - - name: bitnami - url: https://charts.bitnami.com/bitnami - - name: uptime-kuma - url: https://helm.irsigler.cloud - - name: authentik - url: https://charts.goauthentik.io - - name: harbor - url: https://helm.goharbor.io - -environments: - staging: &staging - values: - - ./values/globals/staging.yaml.gotmpl - production: - default: *staging - ---- -releases: - - name: uptime-kuma - namespace: {{ .Values.globals.uptimeKuma.namespace }} - 
createNamespace: true - chart: uptime-kuma/uptime-kuma - values: - - ./values/uptime-kuma/values.yaml.gotmpl - - - name: authentik - namespace: {{ .Values.globals.authentik.namespace }} - createNamespace: true - chart: authentik/authentik - values: - - ./values/authentik/values.yaml.gotmpl - - - name: argo-cd - namespace: {{ .Values.globals.argocd.namespace }} - createNamespace: true - chart: bitnami/argo-cd - values: - - ./values/argo-cd/values.yaml.gotmpl - - - name: harbor - namespace: {{ .Values.globals.harbor.namespace }} - createNamespace: true - chart: bitnami/harbor - values: - - ./values/harbor/values.yaml.gotmpl - - - name: ghost - namespace: {{ .Values.globals.ghost.namespace }} - createNamespace: true - chart: bitnami/ghost - values: - - ./values/ghost/values.yaml.gotmpl - - - name: gitea - installed: false - namespace: {{ .Values.globals.gitea.namespace }} - createNamespace: true - chart: bitnami/gitea - values: - - ./values/gitea/values.yaml.gotmpl diff --git a/k8s/helmfile.d/charts/certs/.helmignore b/k8s/helmfile.d/charts/certs/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/k8s/helmfile.d/charts/certs/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/k8s/helmfile.d/charts/certs/Chart.yaml b/k8s/helmfile.d/charts/certs/Chart.yaml deleted file mode 100644 index 06cd0fa..0000000 --- a/k8s/helmfile.d/charts/certs/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: certs -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/k8s/helmfile.d/charts/certs/templates/NOTES.txt b/k8s/helmfile.d/charts/certs/templates/NOTES.txt deleted file mode 100644 index e69de29..0000000 diff --git a/k8s/helmfile.d/charts/certs/templates/_helpers.tpl b/k8s/helmfile.d/charts/certs/templates/_helpers.tpl deleted file mode 100644 index 27f9fc8..0000000 --- a/k8s/helmfile.d/charts/certs/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. 
-*/}} -{{- define "certs.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "certs.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "certs.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "certs.labels" -}} -helm.sh/chart: {{ include "certs.chart" . }} -{{ include "certs.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "certs.selectorLabels" -}} -app.kubernetes.io/name: {{ include "certs.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "certs.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "certs.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/k8s/helmfile.d/charts/certs/templates/certificate.yml b/k8s/helmfile.d/charts/certs/templates/certificate.yml deleted file mode 100644 index 64fd72a..0000000 --- a/k8s/helmfile.d/charts/certs/templates/certificate.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: {{ .Values.certificateName }} - namespace: {{ .Values.certificateNamespace }} -spec: - secretName: {{ .Values.certificateSecretName }} - issuerRef: - name: {{ .Values.issuerName | quote }} - kind: ClusterIssuer - commonName: {{ .Values.commonName | quote }} - dnsNames: - {{- range .Values.dnsNames }} - - {{ . 
| quote }} - {{- end}} - diff --git a/k8s/helmfile.d/charts/certs/templates/cf-secret.yml b/k8s/helmfile.d/charts/certs/templates/cf-secret.yml deleted file mode 100644 index a5db44b..0000000 --- a/k8s/helmfile.d/charts/certs/templates/cf-secret.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.cloudflareTokenSecretName }} - namespace: {{ .Values.certManagerNamespace }} -type: Opaque -stringData: - cloudflare-token: {{ .Values.cloudflareSecretToken }} - diff --git a/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml b/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml deleted file mode 100644 index bcb1f74..0000000 --- a/k8s/helmfile.d/charts/certs/templates/clusterissuer.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: {{ .Values.issuerName }} -spec: - acme: - server: {{- if eq .Values.issuerMode "staging" }} "https://acme-staging-v02.api.letsencrypt.org/directory" {{- else }} "https://acme-v02.api.letsencrypt.org/directory" {{- end }} - email: {{ .Values.acmeEmail }} - privateKeySecretRef: - name: {{ .Values.privateKeySecretRef }} - solvers: - - dns01: - cloudflare: - email: {{ .Values.cloudflareEmail }} - apiTokenSecretRef: - name: {{ .Values.cloudflareTokenSecretName }} - key: cloudflare-token - selector: - dnsZones: - {{- range .Values.dnsZones }} - - {{ . | quote }} - {{- end}} - diff --git a/k8s/helmfile.d/charts/certs/values.yaml b/k8s/helmfile.d/charts/certs/values.yaml deleted file mode 100644 index 97a1db8..0000000 --- a/k8s/helmfile.d/charts/certs/values.yaml +++ /dev/null @@ -1,28 +0,0 @@ -acmeEmail: tonydu121@hotmail.com -cloudflareEmail: tonydu121@hotmail.com - -# staging or production -issuerMode: staging - -issuerName: letsencrypt -privateKeySecretRef: letsencrypt - -certManagerNamespace: cert-manager - -cloudflareSecretToken: redacted -cloudflareTokenSecretName: cloudflare-token-secret - -dnsZones: - - "mnke.org" - - "*.mnke.org" - - "*.hl.mnke.org" - -# TODO: Allow for multiple creation -certificateName: hl-mnke-org -certificateNamespace: default -certificateSecretName: hl-mnke-org-tls - -commonName: "*.hl.mnke.org" -dnsNames: - - "hl.mnke.org" - - "*.hl.mnke.org" diff --git a/k8s/helmfile.d/charts/init-dbs/.helmignore b/k8s/helmfile.d/charts/init-dbs/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/k8s/helmfile.d/charts/init-dbs/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/k8s/helmfile.d/charts/init-dbs/Chart.yaml b/k8s/helmfile.d/charts/init-dbs/Chart.yaml deleted file mode 100644 index d530203..0000000 --- a/k8s/helmfile.d/charts/init-dbs/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: init-dbs -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. 
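
Pausing on the certs chart deleted above: given its default values.yaml, the two templates render to roughly the manifests below — a DNS-01 ClusterIssuer backed by the Cloudflare token Secret (cf-secret.yml renders that Secret into the cert-manager namespace), plus a wildcard Certificate. This is a sketch of the rendered output I reconstructed from the templates and values, using the staging ACME endpoint selected by issuerMode: staging; it is not a file from the repository:

    apiVersion: cert-manager.io/v1
    kind: ClusterIssuer
    metadata:
      name: letsencrypt
    spec:
      acme:
        server: "https://acme-staging-v02.api.letsencrypt.org/directory"
        email: tonydu121@hotmail.com
        privateKeySecretRef:
          name: letsencrypt
        solvers:
          - dns01:
              cloudflare:
                email: tonydu121@hotmail.com
                apiTokenSecretRef:
                  name: cloudflare-token-secret
                  key: cloudflare-token
            selector:
              dnsZones:
                - "mnke.org"
                - "*.mnke.org"
                - "*.hl.mnke.org"
    ---
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: hl-mnke-org
      namespace: default
    spec:
      secretName: hl-mnke-org-tls
      issuerRef:
        name: "letsencrypt"
        kind: ClusterIssuer
      commonName: "*.hl.mnke.org"
      dnsNames:
        - "hl.mnke.org"
        - "*.hl.mnke.org"
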
Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/k8s/helmfile.d/charts/init-dbs/templates/NOTES.txt b/k8s/helmfile.d/charts/init-dbs/templates/NOTES.txt deleted file mode 100644 index e69de29..0000000 diff --git a/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl b/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl deleted file mode 100644 index c58e41b..0000000 --- a/k8s/helmfile.d/charts/init-dbs/templates/_helpers.tpl +++ /dev/null @@ -1,51 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "init-dbs.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "init-dbs.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "init-dbs.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "init-dbs.labels" -}} -helm.sh/chart: {{ include "init-dbs.chart" . }} -{{ include "init-dbs.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "init-dbs.selectorLabels" -}} -app.kubernetes.io/name: {{ include "init-dbs.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} diff --git a/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml b/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml deleted file mode 100644 index 32403a3..0000000 --- a/k8s/helmfile.d/charts/init-dbs/templates/jobs.yaml +++ /dev/null @@ -1,112 +0,0 @@ -{{- range .Values.postgres.databases }} ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }} -spec: - template: - spec: - imagePullSecrets: - {{- toYaml $.Values.imagePullSecrets | nindent 8 }} - restartPolicy: OnFailure - containers: - - name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }} - image: {{ $.Values.postgres.image.ref }} - imagePullPolicy: {{ $.Values.postgres.image.pullPolicy }} - command: - - /bin/sh - - -c - args: - # If the username and database exists, whatever, just exit. 
- # Yeah, if something else went wrong, we're still exiting with code 0, - # but it should be fine. - - | - sleep 10s && \ - psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \ - -tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" |\ - grep -q 1 ||\ - psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \ - -c "CREATE USER $USERNAME WITH ENCRYPTED PASSWORD '$PASSWORD';" \ - -c "CREATE DATABASE $DATABASE WITH OWNER = $USERNAME;" - env: - - name: PGUSER - valueFrom: - secretKeyRef: - key: username - name: {{ include "init-dbs.fullname" $ }}-postgres - - name: PGPASSWORD - valueFrom: - secretKeyRef: - key: password - name: {{ include "init-dbs.fullname" $ }}-postgres - - name: USERNAME - valueFrom: - secretKeyRef: - key: {{ .database }}-username - name: {{ include "init-dbs.fullname" $ }}-postgres - - name: PASSWORD - valueFrom: - secretKeyRef: - key: {{ .database }}-password - name: {{ include "init-dbs.fullname" $ }}-postgres - - name: DATABASE - valueFrom: - secretKeyRef: - key: {{ .database }}-database - name: {{ include "init-dbs.fullname" $ }}-postgres -{{- end }} - -{{- range .Values.mysql.databases }} ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }} -spec: - template: - spec: - imagePullSecrets: - {{- toYaml $.Values.imagePullSecrets | nindent 8 }} - restartPolicy: OnFailure - containers: - - name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }} - image: {{ $.Values.mysql.image.ref }} - imagePullPolicy: {{ $.Values.mysql.image.pullPolicy }} - command: - - /bin/sh - - -c - args: - - | - sleep 10s && \ - mysql -h {{ $.Values.mysql.host }} -u $MYUSER mysql --password=$MYPASSWORD \ - -e "CREATE DATABASE IF NOT EXISTS $DATABASE;" \ - -e "CREATE USER IF NOT EXISTS '$USERNAME'@'%' IDENTIFIED BY '$PASSWORD';" \ - -e "GRANT ALL PRIVILEGES ON $DATABASE TO '$USERNAME'@'%';" - env: - - name: MYUSER - valueFrom: - secretKeyRef: - key: username - name: {{ include "init-dbs.fullname" $ }}-mysql - - name: MYPASSWORD - valueFrom: - secretKeyRef: - key: password - name: {{ include "init-dbs.fullname" $ }}-mysql - - name: USERNAME - valueFrom: - secretKeyRef: - key: {{ .database }}-username - name: {{ include "init-dbs.fullname" $ }}-mysql - - name: PASSWORD - valueFrom: - secretKeyRef: - key: {{ .database }}-password - name: {{ include "init-dbs.fullname" $ }}-mysql - - name: DATABASE - valueFrom: - secretKeyRef: - key: {{ .database }}-database - name: {{ include "init-dbs.fullname" $ }}-mysql -{{- end }} diff --git a/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml b/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml deleted file mode 100644 index 2f4a021..0000000 --- a/k8s/helmfile.d/charts/init-dbs/templates/secrets.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "init-dbs.fullname" $ }}-postgres - labels: - {{- include "init-dbs.labels" $ | nindent 4 }} -type: Opaque -stringData: - username: {{ .Values.postgres.username }} - password: {{ .Values.postgres.password }} - {{- range .Values.postgres.databases }} - {{ .database }}-database: {{ .database }} - {{ .database }}-username: {{ .username }} - {{ .database }}-password: {{ .password }} - {{- end }} - ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "init-dbs.fullname" $ }}-mysql - labels: - {{- include "init-dbs.labels" $ | nindent 4 }} -type: Opaque -stringData: - username: {{ .Values.mysql.username }} - password: {{ .Values.mysql.password }} - {{- range .Values.mysql.databases 
}} - {{ .database }}-database: {{ .database }} - {{ .database }}-username: {{ .username }} - {{ .database }}-password: {{ .password }} - {{- end }} diff --git a/k8s/helmfile.d/charts/init-dbs/values.yaml b/k8s/helmfile.d/charts/init-dbs/values.yaml deleted file mode 100644 index 2c53126..0000000 --- a/k8s/helmfile.d/charts/init-dbs/values.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# This is to override the chart name. -nameOverride: "" -fullnameOverride: "" - -postgres: - image: - ref: postgres - # This sets the pull policy for images. - pullPolicy: IfNotPresent - host: "" - username: postgres - password: "" - databases: - - database: test - username: test - password: test - - database: test1 - username: test1 - password: test1 -mysql: - image: - ref: mysql - # This sets the pull policy for images. - pullPolicy: IfNotPresent - host: "" - username: root - password: "" - databases: - - database: test - username: test - password: test - - database: test1 - username: test1 - password: test1 diff --git a/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl b/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl deleted file mode 100644 index f89abd7..0000000 --- a/k8s/helmfile.d/values/argo-cd/values.yaml.gotmpl +++ /dev/null @@ -1,4190 +0,0 @@ -# Copyright Broadcom, Inc. All Rights Reserved. -# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) -## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - defaultStorageClass: {{ .Values.globals.argocd.storageClass }} - storageClass: "" - ## Security parameters - ## - security: - ## @param global.security.allowInsecureImages Allows skipping image verification - allowInsecureImages: false - ## Compatibility adaptations for Kubernetes platforms - ## - compatibility: - ## Compatibility adaptations for Openshift - ## - openshift: - ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
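
Stepping back to the init-dbs chart removed above: each Job is an idempotent create-if-missing wrapper. The Postgres variant probes pg_database for the target name and only runs the CREATE statements when the probe comes up empty. A condensed sketch of that container spec follows; PGHOST is a hypothetical stand-in for the templated {{ $.Values.postgres.host }}, and the real chart feeds PGUSER, USERNAME, PASSWORD, and DATABASE from its generated Secret rather than literal env values:

    containers:
      - name: init-postgres-example
        image: postgres
        command: ["/bin/sh", "-c"]
        args:
          # Exit 0 if the database already exists; otherwise create the
          # owner role and the database in a second psql invocation.
          - |
            psql -h "$PGHOST" -U "$PGUSER" postgres \
              -tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" | grep -q 1 || \
            psql -h "$PGHOST" -U "$PGUSER" postgres \
              -c "CREATE USER $USERNAME WITH ENCRYPTED PASSWORD '$PASSWORD';" \
              -c "CREATE DATABASE $DATABASE WITH OWNER = $USERNAME;"

One aside on the MySQL variant above: GRANT ALL PRIVILEGES ON $DATABASE TO ... is a table-level grant as written; the database-level form MySQL expects is $DATABASE.* .
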
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto -## @section Common parameters - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] -## @section Argo CD image parameters - -## Bitnami Argo CD image -## ref: https://hub.docker.com/r/bitnami/argo-cd/tags/ -## @param image.registry [default: REGISTRY_NAME] Argo CD image registry -## @param image.repository [default: REPOSITORY_NAME/argo-cd] Argo CD image repository -## @skip image.tag Argo CD image tag (immutable tags are recommended) -## @param image.digest Argo CD image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag -## @param image.pullPolicy Argo CD image pull policy -## @param image.pullSecrets Argo CD image pull secrets -## @param image.debug Enable Argo CD image debug mode -## -image: - registry: docker.io - repository: bitnami/argo-cd - tag: 2.13.4-debian-12-r0 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false -## @section Argo CD application controller parameters - -## Argo CD Controller -## -controller: - ## @param controller.kind Kind to deploy ArgoCD application controller in. - ## Use either StatefulSet or Deployment (default). StatefulSet is required when running in HA mode. 
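
To make that concrete: running this chart in HA would mean overriding the kind and the replica count together, along the lines of the values sketch below. This is an illustration of the two parameters documented here, not a configuration from the repository, which keeps the single-replica Deployment default:

    controller:
      kind: StatefulSet   # required by Argo CD when running more than one replica
      replicaCount: 2
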
- ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/high_availability/ - ## - kind: Deployment - ## @param controller.replicaCount Number of Argo CD replicas to deploy - ## - replicaCount: 1 - ## Configure extra options for Argo CD containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param controller.startupProbe.enabled Enable startupProbe on Argo CD nodes - ## @param controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param controller.startupProbe.periodSeconds Period seconds for startupProbe - ## @param controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param controller.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param controller.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param controller.livenessProbe.enabled Enable livenessProbe on Argo CD nodes - ## @param controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param controller.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param controller.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param controller.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param controller.readinessProbe.enabled Enable readinessProbe on Argo CD nodes - ## @param controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param controller.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param controller.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param controller.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param controller.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param controller.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param controller.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Argo CD resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "micro" - ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param controller.podSecurityContext.enabled Enabled Argo CD pods' Security Context - ## @param controller.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param controller.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param controller.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param controller.podSecurityContext.fsGroup Set Argo CD pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param controller.containerSecurityContext.enabled Enabled Argo CD containers' Security Context - ## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param controller.containerSecurityContext.runAsUser Set Argo CD containers' Security Context runAsUser - ## @param controller.containerSecurityContext.runAsGroup Set Argo CD containers' Security Context runAsGroup - ## @param controller.containerSecurityContext.allowPrivilegeEscalation Set Argo CD containers' Security Context allowPrivilegeEscalation - ## @param controller.containerSecurityContext.capabilities.drop Set Argo CD containers' Security Context capabilities to be dropped - ## @param controller.containerSecurityContext.readOnlyRootFilesystem Set Argo CD containers' Security Context readOnlyRootFilesystem - ## @param controller.containerSecurityContext.runAsNonRoot Set Argo CD container's Security Context runAsNonRoot - ## @param controller.containerSecurityContext.privileged Set controller container's Security Context privileged - ## @param controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## ServiceAccount configuration for the Argo CD application controller - ## - serviceAccount: - ## @param controller.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param controller.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param controller.serviceAccount.automountServiceAccountToken Automount service account token for the application controller service account - ## - automountServiceAccountToken: false - ## @param controller.serviceAccount.annotations Annotations for service account. Evaluated as a template. 
Only used if `create` is `true`. - ## - annotations: {} - ## Enable admin clusterrole resources. Allows to Argo CD to deploy to the K8s cluster - ## @param controller.clusterAdminAccess Enable K8s cluster admin access for the application controller - ## - clusterAdminAccess: true - ## Enable Custom Rules for the Application Controller cluster role - ## @param controller.clusterRoleRules Use custom rules for the application controller's cluster role - ## - clusterRoleRules: [] - ## Argo CD application controller log format: text|json - ## @param controller.logFormat Format for the Argo CD application controller logs. Options: [text, json] - ## - logFormat: text - ## Argo CD application controller log level - ## @param controller.logLevel Log level for the Argo CD application controller - ## - logLevel: info - ## Argo CD application controller ports - ## @param controller.containerPorts.metrics Argo CD application controller metrics port number - containerPorts: - metrics: 8082 - ## Argo CD application controller service parameters - ## - service: - ## @param controller.service.type Argo CD service type - ## - type: ClusterIP - ## @param controller.service.ports.metrics Argo CD application controller service port - ## - ports: - metrics: 8082 - ## Node ports to expose - ## @param controller.service.nodePorts.metrics Node port for Argo CD application controller service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param controller.service.clusterIP Argo CD application controller service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param controller.service.loadBalancerIP Argo CD application controller service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param controller.service.loadBalancerSourceRanges Argo CD application controller service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param controller.service.externalTrafficPolicy Argo CD application controller service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param controller.service.annotations Additional custom annotations for Argo CD application controller service - ## - annotations: {} - ## @param controller.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param controller.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param controller.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param controller.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param 
controller.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param controller.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param controller.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param controller.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param controller.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param controller.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param controller.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## Metrics configuration for Argo CD application controller - ## - metrics: - ## @param controller.metrics.enabled Enable Argo CD application controller metrics - ## - enabled: true - service: - ## @param controller.metrics.service.type Argo CD application controller service type - ## - type: ClusterIP - ## @param controller.metrics.service.ports.metrics Argo CD application controller metrics service port - ## - ports: - metrics: 8082 - ## Node ports to expose - ## @param controller.metrics.service.nodePorts.metrics Node port for the application controller service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param controller.metrics.service.clusterIP Argo CD application controller metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param controller.metrics.service.loadBalancerIP Argo CD application controller service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param controller.metrics.service.loadBalancerSourceRanges Argo CD application controller service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param controller.metrics.service.externalTrafficPolicy Argo CD application controller service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param 
controller.metrics.service.annotations Additional custom annotations for Argo CD application controller service - ## - annotations: {} - ## @param controller.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param controller.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Argo CD application controller metrics service monitor configuration - ## - serviceMonitor: - ## @param controller.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param controller.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param controller.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param controller.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param controller.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param controller.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param controller.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param controller.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param controller.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## Rules for PrometheusRule object if enabled - ## - ## E.g. - ## @param controller.metrics.rules.enabled Enable render extra rules for PrometheusRule object - ## @param controller.metrics.rules.spec Rules to render into the PrometheusRule object - ## @param controller.metrics.rules.selector Selector for the PrometheusRule object - ## @param controller.metrics.rules.namespace Namespace where to create the PrometheusRule object - ## @param controller.metrics.rules.additionalLabels Additional lables to add to the PrometheusRule object - ## - rules: - enabled: false - ## E.g - ## - alert: ArgoAppMissing - ## expr: | - ## absent(argocd_app_info) - ## for: 15m - ## labels: - ## severity: critical - ## annotations: - ## summary: "[ArgoCD] No reported applications" - ## description: > - ## ArgoCD has not reported any applications data for the past 15 minutes which - ## means that it must be down or not functioning properly. 
This needs to be - ## resolved for this cloud to continue to maintain state. - ## - alert: ArgoAppNotSynced - ## expr: | - ## argocd_app_info{sync_status!="Synced"} == 1 - ## for: 12h - ## labels: - ## severity: warning - ## annotations: - ## summary: "[{{`{{ $labels.name }}`}}] Application not synchronized" - ## description: > - ## The application [{{`{{ $labels.name }}`}} has not been synchronized for over - ## 12 hours which means that the state of this cloud has drifted away from the - ## state inside Git. - ## - spec: [] - ## E.g - ## selector: - ## prometheus: kube-prometheus - ## - selector: {} - namespace: monitoring - additionalLabels: {} - ## @param controller.command Override default container command (useful when using custom images) - ## - command: [] - ## Arguments that will be used by default for the application controller - ## @param controller.defaultArgs.statusProcessors Default status processors for Argo CD controller - ## @param controller.defaultArgs.operationProcessors Default operation processors for Argo CD controller - ## @param controller.defaultArgs.appResyncPeriod Default application resync period for Argo CD controller - ## @param controller.defaultArgs.selfHealTimeout Default self heal timeout for Argo CD controller - ## - defaultArgs: - statusProcessors: "20" - operationProcessors: "10" - appResyncPeriod: "180" - selfHealTimeout: "5" - ## @param controller.args Override default container args (useful when using custom images). Overrides the defaultArgs. - ## - args: [] - ## @param controller.extraArgs Add extra arguments to the default arguments for the Argo CD controller - ## - extraArgs: [] - ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/ - ## - dynamicClusterDistribution: - ## @param controller.dynamicClusterDistribution.enabled Whether dynamic cluster distribution is enabled. - ## - enabled: false - ## @param controller.dynamicClusterDistribution.heartbeatDuration Time to update the cluster sharding (defaults to 10 seconds). - ## ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/#working-of-dynamic-distribution - ## - heartbeatDuration: "" - ## @param controller.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param controller.hostAliases Argo CD pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param controller.podLabels Extra labels for Argo CD pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param controller.podAnnotations Annotations for Argo CD pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param controller.podAffinityPreset Pod affinity preset. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param controller.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `controller.affinity` is set. 
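
Returning to the controller.metrics block documented above: enabling the ServiceMonitor and a PrometheusRule from these same values would look roughly like the sketch below. It assumes the Prometheus Operator CRDs are installed (for example by a kube-prometheus-stack release in a monitoring namespace), and the alert is lifted from the commented ArgoAppNotSynced example; none of this is set in the repository's values:

    controller:
      metrics:
        enabled: true
        serviceMonitor:
          enabled: true
          namespace: monitoring   # assumption: where Prometheus runs
        rules:
          enabled: true
          namespace: monitoring
          spec:
            - alert: ArgoAppNotSynced
              expr: |
                argocd_app_info{sync_status!="Synced"} == 1
              for: 12h
              labels:
                severity: warning
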
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node controller.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param controller.nodeAffinityPreset.type Node affinity preset type. Ignored if `controller.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param controller.nodeAffinityPreset.key Node label key to match. Ignored if `controller.affinity` is set - ## - key: "" - ## @param controller.nodeAffinityPreset.values Node label values to match. Ignored if `controller.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param controller.affinity Affinity for Argo CD pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `controller.podAffinityPreset`, `controller.podAntiAffinityPreset`, and `controller.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param controller.nodeSelector Node labels for Argo CD pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param controller.tolerations Tolerations for Argo CD pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param controller.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param controller.shareProcessNamespace Enable shared process namespace in a pod. - ## If set to false (default), each container will run in separate namespace, controller will have PID=1. - ## If set to true, the /pause will run as init process and will reap any zombie PIDs, - ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. - ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param controller.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param controller.updateStrategy.type Argo CD statefulset strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param controller.priorityClassName Argo CD pods' priorityClassName - ## - priorityClassName: "" - ## @param controller.runtimeClassName Name of the runtime class to be used by pod(s) - ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - ## @param controller.lifecycleHooks for the Argo CD container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param controller.podManagementPolicy podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies - ## - podManagementPolicy: "" - ## @param controller.extraEnvVars Array with extra environment variables to add to Argo CD nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param controller.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD nodes - ## - extraEnvVarsCM: "" - ## @param controller.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD nodes - ## - extraEnvVarsSecret: "" - ## @param controller.extraVolumes Optionally specify extra list of additional volumes for the Argo CD pod(s) - ## - extraVolumes: [] - ## @param controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD container(s) - ## - extraVolumeMounts: [] - ## @param controller.sidecars Add additional sidecar containers to the Argo CD pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param controller.initContainers Add additional init containers to the Argo CD pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param controller.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param controller.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param controller.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `controller.pdb.minAvailable` and `controller.pdb.maxUnavailable` are empty. 
- ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Argo CD ApplicationSet controller parameters - -## ApplicationSet controller -## -applicationSet: - ## @param applicationSet.enabled Enable ApplicationSet controller - ## - enabled: false - ## @param applicationSet.replicaCount The number of ApplicationSet controller pods to run - ## - replicaCount: 1 - ## @param applicationSet.command Override default container command (useful when using custom images) - ## - command: [] - ## Arguments that will be used by default for the application controller - ## @param applicationSet.defaultArgs.enableLeaderElection Enable leader election - ## @param applicationSet.defaultArgs.policy Default policy - ## @param applicationSet.defaultArgs.debug Enable debug mode - ## @param applicationSet.defaultArgs.dryRun Enable dry-run mode - ## - defaultArgs: - enableLeaderElection: false - policy: "sync" - debug: false - dryRun: false - ## @param applicationSet.args Override default container args (useful when using custom images). Overrides the defaultArgs. - ## - args: [] - ## @param applicationSet.extraArgs Add extra arguments to the default arguments for the Argo CD applicationSet controller - ## - extraArgs: [] - ## Argo CD applicationSet controller log format: text|json - ## @param applicationSet.logFormat Format for the Argo CD applicationSet controller logs. Options: [text, json] - ## - logFormat: text - ## Argo CD applicationSet controller log level - ## @param applicationSet.logLevel Log level for the Argo CD applicationSet controller - ## - logLevel: info - ## Argo CD applicationSet controller ports - ## @param applicationSet.containerPorts.metrics Argo CD applicationSet controller metrics port number - ## @param applicationSet.containerPorts.probe Argo CD applicationSet controller probe port number - ## - containerPorts: - metrics: 8085 - probe: 8081 - ## Metrics configuration for Argo CD applicationSet controller - ## - metrics: - ## @param applicationSet.metrics.enabled Enable Argo CD applicationSet controller metrics - ## - enabled: false - service: - ## @param applicationSet.metrics.service.type Argo CD applicationSet controller service type - ## - type: ClusterIP - ## @param applicationSet.metrics.service.ports.metrics Argo CD applicationSet controller metrics service port - ## - ports: - metrics: 8085 - ## Node ports to expose - ## @param applicationSet.metrics.service.nodePorts.metrics Node port for the applicationSet controller service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param applicationSet.metrics.service.clusterIP Argo CD applicationSet controller metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param applicationSet.metrics.service.loadBalancerIP Argo CD applicationSet controller service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param applicationSet.metrics.service.loadBalancerSourceRanges Argo CD applicationSet controller service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param applicationSet.metrics.service.externalTrafficPolicy Argo CD applicationSet controller service external traffic policy - ## ref 
https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param applicationSet.metrics.service.annotations Additional custom annotations for Argo CD applicationSet controller service - ## - annotations: {} - ## @param applicationSet.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param applicationSet.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Argo CD applicationSet controller metrics service monitor configuration - ## - serviceMonitor: - ## @param applicationSet.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param applicationSet.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param applicationSet.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param applicationSet.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param applicationSet.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param applicationSet.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param applicationSet.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param applicationSet.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param applicationSet.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## Argo CD applicationSet controller service parameters - ## - service: - ## @param applicationSet.service.type Argo CD applicationSet controller service type - ## - type: ClusterIP - ## @param applicationSet.service.ports.webhook Argo CD applicationSet controller service port - ## - ports: - webhook: 7000 - ## Node ports to expose - ## @param applicationSet.service.nodePorts.webhook Node port for Argo CD applicationSet controller service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - webhook: "" - ## @param applicationSet.service.clusterIP Argo CD applicationSet controller service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param applicationSet.service.loadBalancerIP Argo CD 
applicationSet controller service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param applicationSet.service.loadBalancerSourceRanges Argo CD applicationSet controller service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param applicationSet.service.externalTrafficPolicy Argo CD applicationSet controller service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param applicationSet.service.annotations Additional custom annotations for Argo CD applicationSet controller service - ## - annotations: {} - ## @param applicationSet.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param applicationSet.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param applicationSet.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param applicationSet.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param applicationSet.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param applicationSet.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
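Should the webhook ever need to be reachable from outside the cluster, the service and NetworkPolicy keys above combine like this (a guarded sketch; the CIDR is the illustrative range from the comments):

applicationSet:
  service:
    type: LoadBalancer
    externalTrafficPolicy: Local     # preserve the client source IP
    loadBalancerSourceRanges:
      - 10.10.10.0/24                # illustrative range from the comments
  networkPolicy:
    enabled: true
    allowExternal: false             # in-cluster clients need the server label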
- ## - allowExternalEgress: true - ## @param applicationSet.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param applicationSet.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param applicationSet.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param applicationSet.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param applicationSet.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## ServiceAccount configuration for the Argo CD applicationSet controller - ## - serviceAccount: - ## @param applicationSet.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param applicationSet.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param applicationSet.serviceAccount.automountServiceAccountToken Automount service account token for the applicationSet controller service account - ## - automountServiceAccountToken: false - ## @param applicationSet.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. - ## - annotations: {} - ## Enable admin clusterrole resources. Allows Argo CD applicationSet controller to have access to multiple namespaces - ## @param applicationSet.clusterAdminAccess Enable K8s cluster admin access for the applicationSet controller - ## - clusterAdminAccess: false - ## Enable Custom Rules for Argo CD applicationSet controller cluster role - ## @param applicationSet.clusterRoleRules Use custom rules for Argo CD applicationSet controller's cluster role - ## - clusterRoleRules: [] - ## @param applicationSet.podAffinityPreset Pod affinity preset. Ignored if `applicationSet.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param applicationSet.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `applicationSet.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node applicationSet.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param applicationSet.nodeAffinityPreset.type Node affinity preset type. Ignored if `applicationSet.affinity` is set.
Allowed values: `soft` or `hard` - ## - type: "" - ## @param applicationSet.nodeAffinityPreset.key Node label key to match. Ignored if `applicationSet.affinity` is set - ## - key: "" - ## @param applicationSet.nodeAffinityPreset.values Node label values to match. Ignored if `applicationSet.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param applicationSet.affinity Affinity for Argo CD applicationSet controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `applicationSet.podAffinityPreset`, `applicationSet.podAntiAffinityPreset`, and `applicationSet.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param applicationSet.podAnnotations Annotations for Argo CD applicationSet controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param applicationSet.podLabels Extra labels for Argo CD applicationSet controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param applicationSet.containerSecurityContext.enabled Enabled Argo CD applicationSet controller containers' Security Context - ## @param applicationSet.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param applicationSet.containerSecurityContext.runAsUser Set Argo CD applicationSet controller containers' Security Context runAsUser - ## @param applicationSet.containerSecurityContext.runAsGroup Set Argo CD applicationSet controller containers' Security Context runAsGroup - ## @param applicationSet.containerSecurityContext.allowPrivilegeEscalation Set Argo CD applicationSet controller containers' Security Context allowPrivilegeEscalation - ## @param applicationSet.containerSecurityContext.capabilities.drop Set Argo CD applicationSet controller containers' Security Context capabilities to be dropped - ## @param applicationSet.containerSecurityContext.readOnlyRootFilesystem Set Argo CD applicationSet controller containers' Security Context readOnlyRootFilesystem - ## @param applicationSet.containerSecurityContext.runAsNonRoot Set Argo CD applicationSet controller container's Security Context runAsNonRoot - ## @param applicationSet.containerSecurityContext.privileged Set applicationSet container's Security Context privileged - ## @param applicationSet.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param applicationSet.livenessProbe.enabled Enable livenessProbe on Argo CD applicationSet controller nodes - ## @param applicationSet.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param applicationSet.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param applicationSet.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param applicationSet.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param 
applicationSet.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param applicationSet.readinessProbe.enabled Enable readinessProbe on Argo CD applicationSet controller nodes - ## @param applicationSet.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param applicationSet.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param applicationSet.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param applicationSet.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param applicationSet.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param applicationSet.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param applicationSet.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Argo CD applicationSet controller resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param applicationSet.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if applicationSet.resources is set (applicationSet.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param applicationSet.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param applicationSet.podSecurityContext.enabled Enabled Argo CD applicationSet controller pods' Security Context - ## @param applicationSet.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param applicationSet.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param applicationSet.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param applicationSet.podSecurityContext.fsGroup Set Argo CD applicationSet controller pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## @param applicationSet.nodeSelector Node labels for Argo CD applicationSet controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param applicationSet.tolerations Tolerations for Argo CD applicationSet controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param applicationSet.updateStrategy.type Argo CD applicationSet controller statefulset strategy type - ## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param applicationSet.priorityClassName Argo CD applicationSet controller pods' priorityClassName - ## - priorityClassName: "" - ## @param applicationSet.extraVolumes Optionally specify extra list of additional volumes for the Argo CD applicationSet controller pod(s) - ## - extraVolumes: [] - ## @param applicationSet.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD applicationSet controller container(s) - ## - extraVolumeMounts: [] - ## @param applicationSet.extraEnvVars Array with extra environment variables to add to Argo CD applicationSet controller nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param applicationSet.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD applicationSet controller nodes - ## - extraEnvVarsCM: "" - ## @param applicationSet.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD applicationSet controller nodes - ## - extraEnvVarsSecret: "" - ## Webhook for the Git Generator - ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration) - ## - webhook: - ingress: - ## @param applicationSet.webhook.ingress.enabled Enable an ingress resource for Webhooks - ## - enabled: false - ## DEPRECATED: Use server.ingress.annotations instead of server.ingress.certManager - ## certManager: false - ## - ## @param applicationSet.webhook.ingress.annotations Additional ingress annotations - ## - annotations: {} - ## @param applicationSet.webhook.ingress.labels Additional ingress labels - ## - labels: {} - ## @param applicationSet.webhook.ingress.ingressClassName Defines which ingress controller will implement the resource - ## - ingressClassName: "" - ## @param applicationSet.webhook.ingress.hostname Ingress hostname for the Argo CD applicationSet ingress - ## Hostname must be provided if Ingress is enabled. - ## - hostname: "" - ## @param applicationSet.webhook.ingress.path Argo CD applicationSet ingress path - ## - path: /api/webhook - ## @param applicationSet.webhook.ingress.pathType Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific` - ## - pathType: Prefix - ## @param applicationSet.webhook.ingress.extraHosts Extra hosts array for the Argo CD applicationSet ingress - ## The list of additional hostnames to be covered with this ingress record. - ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array - ## - extraHosts: [] - ## @param applicationSet.webhook.ingress.extraPaths Extra paths for the Argo CD applicationSet ingress - ## Any additional arbitrary paths that may need to be added to the ingress under the main host. - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. - ## - extraPaths: [] - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - ## @param applicationSet.webhook.ingress.extraTls Extra TLS configuration for the Argo CD applicationSet ingress - ## The tls configuration for additional hostnames to be covered with this ingress record. 
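Exposing that webhook through an ingress instead touches only the keys documented here; in this sketch the hostname is hypothetical and the `traefik` class is an assumption about the target cluster:

applicationSet:
  webhook:
    ingress:
      enabled: true
      ingressClassName: traefik         # assumed ingress controller
      hostname: appset.example.com      # hypothetical hostname
      path: /api/webhook
      pathType: Prefix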
- ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - ## - extraTls: [] - ## - hosts: - ## - argocd.server.local - ## secretName: argocd.server.local-tls - ## - ## @param applicationSet.webhook.ingress.tls Ingress TLS configuration - ## - tls: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param applicationSet.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param applicationSet.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param applicationSet.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `applicationSet.pdb.minAvailable` and `applicationSet.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Argo CD notifications controller parameters - -## notifications controller -## -notifications: - ## @param notifications.enabled Enable notifications controller - ## - enabled: false - ## @param notifications.command Override default container command (useful when using custom images) - ## - command: [] - ## @param notifications.args Override default container args (useful when using custom images). - ## - args: [] - ## @param notifications.extraArgs Add extra arguments to the default arguments for the Argo CD notifications controller - ## - extraArgs: [] - ## @param notifications.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## Argo CD notifications controller log format: text|json - ## @param notifications.logFormat Format for the Argo CD notifications controller logs. Options: [text, json] - ## - logFormat: text - ## Argo CD notifications controller log level - ## @param notifications.logLevel Log level for the Argo CD notifications controller - ## - logLevel: info - ## Argo CD notifications controller ports - ## @param notifications.containerPorts.metrics Argo CD notifications controller metrics port number - ## - containerPorts: - metrics: 8085 - ## Metrics configuration for Argo CD notifications controller - ## - metrics: - ## @param notifications.metrics.enabled Enable Argo CD notifications controller metrics - ## - enabled: false - service: - ## @param notifications.metrics.service.type Argo CD notifications controller service type - ## - type: ClusterIP - ## @param notifications.metrics.service.ports.metrics Argo CD notifications controller metrics service port - ## - ports: - metrics: 8085 - ## Node ports to expose - ## @param notifications.metrics.service.nodePorts.metrics Node port for the notifications controller service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param notifications.metrics.service.clusterIP Argo CD notifications controller metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param notifications.metrics.service.loadBalancerIP Argo CD notifications controller service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param notifications.metrics.service.loadBalancerSourceRanges Argo CD notifications controller service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ##
@param notifications.metrics.service.externalTrafficPolicy Argo CD notifications controller service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param notifications.metrics.service.annotations Additional custom annotations for Argo CD notifications controller service - ## - annotations: {} - ## @param notifications.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param notifications.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Argo CD notifications controller metrics service monitor configuration - ## - serviceMonitor: - ## @param notifications.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param notifications.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param notifications.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param notifications.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param notifications.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param notifications.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param notifications.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param notifications.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param notifications.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param notifications.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param notifications.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). 
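Given that this commit is about turning monitoring on, the Prometheus Operator hookup for the notifications controller deserves a sketch; the `monitoring` namespace and the `prometheus: my-prometheus` selector are the examples from the comments, not cluster facts:

notifications:
  enabled: true
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      namespace: monitoring           # example namespace from the comments
      interval: 30s
      scrapeTimeout: 10s
      selector:
        prometheus: my-prometheus     # example selector from the comments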
- ## - allowExternal: true - ## @param notifications.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param notifications.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param notifications.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param notifications.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param notifications.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param notifications.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## ServiceAccount configuration for the Argo CD notifications controller - ## - serviceAccount: - ## @param notifications.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param notifications.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param notifications.serviceAccount.automountServiceAccountToken Automount service account token for the notifications controller service account - ## - automountServiceAccountToken: false - ## @param notifications.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. - ## - annotations: {} - ## Enable admin clusterrole resources. Allows Argo CD notifications controller to have access to multiple namespaces - ## @param notifications.clusterAdminAccess Enable K8s cluster admin access for the notifications controller - ## - clusterAdminAccess: false - ## Enable Custom Rules for Argo CD notifications controller cluster role - ## @param notifications.clusterRoleRules Use custom rules for notifications controller's cluster role - ## - clusterRoleRules: [] - ## @param notifications.podAffinityPreset Pod affinity preset. Ignored if `notifications.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param notifications.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `notifications.affinity` is set.
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node notifications.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param notifications.nodeAffinityPreset.type Node affinity preset type. Ignored if `notifications.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param notifications.nodeAffinityPreset.key Node label key to match. Ignored if `notifications.affinity` is set - ## - key: "" - ## @param notifications.nodeAffinityPreset.values Node label values to match. Ignored if `notifications.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param notifications.affinity Affinity for Argo CD notifications controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `notifications.podAffinityPreset`, `notifications.podAntiAffinityPreset`, and `notifications.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param notifications.podAnnotations Annotations for Argo CD notifications controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param notifications.podLabels Extra labels for Argo CD notifications controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param notifications.containerSecurityContext.enabled Enabled Argo CD notifications controller containers' Security Context - ## @param notifications.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param notifications.containerSecurityContext.runAsUser Set Argo CD notifications controller containers' Security Context runAsUser - ## @param notifications.containerSecurityContext.runAsGroup Set Argo CD notifications controller containers' Security Context runAsGroup - ## @param notifications.containerSecurityContext.allowPrivilegeEscalation Set Argo CD notifications controller containers' Security Context allowPrivilegeEscalation - ## @param notifications.containerSecurityContext.capabilities.drop Set Argo CD notifications controller containers' Security Context capabilities to be dropped - ## @param notifications.containerSecurityContext.readOnlyRootFilesystem Set Argo CD notifications controller containers' Security Context readOnlyRootFilesystem - ## @param notifications.containerSecurityContext.runAsNonRoot Set Argo CD notifications controller container's Security Context runAsNonRoot - ## @param notifications.containerSecurityContext.privileged Set notifications container's Security Context privileged - ## @param notifications.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Argo CD notifications controller resource requests and limits - ## ref: 
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param notifications.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if notifications.resources is set (notifications.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param notifications.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param notifications.podSecurityContext.enabled Enabled Argo CD notifications controller pods' Security Context - ## @param notifications.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param notifications.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param notifications.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param notifications.podSecurityContext.fsGroup Set Argo CD notifications controller pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## @param notifications.nodeSelector Node labels for Argo CD notifications controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param notifications.tolerations Tolerations for Argo CD notifications controller pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param notifications.priorityClassName Argo CD notifications controller pods' priorityClassName - ## - priorityClassName: "" - ## @param notifications.extraVolumes Optionally specify extra list of additional volumes for the Argo CD notifications controller pod(s) - ## - extraVolumes: [] - ## @param notifications.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD notifications controller container(s) - ## - extraVolumeMounts: [] - ## @param notifications.extraEnvVars Array with extra environment variables to add to Argo CD notifications controller nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param notifications.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD notifications controller nodes - ## - extraEnvVarsCM: "" - ## @param notifications.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD notifications controller nodes - ## - extraEnvVarsSecret: "" - ## Configure extra options for Notification containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param notifications.startupProbe.enabled Enable startupProbe on Notification nodes - ## @param notifications.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param notifications.startupProbe.periodSeconds Period seconds for startupProbe - ## @param 
notifications.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param notifications.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param notifications.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.livenessProbe.enabled Enable livenessProbe on Notification nodes - ## @param notifications.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param notifications.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param notifications.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param notifications.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param notifications.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.readinessProbe.enabled Enable readinessProbe on Notification nodes - ## @param notifications.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param notifications.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param notifications.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param notifications.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param notifications.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param notifications.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param notifications.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Webhook for the Git Generator - ## Ref: https://argocd-notifications.readthedocs.io/en/master/Generators-Git/#webhook-configuration) - ## - webhook: - ingress: - ## @param notifications.webhook.ingress.enabled Enable an ingress resource for Webhooks - ## - enabled: false - ## DEPRECATED: Use server.ingress.annotations instead of server.ingress.certManager - ## certManager: false - ## - ## @param notifications.webhook.ingress.annotations Additional ingress annotations - ## - annotations: {} - ## @param notifications.webhook.ingress.labels Additional ingress labels - ## - labels: {} - ## @param notifications.webhook.ingress.ingressClassName Defines which ingress controller will implement the resource - ## - ingressClassName: "" - ## @param notifications.webhook.ingress.hostname Ingress hostname for the Argo CD notifications ingress - ## Hostname must be provided if Ingress is enabled. - ## - hostname: "" - ## @param notifications.webhook.ingress.path Argo CD notifications ingress path - ## - path: /api/webhook - ## @param notifications.webhook.ingress.pathType Ingress path type. 
One of `Exact`, `Prefix` or `ImplementationSpecific` - ## - pathType: Prefix - ## @param notifications.webhook.ingress.extraHosts Extra hosts array for the Argo CD notifications ingress - ## The list of additional hostnames to be covered with this ingress record. - ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array - ## - extraHosts: [] - ## @param notifications.webhook.ingress.extraPaths Extra paths for the Argo CD notifications ingress - ## Any additional arbitrary paths that may need to be added to the ingress under the main host. - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. - ## - extraPaths: [] - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - ## @param notifications.webhook.ingress.extraTls Extra TLS configuration for the Argo CD notifications ingress - ## The tls configuration for additional hostnames to be covered with this ingress record. - ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - ## - extraTls: [] - ## - hosts: - ## - argocd.server.local - ## secretName: argocd.server.local-tls - ## - ## @param notifications.webhook.ingress.tls Ingress TLS configuration - ## - tls: [] - ## The optional bot component simplifies managing subscriptions - ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/ - bots: - slack: - ## @param notifications.bots.slack.enabled Enable notifications controller - ## - enabled: false - ## @param notifications.bots.slack.command Override default container command (useful when using custom images) - ## - command: [] - ## @param notifications.bots.slack.args Override default container args (useful when using custom images). 
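Enabling the bot is a two-switch change, since it ships as part of the notifications controller; a minimal sketch:

notifications:
  enabled: true        # the bot cannot run without the controller
  bots:
    slack:
      enabled: true
      command: []      # keep the image defaults
      args: []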
- ## - args: [] - ## @param notifications.bots.slack.extraArgs Add extra arguments to the default arguments for the Argo CD Slack bot - ## - extraArgs: [] - ## Argo CD Slack bot service parameters - ## - service: - ## @param notifications.bots.slack.service.type Argo CD Slack bot service type - ## - type: LoadBalancer - ## @param notifications.bots.slack.service.ports.http Argo CD Slack bot service port - ## - ports: - http: 80 - ## Node ports to expose - ## @param notifications.bots.slack.service.nodePorts.http Node port for Argo CD Slack bot service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - ## @param notifications.bots.slack.service.clusterIP Argo CD Slack bot service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param notifications.bots.slack.service.loadBalancerIP Argo CD Slack bot service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param notifications.bots.slack.service.loadBalancerSourceRanges Argo CD Slack bot service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param notifications.bots.slack.service.externalTrafficPolicy Argo CD Slack bot service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param notifications.bots.slack.service.annotations Additional custom annotations for Argo CD Slack bot service - ## - annotations: {} - ## @param notifications.bots.slack.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param notifications.bots.slack.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param notifications.bots.slack.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param notifications.bots.slack.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param notifications.bots.slack.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param notifications.bots.slack.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
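The commented extraIngress example above is abbreviated shorthand rather than valid YAML; written out properly (keeping the placeholder port and label from the comments) a locked-down policy for the bot reads:

notifications:
  bots:
    slack:
      networkPolicy:
        enabled: true
        allowExternal: false
        extraIngress:
          - ports:
              - port: 1234               # placeholder port from the comments
            from:
              - podSelector:
                  matchLabels:
                    role: frontend       # placeholder label from the comments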
- ## - allowExternalEgress: true - ## @param notifications.bots.slack.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param notifications.bots.slack.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param notifications.bots.slack.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param notifications.bots.slack.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param notifications.bots.slack.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## ServiceAccount configuration for the Argo CD Slack bot - ## - serviceAccount: - ## @param notifications.bots.slack.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param notifications.bots.slack.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param notifications.bots.slack.serviceAccount.automountServiceAccountToken Automount service account token for the Slack bot service account - ## - automountServiceAccountToken: false - ## @param notifications.bots.slack.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. - ## - annotations: {} - ## @param notifications.bots.slack.podAffinityPreset Pod affinity preset. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param notifications.bots.slack.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node notifications.bots.slack.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param notifications.bots.slack.nodeAffinityPreset.type Node affinity preset type. Ignored if `notifications.bots.slack.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param notifications.bots.slack.nodeAffinityPreset.key Node label key to match. Ignored if `notifications.bots.slack.affinity` is set - ## - key: "" - ## @param notifications.bots.slack.nodeAffinityPreset.values Node label values to match. Ignored if `notifications.bots.slack.affinity` is set - ## E.g.
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## Argo CD Slack Bot controller ports - ## @param notifications.bots.slack.containerPorts.metrics Slack Bot controller metrics port number - ## - containerPorts: - metrics: 9001 - ## Configure extra options for Slack Bot containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param notifications.bots.slack.startupProbe.enabled Enable startupProbe on Slack Bot nodes - ## @param notifications.bots.slack.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param notifications.bots.slack.startupProbe.periodSeconds Period seconds for startupProbe - ## @param notifications.bots.slack.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param notifications.bots.slack.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param notifications.bots.slack.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.bots.slack.livenessProbe.enabled Enable livenessProbe on Slack Bot nodes - ## @param notifications.bots.slack.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param notifications.bots.slack.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param notifications.bots.slack.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param notifications.bots.slack.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param notifications.bots.slack.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.bots.slack.readinessProbe.enabled Enable readinessProbe on Slack Bot nodes - ## @param notifications.bots.slack.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param notifications.bots.slack.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param notifications.bots.slack.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param notifications.bots.slack.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param notifications.bots.slack.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param notifications.bots.slack.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param notifications.bots.slack.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param notifications.bots.slack.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param notifications.bots.slack.affinity Affinity for Argo CD Slack bot pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `notifications.bots.slack.podAffinityPreset`, `notifications.bots.slack.podAntiAffinityPreset`, and `notifications.bots.slack.nodeAffinityPreset` will be ignored when it's set - ## - 
affinity: {} - ## @param notifications.bots.slack.podAnnotations Annotations for Argo CD Slack bot pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param notifications.bots.slack.podLabels Extra labels for Argo CD Slack bot pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param notifications.bots.slack.containerSecurityContext.enabled Enabled Argo CD Slack bot containers' Security Context - ## @param notifications.bots.slack.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param notifications.bots.slack.containerSecurityContext.runAsUser Set Argo CD Slack bot containers' Security Context runAsUser - ## @param notifications.bots.slack.containerSecurityContext.runAsGroup Set Argo CD Slack bot containers' Security Context runAsGroup - ## @param notifications.bots.slack.containerSecurityContext.allowPrivilegeEscalation Set Argo CD Slack bot containers' Security Context allowPrivilegeEscalation - ## @param notifications.bots.slack.containerSecurityContext.capabilities.drop Set Argo CD Slack bot containers' Security Context capabilities to be dropped - ## @param notifications.bots.slack.containerSecurityContext.readOnlyRootFilesystem Set Argo CD Slack bot containers' Security Context readOnlyRootFilesystem - ## @param notifications.bots.slack.containerSecurityContext.runAsNonRoot Set Argo CD Slack bot container's Security Context runAsNonRoot - ## @param notifications.bots.slack.containerSecurityContext.privileged Set notifications container's Security Context privileged - ## @param notifications.bots.slack.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Argo CD Slack bot resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param notifications.bots.slack.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if notifications.bots.slack.resources is set (notifications.bots.slack.resources is recommended for production). 
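Presets suit sandboxes; production-like installs are steered toward an explicit block, which takes precedence over the preset. The numbers below are the example values from the comments, not a sizing recommendation:

notifications:
  bots:
    slack:
      resources:            # overrides resourcesPreset when non-empty
        requests:
          cpu: 2
          memory: 512Mi
        limits:
          cpu: 3
          memory: 1024Mi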
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param notifications.bots.slack.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param notifications.bots.slack.podSecurityContext.enabled Enabled Argo CD Slack bot pods' Security Context - ## @param notifications.bots.slack.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param notifications.bots.slack.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param notifications.bots.slack.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param notifications.bots.slack.podSecurityContext.fsGroup Set Argo CD Slack bot pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## @param notifications.bots.slack.nodeSelector Node labels for Argo CD Slack bot pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param notifications.bots.slack.tolerations Tolerations for Argo CD Slack bot pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param notifications.bots.slack.priorityClassName Argo CD Slack bot pods' priorityClassName - ## - priorityClassName: "" - ## @param notifications.bots.slack.extraVolumes Optionally specify extra list of additional volumes for the Argo CD Slack bot pod(s) - ## - extraVolumes: [] - ## @param notifications.bots.slack.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD Slack bot container(s) - ## - extraVolumeMounts: [] - ## @param notifications.bots.slack.extraEnvVars Array with extra environment variables to add to Argo CD Slack bot nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param notifications.bots.slack.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD Slack bot nodes - ## - extraEnvVarsCM: "" - ## @param notifications.bots.slack.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD Slack bot nodes - ## - extraEnvVarsSecret: "" - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param notifications.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param notifications.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param notifications.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `notifications.pdb.minAvailable` and `notifications.pdb.maxUnavailable` are empty. 
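A PodDisruptionBudget admits only one of `minAvailable`/`maxUnavailable` at a time; with both left empty the chart falls back to `maxUnavailable: 1`, per the note above. An explicit override could look like the following sketch (the percentage is illustrative):

notifications:
  pdb:
    create: true
    minAvailable: ""        # set at most one of the two fields
    maxUnavailable: "50%"   # absolute counts such as 1 also work
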
- ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Argo CD server Parameters - -## Argo CD server configuration -## -server: - ## @param server.replicaCount Number of Argo CD server replicas to deploy - ## - replicaCount: 1 - ## Configure extra options for Argo CD server containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param server.startupProbe.enabled Enable startupProbe on Argo CD server nodes - ## @param server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param server.startupProbe.periodSeconds Period seconds for startupProbe - ## @param server.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param server.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param server.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param server.livenessProbe.enabled Enable livenessProbe on Argo CD server nodes - ## @param server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param server.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param server.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param server.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param server.readinessProbe.enabled Enable readinessProbe on Argo CD server nodes - ## @param server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param server.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param server.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param server.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param server.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param server.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param server.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Argo CD server resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if server.resources is set (server.resources is recommended for production). 
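The probe blocks above follow the usual kubelet semantics: a container is restarted after `failureThreshold` consecutive liveness failures, so the defaults tolerate roughly initialDelaySeconds + periodSeconds * failureThreshold = 10 + 10 * 3 = 40 seconds of unresponsiveness. For slow cold starts the startup probe, which suspends the other probes until it first succeeds, is the knob to reach for; a sketch with illustrative thresholds:

server:
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 30   # up to ~10 + 30 * 10 = 310s of startup grace
    successThreshold: 1
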
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param server.podSecurityContext.enabled Enabled Argo CD server pods' Security Context - ## @param server.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param server.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param server.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param server.podSecurityContext.fsGroup Set Argo CD server pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param server.containerSecurityContext.enabled Enabled Argo CD server containers' Security Context - ## @param server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param server.containerSecurityContext.runAsUser Set Argo CD server containers' Security Context runAsUser - ## @param server.containerSecurityContext.runAsGroup Set Argo CD server containers' Security Context runAsGroup - ## @param server.containerSecurityContext.allowPrivilegeEscalation Set Argo CD server containers' Security Context allowPrivilegeEscalation - ## @param server.containerSecurityContext.capabilities.drop Set Argo CD containers' server Security Context capabilities to be dropped - ## @param server.containerSecurityContext.readOnlyRootFilesystem Set Argo CD containers' server Security Context readOnlyRootFilesystem - ## @param server.containerSecurityContext.runAsNonRoot Set Argo CD server containers' Security Context runAsNonRoot - ## @param server.containerSecurityContext.privileged Set server container's Security Context privileged - ## @param server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Argo CD server deployment autoscaling - ## @param server.autoscaling.enabled Enable Argo CD server deployment autoscaling - ## @param server.autoscaling.minReplicas Argo CD server deployment autoscaling minimum number of replicas - ## @param server.autoscaling.maxReplicas Argo CD server deployment autoscaling maximum number of replicas - ## @param server.autoscaling.targetCPU Argo CD server deployment autoscaling target CPU percentage - ## @param server.autoscaling.targetMemory Argo CD server deployment autoscaling target memory percentage - ## - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: 50 - targetMemory: 50 - ## Redirect all requests to HTTPS - ## @param server.insecure Disable HTTPS redirection
for Argo CD server - ## - insecure: false - ## @param server.logFormat ArgoCD server logs format. Options: [text, json] - ## - logFormat: text - ## @param server.logLevel ArgoCD server logs level - ## - logLevel: info - ## Argo CD server enable config - ## @param server.configEnabled Enable Argo CD server config - ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml - ## - configEnabled: true - ## Argo CD server URL - ## @param server.url Argo CD server base URL. Required when configuring SSO. Required when enabling dex. - ## - url: "" - ## Argo CD server config. This object will be directly rendered - ## @param server.config [object] Argo CD server configuration that will end on the argocd-cm Config Map - ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/ - ## E.g: - ## repositories: - ## - url: git@github.com:group/repo.git - ## sshPrivateKeySecret: - ## name: secret-name - ## key: sshPrivateKey - ## - type: helm - ## url: https://charts.helm.sh/stable - ## name: stable - ## - type: helm - ## url: https://argoproj.github.io/argo-helm - ## name: argo - ## oidc.config: - ## name: AzureAD - ## issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 - ## clientID: CLIENT_ID - ## clientSecret: $oidc.azuread.clientSecret - ## requestedIDTokenClaims: - ## groups: - ## essential: true - ## requestedScopes: - ## - openid - ## - profile - ## - email - ## dex.config: - ## connectors: - ## # GitHub example - ## - type: github - ## id: github - ## name: GitHub - ## config: - ## clientID: aabbccddeeff00112233 - ## clientSecret: $dex.github.clientSecret - ## orgs: - ## - name: your-github-org - config: - ## Argo CD external base URL. Required when configuring SSO. Required when enabling dex. - ## E.g: - ## url: https://argocd.example.com - ## - url: "{{ `{{ .Values.server.url }}` }}" - ## Argo CD instance label key - ## - application.instanceLabelKey: argocd.argoproj.io/instance - ## If Dex is enabled you need to add connectors here - ## dex.config: | - ## connectors: [] - ## - dex.config: "" - ## Configure the ingress for the Argo CD server - ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ## @param server.ingress.enabled Enable the creation of an ingress for the Argo CD server - ## @param server.ingress.pathType Path type for the Argo CD server ingress - ## @param server.ingress.apiVersion Ingress API version for the Argo CD server ingress - ## @param server.ingress.hostname Ingress hostname for the Argo CD server ingress - ## @param server.ingress.annotations Annotations for the Argo CD server ingress. To enable certificate autogeneration, place here your cert-manager annotations. 
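The ingress block below interpolates helmfile globals such as `.Values.globals.argocd.hostname`, `.Values.globals.argocd.ingressClass` and `.Values.globals.certs.issuerName`. For reference, the shape those globals need to take is roughly the following (key names match the references below; the values are examples, not the real staging settings):

globals:
  argocd:
    hostname: argocd.staging.example.com   # example hostname
    ingressClass: traefik                  # example IngressClass name
  certs:
    issuerName: letsencrypt-staging        # example cert-manager ClusterIssuer
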
- ## @param server.ingress.tls Enable TLS for the Argo CD server ingress - ## @param server.ingress.extraHosts Extra hosts array for the Argo CD server ingress - ## @param server.ingress.path Path array for the Argo CD server ingress - ## @param server.ingress.extraPaths Extra paths for the Argo CD server ingress - ## @param server.ingress.extraTls Extra TLS configuration for the Argo CD server ingress - ## @param server.ingress.secrets Secrets array to mount into the Ingress - ## @param server.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) - ## - ingress: - ## Set to true to enable ingress record generation - ## - enabled: true - ## @param server.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm - ## - selfSigned: false - ## DEPRECATED: Use server.ingress.annotations instead of server.ingress.certManager - ## certManager: false - ## - - ## Ingress Path type - ## - pathType: ImplementationSpecific - ## Override API Version (automatically detected if not set) - ## - apiVersion: "" - ## When the ingress is enabled, a host pointing to this will be created - ## - hostname: {{ .Values.globals.argocd.hostname }} - ## The Path to Argo CD server. You may need to set this to '/*' in order to use this - ## with ALB ingress controllers. - ## - path: / - ## For a full list of possible ingress annotations, please see - ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md - ## Use this parameter to set the required annotations for cert-manager, see - ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## - ## e.g: - ## annotations: - ## kubernetes.io/ingress.class: nginx - ## cert-manager.io/cluster-issuer: cluster-issuer-name - ## - annotations: - kubernetes.io/ingress.class: {{ .Values.globals.argocd.ingressClass }} - cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }} - ## Enable TLS configuration for the hostname defined at ingress.hostname parameter - ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it - ## - tls: true - ## The list of additional hostnames to be covered with this ingress record. - ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array - extraHosts: [] - ## - name: argocd.server.local - ## path: / - ## - - ## Any additional arbitrary paths that may need to be added to the ingress under the main host. - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. - extraPaths: [] - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - - ## The tls configuration for additional hostnames to be covered with this ingress record.
- ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - extraTls: [] - ## - hosts: - ## - argocd.server.local - ## secretName: argocd.server.local-tls - ## - - ## If you're providing your own certificates, please use this to add the certificates as secrets - ## key and certificate should start with -----BEGIN CERTIFICATE----- or - ## REDACTED - ## - ## name should line up with a tlsSecret set further up - ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set - ## - ## It is also possible to create and manage the certificates outside of this helm chart - ## Please see README.md for more information - ## - secrets: [] - ## - name: argocd.server.local-tls - ## key: - ## certificate: - ## - - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: {{ .Values.globals.argocd.ingressClass }} - ## @param server.ingress.extraRules Additional rules to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules - ## e.g: - ## extraRules: - ## - host: example.local - ## http: - ## path: / - ## backend: - ## service: - ## name: example-svc - ## port: - ## name: http - ## - extraRules: [] - ## Metrics configuration for Argo CD server - ## - metrics: - ## Enable metrics for Argo CD server - ## @param server.metrics.enabled Enable metrics for the Argo CD server - ## - enabled: true - service: - ## @param server.metrics.service.type Argo CD server service type - ## - type: ClusterIP - ## @param server.metrics.service.ports.metrics Argo CD server metrics service port - ## - ports: - metrics: 8083 - ## Node ports to expose - ## @param server.metrics.service.nodePorts.metrics Node port for Argo CD server metrics service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param server.metrics.service.clusterIP Argo CD server metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param server.metrics.service.loadBalancerIP Argo CD server service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param server.metrics.service.loadBalancerSourceRanges Argo CD server service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param server.metrics.service.externalTrafficPolicy Argo CD server service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param server.metrics.service.annotations Additional custom annotations for Argo CD server service - ## - annotations: {} - ## @param server.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param server.metrics.service.sessionAffinityConfig Additional
settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Argo CD server metrics service monitor configuration - ## - serviceMonitor: - ## @param server.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param server.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param server.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param server.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param server.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param server.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param server.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param server.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param server.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## Configure the ingress resource that allows you to access the Argo CD gRPC API - ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/ - ## @param server.ingressGrpc.enabled Enable the creation of an ingress for the Argo CD gRPC server - ## @param server.ingressGrpc.pathType Path type for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.apiVersion Ingress API version for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.hostname Ingress hostname for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.annotations Annotations for the Argo CD gRPC server ingress. 
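Note that `server.metrics.enabled: true` above only exposes the exporter; nothing scrapes it until the ServiceMonitor is created and matched by a Prometheus Operator instance. A sketch of wiring it up (the namespace and selector label are assumptions that must agree with the operator's `serviceMonitorSelector`):

server:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      namespace: monitoring              # assumed Prometheus namespace
      interval: 30s
      scrapeTimeout: 10s
      selector:
        release: kube-prometheus-stack   # assumed operator release label
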
To enable certificate autogeneration, place here your cert-manager annotations - ## @param server.ingressGrpc.tls Enable TLS for the Argo CD server ingress - ## @param server.ingressGrpc.extraHosts Extra hosts array for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.path Path array for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.extraPaths Extra paths for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.extraTls Extra TLS configuration for the Argo CD gRPC server ingress - ## @param server.ingressGrpc.secrets Secrets array to mount into the Ingress - ## @param server.ingressGrpc.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) - ## - ingressGrpc: - ## Set to true to enable ingress record generation - ## - enabled: false - ## @param server.ingressGrpc.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm - ## - selfSigned: false - ## DEPRECATED: Use server.ingressGrpc.annotations instead of server.ingressGrpc.certManager - ## certManager: false - ## - - ## Ingress Path type - ## - pathType: ImplementationSpecific - ## Override API Version (automatically detected if not set) - ## - apiVersion: "" - ## When the ingress is enabled, a host pointing to this will be created - ## - hostname: argocd.server.local - ## The Path to Argo CD server gRPC API. You may need to set this to '/*' in order to use this - ## with ALB ingress controllers. - ## - path: / - ## For a full list of possible ingress annotations, please see - ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md - ## Use this parameter to set the required annotations for cert-manager, see - ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## - ## e.g: - ## annotations: - ## kubernetes.io/ingress.class: nginx - ## cert-manager.io/cluster-issuer: cluster-issuer-name - ## - annotations: {} - ## Enable TLS configuration for the hostname defined at ingress.hostname parameter - ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it - ## - tls: false - ## The list of additional hostnames to be covered with this ingress record. - ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array - extraHosts: [] - ## - name: argocd.server.local - ## path: / - ## - - ## Any additional arbitrary paths that may need to be added to the ingress under the main host. - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. - extraPaths: [] - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - - ## The tls configuration for additional hostnames to be covered with this ingress record.
- ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - extraTls: [] - ## - hosts: - ## - argocd.server.local - ## secretName: argocd.server.local-tls - ## - - ## If you're providing your own certificates, please use this to add the certificates as secrets - ## key and certificate should start with -----BEGIN CERTIFICATE----- or - ## REDACTED - ## - ## name should line up with a tlsSecret set further up - ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set - ## - ## It is also possible to create and manage the certificates outside of this helm chart - ## Please see README.md for more information - ## - secrets: [] - ## - name: argocd.server.local-tls - ## key: - ## certificate: - ## - - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: "" - ## @param server.ingressGrpc.extraRules Additional rules to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules - ## e.g: - ## extraRules: - ## - host: example.server.local - ## http: - ## path: / - ## backend: - ## service: - ## name: example-svc - ## port: - ## name: http - ## - extraRules: [] - ## Argo CD server container port - ## @param server.containerPorts.http Argo CD server HTTP container port - ## @param server.containerPorts.https Argo CD server HTTPS container port - ## @param server.containerPorts.metrics Argo CD server metrics container port - containerPorts: - http: 8080 - https: 8443 - metrics: 8083 - ## Argo CD server service parameters - ## - service: - ## @param server.service.type Argo CD service type - ## - type: ClusterIP - ## @param server.service.ports.http HTTP port for the gRPC ingress when enabled - ## @param server.service.ports.https HTTPS port for the gRPC ingress when enabled - ## - ports: - http: 80 - https: 443 - ## Node ports to expose - ## @param server.service.nodePorts.http Node port for HTTP - ## @param server.service.nodePorts.https Node port for HTTPS - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - https: "" - ## @param server.service.clusterIP Argo CD service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param server.service.loadBalancerIP Argo CD service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param server.service.loadBalancerSourceRanges Argo CD service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param server.service.externalTrafficPolicy Argo CD service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param server.service.annotations Additional custom annotations for Argo CD service - ## - annotations: {} - ## @param server.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param server.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or
"ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param server.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param server.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param server.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param server.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param server.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param server.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param server.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param server.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param server.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## @param server.command Override default container command (useful when using custom images) - ## - command: [] - ## @param server.args Override default container args (useful when using custom images) - ## - args: [] - ## @param server.extraArgs concat to the default args - ## - extraArgs: [] - ## @param server.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param server.hostAliases Argo CD server pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param server.podLabels Extra labels for Argo CD server pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param server.podAnnotations Annotations for Argo CD server pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {}
- ## @param server.podAffinityPreset Pod affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param server.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node server.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param server.nodeAffinityPreset.type Node affinity preset type. Ignored if `server.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param server.nodeAffinityPreset.key Node label key to match. Ignored if `server.affinity` is set - ## - key: "" - ## @param server.nodeAffinityPreset.values Node label values to match. Ignored if `server.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param server.affinity Affinity for Argo CD server pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `server.podAffinityPreset`, `server.podAntiAffinityPreset`, and `server.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param server.nodeSelector Node labels for Argo CD server pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param server.tolerations Tolerations for Argo CD server pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param server.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param server.shareProcessNamespace Enable shared process namespace in a pod. - ## If set to false (default), each container will run in a separate namespace, server will have PID=1. - ## If set to true, the /pause will run as the init process and will reap any zombie PIDs, - ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. - ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating.
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param server.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param server.updateStrategy.type Argo CD server update strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param server.priorityClassName Argo CD server pods' priorityClassName - ## - priorityClassName: "" - ## @param server.runtimeClassName Name of the runtime class to be used by pod(s) - ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - ## @param server.lifecycleHooks Lifecycle hooks for the Argo CD server container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param server.extraEnvVars Array with extra environment variables to add to Argo CD server nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param server.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD server nodes - ## - extraEnvVarsCM: "" - ## @param server.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD server nodes - ## - extraEnvVarsSecret: "" - ## @param server.extraVolumes Optionally specify extra list of additional volumes for the Argo CD server pod(s) - ## - extraVolumes: [] - ## @param server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD server container(s) - ## - extraVolumeMounts: [] - ## @param server.sidecars Add additional sidecar containers to the Argo CD server pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param server.initContainers Add additional init containers to the Argo CD server pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## ServiceAccount configuration for the Argo CD server - ## - serviceAccount: - ## @param server.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param server.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param server.serviceAccount.automountServiceAccountToken Automount service account token for the server service account - ## - automountServiceAccountToken: false - ## @param server.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. - ## - annotations: {} - ## Enable admin clusterrole resources.
Allows Argo CD server to have access to multiple namespaces - ## @param server.clusterAdminAccess Enable K8s cluster admin access for the server - ## - clusterAdminAccess: true - ## Enable Custom Rules for Argo CD server cluster role - ## @param server.clusterRoleRules Use custom rules for server's cluster role - ## - clusterRoleRules: [] - - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param server.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param server.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param server.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `server.pdb.minAvailable` and `server.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Argo CD repo server Parameters - -## Argo CD repository server configuration -## -repoServer: - ## @param repoServer.replicaCount Number of Argo CD repo server replicas to deploy - ## - replicaCount: 1 - ## Configure extra options for Argo CD repo server containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param repoServer.startupProbe.enabled Enable startupProbe on Argo CD repo server nodes - ## @param repoServer.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param repoServer.startupProbe.periodSeconds Period seconds for startupProbe - ## @param repoServer.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param repoServer.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param repoServer.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param repoServer.livenessProbe.enabled Enable livenessProbe on Argo CD repo server nodes - ## @param repoServer.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param repoServer.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param repoServer.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param repoServer.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param repoServer.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param repoServer.readinessProbe.enabled Enable readinessProbe on Argo CD repo server nodes - ## @param repoServer.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param repoServer.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param repoServer.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param repoServer.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param repoServer.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param repoServer.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param 
repoServer.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param repoServer.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Argo CD repo server resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param repoServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if repoServer.resources is set (repoServer.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param repoServer.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param repoServer.podSecurityContext.enabled Enabled Argo CD repo server pods' Security Context - ## @param repoServer.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param repoServer.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param repoServer.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param repoServer.podSecurityContext.fsGroup Set Argo CD repo server pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param repoServer.containerSecurityContext.enabled Enabled Argo CD repo server containers' Security Context - ## @param repoServer.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param repoServer.containerSecurityContext.runAsUser Set Argo CD repo server containers' Security Context runAsUser - ## @param repoServer.containerSecurityContext.runAsGroup Set Argo CD repo server containers' Security Context runAsGroup - ## @param repoServer.containerSecurityContext.allowPrivilegeEscalation Set Argo CD repo server containers' Security Context allowPrivilegeEscalation - ## @param repoServer.containerSecurityContext.capabilities.drop Set Argo CD containers' repo server Security Context capabilities to be dropped - ## @param repoServer.containerSecurityContext.readOnlyRootFilesystem Set Argo CD containers' repo server Security Context readOnlyRootFilesystem - ## @param repoServer.containerSecurityContext.runAsNonRoot Set Argo CD repo server containers' Security Context runAsNonRoot - ## @param repoServer.containerSecurityContext.privileged Set repoServer container's Security Context privileged - ## @param repoServer.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - 
seccompProfile: - type: "RuntimeDefault" - ## Repo server service parameters - ## - service: - ## @param repoServer.service.type Repo server service type - ## - type: ClusterIP - ## @param repoServer.service.ports.repoServer Repo server service port - ## - ports: - repoServer: 8081 - ## Node ports to expose - ## @param repoServer.service.nodePorts.repoServer Node port for the repo server service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - repoServer: "" - ## @param repoServer.service.clusterIP Repo server service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param repoServer.service.loadBalancerIP Repo server service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param repoServer.service.loadBalancerSourceRanges Repo server service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param repoServer.service.externalTrafficPolicy Repo server service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param repoServer.service.annotations Additional custom annotations for Repo server service - ## - annotations: {} - ## @param repoServer.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param repoServer.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param repoServer.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param repoServer.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param repoServer.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param repoServer.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
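Since the repo server mainly needs outbound access to Git remotes and chart registries, its egress can be narrowed instead of left wide open: disable `allowExternalEgress` and enumerate the destinations through `extraEgress`. A sketch (the ports are assumptions about where the remotes live; API-server access stays covered by `kubeAPIServerPorts`):

repoServer:
  networkPolicy:
    enabled: true
    allowExternalEgress: false
    extraEgress:
      - ports:
          - port: 443   # HTTPS Git remotes and registries
          - port: 22    # SSH Git remotes, if used
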
- ## - allowExternalEgress: true - ## @param repoServer.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param repoServer.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param repoServer.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param repoServer.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param repoServer.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## Argo CD repo server log format: text|json - ## @param repoServer.logFormat Format for the Argo CD repo server logs. Options: [text, json] - ## - logFormat: text - ## Argo CD repo server log level - ## @param repoServer.logLevel Log level for the Argo CD repo server - ## - logLevel: info - ## Argo CD repo server container port - ## @param repoServer.containerPorts.repoServer Container port for Argo CD repo server - ## @param repoServer.containerPorts.metrics Metrics port for Argo CD repo server - ## - containerPorts: - repoServer: 8081 - metrics: 8084 - ## Metrics configuration for Argo CD repo server - ## - metrics: - ## Enable metrics for Argo CD repo server - ## @param repoServer.metrics.enabled Enable metrics for the Argo CD repo server - ## - enabled: true - service: - ## @param repoServer.metrics.service.type Argo CD repo server service type - ## - type: ClusterIP - ## @param repoServer.metrics.service.ports.metrics Argo CD repo server metrics service port - ## - ports: - metrics: 8084 - ## Node ports to expose - ## @param repoServer.metrics.service.nodePorts.metrics Node port for the repo server metrics service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param repoServer.metrics.service.clusterIP Argo CD repo server metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param repoServer.metrics.service.loadBalancerIP Argo CD repo server service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param repoServer.metrics.service.loadBalancerSourceRanges Argo CD repo server service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param repoServer.metrics.service.externalTrafficPolicy Argo CD repo server service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy:
Cluster - ## @param repoServer.metrics.service.annotations Additional custom annotations for Argo CD repo server service - ## - annotations: {} - ## @param repoServer.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param repoServer.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Argo CD repo server metrics service monitor configuration - ## - serviceMonitor: - ## @param repoServer.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param repoServer.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param repoServer.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param repoServer.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param repoServer.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param repoServer.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param repoServer.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param repoServer.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param repoServer.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## Argo CD repo server deployment autoscaling - ## @param repoServer.autoscaling.enabled Enable Argo CD repo server deployment autoscaling - ## @param repoServer.autoscaling.minReplicas Argo CD repo server deployment autoscaling minimum number of replicas - ## @param repoServer.autoscaling.maxReplicas Argo CD repo server deployment autoscaling maximum number of replicas - ## @param repoServer.autoscaling.targetCPU Argo CD repo server deployment autoscaling target CPU percentage - ## @param repoServer.autoscaling.targetMemory Argo CD repo server deployment autoscaling target memory percentage - ## - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: 50 - targetMemory: 50 - ## ServiceAccount configuration for the Argo CD repo server - ## - serviceAccount: - ## @param repoServer.serviceAccount.create Specifies whether a ServiceAccount for repo server should be created - ## - create: true - ## @param repoServer.serviceAccount.name The
name of the ServiceAccount for repo server to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param repoServer.serviceAccount.automountServiceAccountToken Automount service account token for the repo server service account - ## - automountServiceAccountToken: false - ## @param repoServer.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. - ## - annotations: {} - ## Enable admin clusterrole resources. Allows Argo CD repo server to have access to multiple namespaces - ## @param repoServer.clusterAdminAccess Enable K8s cluster admin access for the repo server - ## - clusterAdminAccess: false - ## Enable Custom Rules for Argo CD server cluster role - ## @param repoServer.clusterRoleRules Use custom rules for repo server's cluster role - ## - clusterRoleRules: [] - ## @param repoServer.command Override default container command (useful when using custom images) - ## - command: [] - ## @param repoServer.args Override default container args (useful when using custom images) - ## - args: [] - ## @param repoServer.extraArgs Add extra args to the default repo server args - ## - extraArgs: [] - ## @param repoServer.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param repoServer.hostAliases Argo CD repo server pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param repoServer.podLabels Extra labels for Argo CD repo server pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param repoServer.podAnnotations Annotations for Argo CD repo server pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param repoServer.podAffinityPreset Pod affinity preset. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param repoServer.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node repoServer.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param repoServer.nodeAffinityPreset.type Node affinity preset type. Ignored if `repoServer.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param repoServer.nodeAffinityPreset.key Node label key to match. Ignored if `repoServer.affinity` is set - ## - key: "" - ## @param repoServer.nodeAffinityPreset.values Node label values to match. Ignored if `repoServer.affinity` is set - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param repoServer.affinity Affinity for Argo CD repo server pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `repoServer.podAffinityPreset`, `repoServer.podAntiAffinityPreset`, and `repoServer.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param repoServer.nodeSelector Node labels for Argo CD repo server pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param repoServer.tolerations Tolerations for Argo CD repo server pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param repoServer.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param repoServer.shareProcessNamespace Enable shared process namespace in a pod. - ## If set to false (default), each container will run in separate namespace, repoServer will have PID=1. - ## If set to true, the /pause will run as init process and will reap any zombie PIDs, - ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. - ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param repoServer.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param repoServer.updateStrategy.type Argo CD repo server statefulset strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param repoServer.priorityClassName Argo CD repo server pods' priorityClassName - ## - priorityClassName: "" - ## @param repoServer.runtimeClassName Name of the runtime class to be used by pod(s) - ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - ## @param repoServer.lifecycleHooks for the Argo CD repo server container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param repoServer.extraEnvVars Array with extra environment variables to add to Argo CD repo server nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param repoServer.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Argo CD repo server nodes - ## - extraEnvVarsCM: "" - ## @param repoServer.extraEnvVarsSecret Name of existing Secret containing extra env vars for Argo CD repo server nodes - ## - extraEnvVarsSecret: "" - ## @param repoServer.extraVolumes Optionally specify extra list of additional volumes for the Argo CD repo server pod(s) - ## - extraVolumes: [] - ## @param repoServer.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Argo CD repo server container(s) - ## - extraVolumeMounts: [] - ## @param repoServer.sidecars Add additional sidecar containers to the Argo CD repo server pod(s) - ## 
e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param repoServer.initContainers Add additional init containers to the Argo CD repo server pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param repoServer.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param repoServer.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param repoServer.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `repoServer.pdb.minAvailable` and `repoServer.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Dex Parameters - -## Dex configuration -## -dex: - ## Bitnami Dex image - ## ref: https://hub.docker.com/r/bitnami/argo-cd/tags/ - ## @param dex.image.registry [default: REGISTRY_NAME] Dex image registry - ## @param dex.image.repository [default: REPOSITORY_NAME/dex] Dex image repository - ## @skip dex.image.tag Dex image tag (immutable tags are recommended) - ## @param dex.image.digest Dex image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param dex.image.pullPolicy Dex image pull policy - ## @param dex.image.pullSecrets Dex image pull secrets - ## @param dex.image.debug Enable Dex image debug mode - ## - image: - registry: docker.io - repository: bitnami/dex - tag: 2.41.1-debian-12-r12 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
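As the comment above notes, pull secrets are never created by the chart itself. A minimal sketch of wiring one in under `dex.image`, assuming a secret created out of band with `kubectl create secret docker-registry` (the name `my-registry-creds` is illustrative):

    # created beforehand, e.g.:
    #   kubectl create secret docker-registry my-registry-creds \
    #     --docker-server=docker.io --docker-username=<user> --docker-password=<token>
    image:
      pullSecrets:
        - my-registry-creds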
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## Enable Dex deployment - ## @param dex.enabled Enable the creation of a Dex deployment for SSO - ## - enabled: false - ## @param dex.replicaCount Number of Dex replicas to deploy - ## - replicaCount: 1 - ## Configure extra options for Dex containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param dex.startupProbe.enabled Enable startupProbe on Dex nodes - ## @param dex.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param dex.startupProbe.periodSeconds Period seconds for startupProbe - ## @param dex.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param dex.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param dex.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param dex.livenessProbe.enabled Enable livenessProbe on Dex nodes - ## @param dex.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param dex.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param dex.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param dex.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param dex.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param dex.readinessProbe.enabled Enable readinessProbe on Dex nodes - ## @param dex.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param dex.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param dex.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param dex.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param dex.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - ## @param dex.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param dex.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param dex.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Dex resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param dex.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if dex.resources is set (dex.resources is recommended for production). 
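The startup probe documented above ships disabled; on slow nodes it can be turned on so the liveness probe does not restart Dex while it is still booting. A small sketch with illustrative numbers (30 failures at 10s apart gives roughly five minutes of grace):

    startupProbe:
      enabled: true
      periodSeconds: 10
      failureThreshold: 30   # tolerate up to ~5 minutes of startup before restarting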
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param dex.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param dex.podSecurityContext.enabled Enabled Dex pods' Security Context - ## @param dex.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param dex.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param dex.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param dex.podSecurityContext.fsGroup Set Dex pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param dex.containerSecurityContext.enabled Enabled Dex containers' Security Context - ## @param dex.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param dex.containerSecurityContext.runAsUser Set Dex containers' Security Context runAsUser - ## @param dex.containerSecurityContext.runAsGroup Set Dex containers' Security Context runAsGroup - ## @param dex.containerSecurityContext.allowPrivilegeEscalation Set Dex containers' Security Context allowPrivilegeEscalation - ## @param dex.containerSecurityContext.readOnlyRootFilesystem Set Dex containers' server Security Context readOnlyRootFilesystem - ## @param dex.containerSecurityContext.runAsNonRoot Set Dex containers' Security Context runAsNonRoot - ## @param dex.containerSecurityContext.capabilities.drop Set Argo CD containers' repo server Security Context capabilities to be dropped - ## @param dex.containerSecurityContext.privileged Set dex container's Security Context privileged - ## @param dex.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Dex service parameters - ## - service: - ## @param dex.service.type Dex service type - ## - type: ClusterIP - ## @param dex.service.ports.http Dex HTTP service port - ## @param dex.service.ports.grpc Dex grpc service port - ## - ports: - http: 5556 - grpc: 5557 - ## Node ports to expose - ## @param dex.service.nodePorts.http HTTP node port for the Dex service - ## @param dex.service.nodePorts.grpc gRPC node port for the Dex service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - grpc: "" - ## @param dex.service.clusterIP Dex service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param dex.service.loadBalancerIP Dex service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param 
dex.service.loadBalancerSourceRanges Dex service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param dex.service.externalTrafficPolicy Dex service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param dex.service.annotations Additional custom annotations for Dex service - ## - annotations: {} - ## @param dex.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param dex.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param dex.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param dex.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param dex.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param dex.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
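With `allowExternal` and `allowExternalEgress` both defaulting to open, the NetworkPolicy described above is permissive. A more restrictive posture might look like the sketch below; the `name: monitoring` namespace label is an assumption about how the scraping namespace is labelled:

    networkPolicy:
      enabled: true
      allowExternal: false        # only pods with the matching client label may connect
      allowExternalEgress: false  # egress limited to kubeAPIServerPorts plus extraEgress
      ingressNSMatchLabels:
        name: monitoring          # admit traffic from the monitoring namespace
      extraEgress:
        - ports:
            - port: 53            # DNS must still resolve
              protocol: UDP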
- ## - allowExternalEgress: true - ## @param dex.networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param dex.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param dex.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy (ignored if allowExternalEgress=true) - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param dex.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param dex.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## Dex container ports - ## @param dex.containerPorts.http Dex container HTTP port - ## @param dex.containerPorts.grpc Dex gRPC port - ## @param dex.containerPorts.metrics Dex metrics port - ## - containerPorts: - http: 5556 - grpc: 5557 - metrics: 5558 - ## Metrics configuration for Dex - ## - metrics: - ## Enable metrics for Argo Dex - ## @param dex.metrics.enabled Enable metrics service for Dex - ## - enabled: false - service: - ## @param dex.metrics.service.type Dex service type - ## - type: ClusterIP - ## @param dex.metrics.service.ports.metrics Dex metrics service port - ## - ports: - metrics: 5558 - ## Node ports to expose - ## @param dex.metrics.service.nodePorts.metrics Node port for the Dex service - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - metrics: "" - ## @param dex.metrics.service.clusterIP Dex service metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param dex.metrics.service.loadBalancerIP Dex service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param dex.metrics.service.loadBalancerSourceRanges Dex service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param dex.metrics.service.externalTrafficPolicy Dex service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param dex.metrics.service.annotations Additional custom annotations for Dex service - ## - annotations: {} - ## @param dex.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param dex.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity -
## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} - ## Dex metrics service monitor configuration - ## - serviceMonitor: - ## @param dex.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param dex.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param dex.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param dex.metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param dex.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: 10s - ## @param dex.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param dex.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param dex.metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param dex.metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## ServiceAccount configuration for the Dex - ## - serviceAccount: - ## @param dex.serviceAccount.create Specifies whether a ServiceAccount should be created for Dex - ## - create: true - ## @param dex.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param dex.serviceAccount.automountServiceAccountToken Automount service account token for the Dex service account - ## - automountServiceAccountToken: false - ## @param dex.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
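Since these helmfiles also manage a kube-prometheus-stack release, the ServiceMonitor documented above could plausibly be wired to it; the `release: kube-prometheus-stack` label is an assumption about what the Prometheus Operator's serviceMonitorSelector matches on:

    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        interval: 30s
        selector:
          release: kube-prometheus-stack   # must match what Prometheus selects on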
- ## - annotations: {} - ## @param dex.command Override default container command (useful when using custom images) - ## - command: [] - ## @param dex.args Override default container args (useful when using custom images) - ## - args: [] - ## @param dex.extraArgs Add extra args to the default args for Dex - ## - extraArgs: [] - ## @param dex.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param dex.hostAliases Dex pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param dex.podLabels Extra labels for Dex pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param dex.podAnnotations Annotations for Dex pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param dex.podAffinityPreset Pod affinity preset. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param dex.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node dex.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param dex.nodeAffinityPreset.type Node affinity preset type. Ignored if `dex.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param dex.nodeAffinityPreset.key Node label key to match. Ignored if `dex.affinity` is set - ## - key: "" - ## @param dex.nodeAffinityPreset.values Node label values to match. Ignored if `dex.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param dex.affinity Affinity for Dex pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `dex.podAffinityPreset`, `dex.podAntiAffinityPreset`, and `dex.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param dex.nodeSelector Node labels for Dex pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param dex.tolerations Tolerations for Dex pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param dex.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param dex.shareProcessNamespace Enable shared process namespace in a pod. - ## If set to false (default), each container will run in separate namespace, dex will have PID=1. - ## If set to true, the /pause will run as init process and will reap any zombie PIDs, - ## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. - ## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
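Tying the scheduling knobs above together, a hard zone pin combined with the default soft anti-affinity could look like this sketch (the zone names are placeholders):

    podAntiAffinityPreset: soft
    nodeAffinityPreset:
      type: hard
      key: topology.kubernetes.io/zone
      values:
        - zonea
        - zoneb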
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param dex.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param dex.updateStrategy.type Dex statefulset strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param dex.priorityClassName Dex pods' priorityClassName - ## - priorityClassName: "" - ## @param dex.runtimeClassName Name of the runtime class to be used by pod(s) - ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - ## @param dex.lifecycleHooks for the Dex container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param dex.extraEnvVars Array with extra environment variables to add to Dex nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param dex.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Dex nodes - ## - extraEnvVarsCM: "" - ## @param dex.extraEnvVarsSecret Name of existing Secret containing extra env vars for Dex nodes - ## - extraEnvVarsSecret: "" - ## @param dex.extraVolumes Optionally specify extra list of additional volumes for the Dex pod(s) - ## - extraVolumes: [] - ## @param dex.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Dex container(s) - ## - extraVolumeMounts: [] - ## @param dex.sidecars Add additional sidecar containers to the Dex pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param dex.initContainers Add additional init containers to the Dex pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param dex.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param dex.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param dex.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `dex.pdb.minAvailable` and `dex.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @section Shared config for Argo CD components -config: - ## @param config.knownHosts [string] Known hosts to be added to the known hosts list by default. 
Check the values to see the default value - ## - knownHosts: | - bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== - github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== - gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= - gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf - gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 - ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - ## @param config.extraKnownHosts Add extra known hosts to the known hosts list - ## E.g.: - ## extraKnownHosts: | - ## gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf - ## gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 - ## - extraKnownHosts: "" - ## @param config.createExtraKnownHosts Whether to create or not the extra known hosts configmap - ## - createExtraKnownHosts: true - ## @param config.styles Custom CSS styles - ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/ - ## E.g.: - ## styles: | - ## .nav-bar { - ## background: linear-gradient(to bottom, #999, #777, #333, #222, #111); - ## } - ## - styles: "" - ## @param config.existingStylesConfigmap Use an existing styles configmap - ## - existingStylesConfigmap: "" - ## @param config.tlsCerts TLS certificates used to verify the authenticity of 
the repository servers - ## Certificates will be generated by default if the values are not set. - ## E.g: - ## tlsCerts: - ## argocd-1.example.com: | - ## -----BEGIN CERTIFICATE----- - ## (...) - ## -----END CERTIFICATE----- - ## argocd-2.example.com: | - ## -----BEGIN CERTIFICATE----- - ## (...) - ## -----END CERTIFICATE----- - ## - tlsCerts: {} - ## @param config.gpgKeys GnuPG public keys to add to the keyring - ## Keys will be generated by default if the values are not set. - ## Note: Public keys should be exported with `gpg --export --armor <KEY_ID>` - ## - gpgKeys: {} - # 4AEE18F83AFDEB23: | - # -----BEGIN PGP PUBLIC KEY BLOCK----- - # ... - # -----END PGP PUBLIC KEY BLOCK----- - - ## @param config.rbac Role-based access control configuration - ## - rbac: {} - # policy.default: role:readonly - # policy.csv: | - # # Grant all members of the group 'my-org:team-alpha' the ability to sync apps in 'my-project' - # p, my-org:team-alpha, applications, sync, my-project/*, allow - # # Grant all members of 'my-org:team-beta' the admin role - # g, my-org:team-beta, role:admin - - ## Argo CD general secret configuration - ## - secret: - ## @param config.secret.create Whether to create or not the secret - ## - create: true - ## Annotations to be added to argocd-secret - ## @param config.secret.annotations General secret extra annotations - ## - annotations: {} - ## Webhook Configs - ## @param config.secret.githubSecret GitHub secret to configure webhooks - ## @param config.secret.gitlabSecret GitLab secret to configure webhooks - ## @param config.secret.bitbucketServerSecret BitBucket secret to configure webhooks - ## @param config.secret.bitbucketUUID BitBucket UUID to configure webhooks - ## @param config.secret.gogsSecret Gogs secret to configure webhooks - ## - githubSecret: "" - gitlabSecret: "" - bitbucketServerSecret: "" - bitbucketUUID: "" - gogsSecret: "" - ## Extra keys to add to the general config secret. Useful for injecting SSO secrets into environment variables. - ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sso - ## @param config.secret.extra Extra keys to add to the configuration secret. - ## All values must be non-empty. - ## E.g: - ## LDAP_PASSWORD: "mypassword" - ## - extra: {} - ## Argo CD TLS Data. - ## @param config.secret.argocdServerTlsConfig.key TLS key for the Argo CD config secret - ## @param config.secret.argocdServerTlsConfig.crt TLS certificate for the Argo CD config secret - ## E.g: - ## key: - ## crt: | - ## -----BEGIN CERTIFICATE----- - ## - ## -----END CERTIFICATE----- - ## -----BEGIN CERTIFICATE----- - ## - ## -----END CERTIFICATE----- - ## - argocdServerTlsConfig: - key: "" - crt: "" - ## Argo admin password - ## @param config.secret.argocdServerAdminPassword Argo CD server admin password. Autogenerated by default. - ## - argocdServerAdminPassword: {{ .Values.globals.argocd.adminPassword }} - ## Password modification time defaults to current time if not set - ## @param config.secret.argocdServerAdminPasswordMtime Argo CD server password modification time - ## E.g: - ## argocdServerAdminPasswordMtime: "2006-01-02T15:04:05Z" - ## - argocdServerAdminPasswordMtime: "" - ## Create a secret with optional repository credentials - ## @param config.secret.repositoryCredentials Repository credentials to add to the Argo CD server config secret - ## E.g.
- ## repositoryCredentials: - ## sample-ssh-key: | - ## REDACTED - ## - ## REDACTED - ## - repositoryCredentials: {} - ## External Cluster Credentials - ## Refs: - ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters - ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials - ## @param config.clusterCredentials Configure external cluster credentials - ## E.g - ## - name: mycluster - ## server: https://mycluster.com - ## labels: {} - ## annotations: {} - ## config: - ## bearerToken: "" - ## tlsClientConfig: - ## insecure: false - ## caData: "" - ## - name: mycluster2 - ## server: https://mycluster2.com - ## labels: {} - ## annotations: {} - ## namespaces: namespace1,namespace2 - ## config: - ## bearerToken: "" - ## tlsClientConfig: - ## insecure: false - ## caData: "" - ## - clusterCredentials: [] -## @section Init Container Parameters - -## 'volumePermissions' init container parameters -## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values -## based on the *podSecurityContext/*containerSecurityContext parameters -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository - ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy - ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 12-debian-12-r35 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
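For reference, the `config.clusterCredentials` list documented above is how extra workload clusters get registered declaratively. A hedged sketch following the structure in those comments, where every host, token and CA value is a placeholder:

    clusterCredentials:
      - name: staging-cluster                    # illustrative name
        server: https://staging.example.com:6443
        config:
          bearerToken: "<service-account-token>"
          tlsClientConfig:
            insecure: false
            caData: "<base64-encoded-ca>"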
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Init container Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser - ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the - ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) - ## - containerSecurityContext: - seLinuxOptions: {} - runAsUser: 0 -## @section Other Parameters - -## RBAC configuration -## -rbac: - ## @param rbac.create Specifies whether RBAC resources should be created - ## - create: true -## Redis parameters -## -redis: - ## Bitnami Redis image - ## ref: https://hub.docker.com/r/bitnami/redis/tags/ - ## @param redis.image.registry [default: REGISTRY_NAME] Redis image registry - ## @param redis.image.repository [default: REPOSITORY_NAME/redis] Redis image repository - ## @skip redis.image.tag Redis image tag (immutable tags are recommended) - ## @param redis.image.digest Redis image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param redis.image.pullPolicy Redis image pull policy - ## @param redis.image.pullSecrets Redis image pull secrets - ## - image: - registry: docker.io - repository: bitnami/redis - tag: 7.4.2-debian-12-r0 - digest: "" - ## Specify an imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param redis.enabled Enable Redis dependency - ## - enabled: false - ## @param redis.nameOverride Name override for the Redis dependency - ## - nameOverride: "" - ## @param redis.service.ports.redis Service port for Redis dependency - ## - service: - ports: - redis: 6379 - ## Use password authentication - ## @param redis.auth.enabled Enable Redis dependency authentication - ## @param redis.auth.existingSecret Existing secret to load redis dependency password - ## @param redis.auth.existingSecretPasswordKey Password key name inside the existing secret - ## - auth: - enabled: true - ## Name of existing secret object containing the password - ## - existingSecret: "" - ## - ## Password key to be retrieved from Redis® secret - ## - existingSecretPasswordKey: 'redis-password' - ## Cluster settings - ## @param redis.architecture Redis® architecture.
Allowed values: `standalone` or `replication` - ## TODO(miguelaeh): We need to test the chart with redis sentinel, it seems to be supported at: https://github.com/argoproj/argo-cd/blob/2a410187565e15633b6f2a8c8d8da22cf02b257d/util/cache/cache.go#L40 - ## - architecture: standalone -## -## External Redis® -## -externalRedis: - ## Can be enabled after redisWait.enabled and redis.enabled are set to false - ## @param externalRedis.enabled Enables External Redis - ## - enabled: true - ## Redis® host - ## @param externalRedis.host External Redis host - ## - host: redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local - ## Redis® port - ## @param externalRedis.port External Redis port - ## - port: 6379 - ## Redis® password for authentication - ## Ignored if existingSecret is set - ## @param externalRedis.password External Redis password - ## - password: {{ .Values.globals.redis.password }} - ## Name of existing secret object containing the password - ## @param externalRedis.existingSecret Existing secret for the external redis - ## - existingSecret: "" - ## Password key to be retrieved from Redis® secret - ## @param externalRedis.existingSecretPasswordKey Password key for the existing secret containing the external redis password - ## - existingSecretPasswordKey: 'redis-password' - ## Specify a label to use with the label selector - ## @param externalRedis.selector External Redis selector labels - ## - selector: {} - # - # selector: - # app.kubernetes.io/component: master - # app.kubernetes.io/instance: redis - # app.kubernetes.io/name: redis -## Wait-for-redis init container configuration -## -redisWait: - ## @param redisWait.enabled Enables waiting for redis - ## - enabled: false - ## @param redisWait.extraArgs Additional arguments for the redis-cli call, such as TLS - ## - extraArgs: '' - ## @param redisWait.containerSecurityContext.enabled Enabled Argo CD repo server containers' Security Context - ## @param redisWait.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param redisWait.containerSecurityContext.runAsUser Set Argo CD repo server containers' Security Context runAsUser - ## @param redisWait.containerSecurityContext.runAsGroup Set Argo CD repo server containers' Security Context runAsGroup - ## @param redisWait.containerSecurityContext.allowPrivilegeEscalation Set Argo CD repo server containers' Security Context allowPrivilegeEscalation - ## @param redisWait.containerSecurityContext.capabilities.drop Set Argo CD containers' repo server Security Context capabilities to be dropped - ## @param redisWait.containerSecurityContext.readOnlyRootFilesystem Set Argo CD containers' repo server Security Context readOnlyRootFilesystem - ## @param redisWait.containerSecurityContext.runAsNonRoot Set Argo CD repo server containers' Security Context runAsNonRoot - ## @param redisWait.containerSecurityContext.privileged Set redisWait container's Security Context privileged - ## @param redisWait.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - privileged: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - diff --git a/k8s/helmfile.d/values/authentik/values.yaml.gotmpl b/k8s/helmfile.d/values/authentik/values.yaml.gotmpl deleted file mode 100644 index 7846656..0000000 --- 
a/k8s/helmfile.d/values/authentik/values.yaml.gotmpl +++ /dev/null @@ -1,1036 +0,0 @@ ---- -# -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible -nameOverride: "" -# -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible -fullnameOverride: "" -# -- Override the Kubernetes version, which is used to evaluate certain manifests -kubeVersionOverride: "" - - -## Globally shared configuration for authentik components. -global: - # -- Provide a name in place of `authentik` - nameOverride: "" - # -- String to fully override `"authentik.fullname"` - fullnameOverride: "" - # -- A custom namespace to override the default namespace for the deployed resources. - namespaceOverride: "" - # -- Common labels for all resources. - additionalLabels: {} - # app: authentik - - # Number of old deployment ReplicaSets to retain. The rest will be garbage collected. - revisionHistoryLimit: 3 - - # Default image used by all authentik components. For GeoIP configuration, see the geoip values below. - image: - # -- If defined, a repository applied to all authentik deployments - repository: ghcr.io/goauthentik/server - # -- Overrides the global authentik whose default is the chart appVersion - tag: "" - # -- If defined, an image digest applied to all authentik deployments - digest: "" - # -- If defined, an imagePullPolicy applied to all authentik deployments - pullPolicy: IfNotPresent - - # -- Secrets with credentials to pull images from a private registry - imagePullSecrets: [] - - # -- Annotations for all deployed Deployments - deploymentAnnotations: {} - - # -- Annotations for all deployed pods - podAnnotations: {} - - # -- Annotations for all deployed secrets - secretAnnotations: {} - - # -- Labels for all deployed pods - podLabels: {} - - # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors. - addPrometheusAnnotations: false - - # -- Toggle and define pod-level security context. - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files - hostAliases: [] - # - ip: 10.20.30.40 - # hostnames: - # - my.hostname - - # -- Default priority class for all components - priorityClassName: "" - - # -- Default node selector for all components - nodeSelector: {} - - # -- Default tolerations for all components - tolerations: [] - - # Default affinity preset for all components - affinity: - # -- Default pod anti-affinity rules. Either: `none`, `soft` or `hard` - podAntiAffinity: soft - # Node affinity rules - nodeAffinity: - # -- Default node affinity rules. Either `none`, `soft` or `hard` - type: hard - # -- Default match expressions for node affinity - matchExpressions: [] - # - key: topology.kubernetes.io/zone - # operator: In - # values: - # - zonea - # - zoneb - - # -- Default [TopologySpreadConstraints] rules for all components - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy for all deployed Deployments - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - # -- Environment variables to pass to all deployed Deployments. 
Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to all deployed Deployments. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Additional volumeMounts to all deployed Deployments. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to all deployed Deployments. - # @default -- `[]` (See [values.yaml]) - volumes: [] - # - name: custom - # emptyDir: {} - - -## Authentik configuration -authentik: - # -- Log level for server and worker - log_level: info - # -- Secret key used for cookie signing and unique user IDs, - # don't change this after the first install - secret_key: {{ .Values.globals.authentik.secretKey }} - events: - context_processors: - # -- Path for the GeoIP City database. If the file doesn't exist, GeoIP features are disabled. - geoip: /geoip/GeoLite2-City.mmdb - # -- Path for the GeoIP ASN database. If the file doesn't exist, GeoIP features are disabled. - asn: /geoip/GeoLite2-ASN.mmdb - email: - # -- SMTP Server emails are sent from, fully optional - host: "" - # -- SMTP server port - port: 587 - # -- SMTP credentials, when left empty, no authentication will be done - username: "" - # -- SMTP credentials, when left empty, no authentication will be done - password: "" - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_tls: false - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_ssl: false - # -- Connection timeout - timeout: 30 - # -- Email from address, can either be in the format "foo@bar.baz" or "authentik <foo@bar.baz>" - from: "" - outposts: - # -- Template used for managed outposts.
The following placeholders can be used - # %(type)s - the type of the outpost - # %(version)s - version of your authentik install - # %(build_hash)s - only for beta versions, the build hash of the image - container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s - error_reporting: - # -- This sends anonymous usage-data, stack traces on errors and - # performance data to sentry.beryju.org, and is fully opt-in - enabled: false - # -- This is a string that is sent to sentry with your error reports - environment: "k8s" - # -- Send PII (Personally identifiable information) data to sentry - send_pii: false - postgresql: - # -- set the postgresql hostname to talk to - # if unset and .Values.postgresql.enabled == true, will generate the default - # @default -- `{{ .Release.Name }}-postgresql` - host: "postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local" - # -- postgresql Database name - # @default -- `authentik` - name: "{{ .Values.globals.authentik.postgres.database }}" - # -- postgresql Username - # @default -- `authentik` - user: "{{ .Values.globals.authentik.postgres.username }}" - password: "{{ .Values.globals.authentik.postgres.password }}" - port: 5432 - redis: - # -- set the redis hostname to talk to - # @default -- `{{ .Release.Name }}-redis-master` - host: "redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local" - password: "{{ .Values.globals.redis.password }}" - - -blueprints: - # -- List of config maps to mount blueprints from. - # Only keys in the configMap ending with `.yaml` will be discovered and applied. - configMaps: [] - # -- List of secrets to mount blueprints from. - # Only keys in the secret ending with `.yaml` will be discovered and applied. - secrets: [] - - -## authentik server -server: - # -- authentik server name - name: server - - # -- The number of server pods to run - replicas: 1 - - ## authentik server Horizontal Pod Autoscaler - autoscaling: - # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik server - enabled: false - # -- Minimum number of replicas for the authentik server [HPA] - minReplicas: 1 - # -- Maximum number of replicas for the authentik server [HPA] - maxReplicas: 5 - # -- Average CPU utilization percentage for the authentik server [HPA] - targetCPUUtilizationPercentage: 50 - # -- Average memory utilization percentage for the authentik server [HPA] - targetMemoryUtilizationPercentage: ~ - # -- Configures the scaling behavior of the target in both Up and Down directions. 
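Putting the HPA fields above together, a conservative enablement for the authentik server might look like this (the replica bounds and target are illustrative):

    autoscaling:
      enabled: true
      minReplicas: 2
      maxReplicas: 5
      targetCPUUtilizationPercentage: 50   # scale on average CPU, leave memory unset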
- behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - # -- Configures custom HPA metrics for the authentik server - # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - metrics: [] - - ## authentik server Pod Disruption Budget - ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - pdb: - # -- Deploy a [PodDisruptionBudget] for the authentik server - enabled: false - # -- Labels to be added to the authentik server pdb - labels: {} - # -- Annotations to be added to the authentik server pdb - annotations: {} - # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) - # @default -- `""` (defaults to 0 if not specified) - minAvailable: "" - # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%) - ## Has higher precedence over `server.pdb.minAvailable` - maxUnavailable: "" - - ## authentik server image - ## This should match what is deployed in the worker. Prefer using global.image - image: - # -- Repository to use to the authentik server - # @default -- `""` (defaults to global.image.repository) - repository: "" # defaults to global.image.repository - # -- Tag to use to the authentik server - # @default -- `""` (defaults to global.image.tag) - tag: "" # defaults to global.image.tag - # -- Digest to use to the authentik server - # @default -- `""` (defaults to global.image.digest) - digest: "" # defaults to global.image.digest - # -- Image pull policy to use to the authentik server - # @default -- `""` (defaults to global.image.pullPolicy) - pullPolicy: "" # defaults to global.image.pullPolicy - - # -- Secrets with credentials to pull images from a private registry - # @default -- `[]` (defaults to global.imagePullSecrets) - imagePullSecrets: [] - - # -- Environment variables to pass to the authentik server. Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the authentik server. Does not apply to GeoIP
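With a single replica the PDB documented above is best left off, but once `server.replicas` (or the HPA) goes above one it becomes useful; a small sketch using a percentage, which the comments above allow:

    pdb:
      enabled: true
      minAvailable: "50%"   # keep at least half the server pods through voluntary evictions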
- # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Specify postStart and preStop lifecycle hooks for your authentik server container - lifecycle: {} - - # -- Additional containers to be added to the authentik server pod - ## Note: Supports use of custom Helm templates - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - - # -- Init containers to add to the authentik server pod - ## Note: Supports use of custom Helm templates - initContainers: [] - # - name: download-tools - # image: alpine:3 - # command: [sh, -c] - # args: - # - echo init - - # -- Additional volumeMounts to the authentik server main container - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to the authentik server pod - volumes: [] - # - name: custom - # emptyDir: {} - - # -- Annotations to be added to the authentik server Deployment - deploymentAnnotations: {} - - # -- Annotations to be added to the authentik server pods - podAnnotations: {} - - # -- Labels to be added to the authentik server pods - podLabels: {} - - # -- Resource limits and requests for the authentik server - resources: {} - # requests: - # cpu: 100m - # memory: 512Mi - # limits: - # memory: 512Mi - - # authentik server container ports - containerPorts: - # -- http container port - http: 9000 - # -- https container port - https: 9443 - # -- metrics container port - metrics: 9300 - - # -- Host Network for authentik server pods - hostNetwork: false - - # -- [DNS configuration] - dnsConfig: {} - # -- Alternative DNS policy for authentik server pods - dnsPolicy: "" - - # -- serviceAccount to use for authentik server pods - serviceAccountName: ~ - - # -- authentik server pod-level security context - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- authentik server container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk.
-  # runAsNonRoot: true
-  # readOnlyRootFilesystem: true
-  # allowPrivilegeEscalation: false
-  # seccompProfile:
-  #   type: RuntimeDefault
-  # capabilities:
-  #   drop:
-  #     - ALL
-
-  ## Liveness, readiness and startup probes for authentik server
-  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  livenessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/live/
-      port: http
-
-  readinessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/ready/
-      port: http
-
-  startupProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 60
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    httpGet:
-      path: /-/health/live/
-      port: http
-
-  # -- terminationGracePeriodSeconds for container lifecycle hook
-  terminationGracePeriodSeconds: 30
-
-  # -- Priority class for the authentik server pods
-  # @default -- `""` (defaults to global.priorityClassName)
-  priorityClassName: ""
-
-  # -- [Node selector]
-  # @default -- `{}` (defaults to global.nodeSelector)
-  nodeSelector: {}
-
-  # -- [Tolerations] for use with node taints
-  # @default -- `[]` (defaults to global.tolerations)
-  tolerations: []
-
-  # -- Assign custom [affinity] rules to the deployment
-  # @default -- `{}` (defaults to the global.affinity preset)
-  affinity: {}
-
-  # -- Assign custom [TopologySpreadConstraints] rules to the authentik server
-  # @default -- `[]` (defaults to global.topologySpreadConstraints)
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
-  topologySpreadConstraints: []
-  # - maxSkew: 1
-  #   topologyKey: topology.kubernetes.io/zone
-  #   whenUnsatisfiable: DoNotSchedule
-
-  # -- Deployment strategy to be added to the authentik server Deployment
-  # @default -- `{}` (defaults to global.deploymentStrategy)
-  deploymentStrategy: {}
-  # type: RollingUpdate
-  # rollingUpdate:
-  #   maxSurge: 25%
-  #   maxUnavailable: 25%
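# A hedged sketch for the scheduling knobs above (nodeSelector/tolerations),
# which ship without an inline example. The label and taint here are
# hypothetical and would have to exist on the cluster already:
#
#   nodeSelector:
#     kubernetes.io/arch: amd64
#   tolerations:
#     - key: dedicated
#       operator: Equal
#       value: auth
#       effect: NoSchedule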
-  ## authentik server service configuration
-  service:
-    # -- authentik server service annotations
-    annotations: {}
-    # -- authentik server service labels
-    labels: {}
-    # -- authentik server service type
-    type: ClusterIP
-    # -- authentik server service http port for NodePort service type (only if `server.service.type` is set to `NodePort`)
-    nodePortHttp: 30080
-    # -- authentik server service https port for NodePort service type (only if `server.service.type` is set to `NodePort`)
-    nodePortHttps: 30443
-    # -- authentik server service http port
-    servicePortHttp: 80
-    # -- authentik server service https port
-    servicePortHttps: 443
-    # -- authentik server service http port name
-    servicePortHttpName: http
-    # -- authentik server service https port name
-    servicePortHttpsName: https
-    # -- authentik server service http port appProtocol
-    # servicePortHttpAppProtocol: HTTP
-    # -- authentik server service https port appProtocol
-    # servicePortHttpsAppProtocol: HTTPS
-    # -- LoadBalancer will get created with the IP specified in this field
-    loadBalancerIP: ""
-    # -- Source IP ranges to allow access to service from
-    loadBalancerSourceRanges: []
-    # -- authentik server service external IPs
-    externalIPs: []
-    # -- Denotes if this service desires to route external traffic to node-local or cluster-wide endpoints
-    externalTrafficPolicy: ""
-    # -- Used to maintain session affinity. Supports `ClientIP` and `None`
-    sessionAffinity: ""
-    # -- Session affinity configuration
-    sessionAffinityConfig: {}
-
-  ## authentik server metrics service configuration
-  metrics:
-    # -- deploy metrics service
-    enabled: true
-    service:
-      # -- metrics service type
-      type: ClusterIP
-      # -- metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
-      clusterIP: ""
-      # -- metrics service annotations
-      annotations: {}
-      # -- metrics service labels
-      labels: {}
-      # -- metrics service port
-      servicePort: 9300
-      # -- metrics service port name
-      portName: metrics
-    serviceMonitor:
-      # -- enable a prometheus ServiceMonitor
-      enabled: false
-      # -- Prometheus ServiceMonitor interval
-      interval: 30s
-      # -- Prometheus ServiceMonitor scrape timeout
-      scrapeTimeout: 3s
-      # -- Prometheus [RelabelConfigs] to apply to samples before scraping
-      relabelings: []
-      # -- Prometheus [MetricsRelabelConfigs] to apply to samples before ingestion
-      metricRelabelings: []
-      # -- Prometheus ServiceMonitor selector
-      selector: {}
-      # prometheus: kube-prometheus
-
-      # -- Prometheus ServiceMonitor scheme
-      scheme: ""
-      # -- Prometheus ServiceMonitor tlsConfig
-      tlsConfig: {}
-      # -- Prometheus ServiceMonitor namespace
-      namespace: ""
-      # -- Prometheus ServiceMonitor labels
-      labels: {}
-      # -- Prometheus ServiceMonitor annotations
-      annotations: {}
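# Since this commit is about enabling monitoring, a minimal hedged sketch of
# scraping these metrics: it assumes a kube-prometheus-stack install whose
# Prometheus selects ServiceMonitors by release label, and the label value
# here is an assumption about the local setup:
#
#   metrics:
#     enabled: true
#     serviceMonitor:
#       enabled: true
#       interval: 30s
#       labels:
#         release: kube-prometheus-stack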
-  ingress:
-    # -- enable an ingress resource for the authentik server
-    enabled: true
-    # -- additional ingress annotations
-    annotations:
-      kubernetes.io/ingress.class: {{ .Values.globals.authentik.ingressClass }}
-      cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-    # -- additional ingress labels
-    labels: {}
-    # -- defines which ingress controller will implement the resource
-    ingressClassName: "{{ .Values.globals.authentik.ingressClass }}"
-    # -- List of ingress hosts
-    hosts:
-      {{- range .Values.globals.authentik.hostnames }}
-      - {{ . }}
-      {{- end }}
-
-    # -- List of ingress paths
-    paths:
-      - /
-    # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
-    pathType: Prefix
-    # -- additional ingress paths
-    extraPaths: []
-    # - path: /*
-    #   pathType: Prefix
-    #   backend:
-    #     service:
-    #       name: ssl-redirect
-    #       port:
-    #         name: use-annotation
-
-    # -- ingress TLS configuration
-    tls:
-      - secretName: authentik-tls
-        hosts:
-          {{- range .Values.globals.authentik.hostnames }}
-          - {{ . }}
-          {{- end }}
-
-    # -- uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
-    https: false
-
-
-## authentik worker
-worker:
-  # -- authentik worker name
-  name: worker
-
-  # -- The number of worker pods to run
-  replicas: 1
-
-  ## authentik worker Horizontal Pod Autoscaler
-  autoscaling:
-    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik worker
-    enabled: false
-    # -- Minimum number of replicas for the authentik worker [HPA]
-    minReplicas: 1
-    # -- Maximum number of replicas for the authentik worker [HPA]
-    maxReplicas: 5
-    # -- Average CPU utilization percentage for the authentik worker [HPA]
-    targetCPUUtilizationPercentage: 50
-    # -- Average memory utilization percentage for the authentik worker [HPA]
-    targetMemoryUtilizationPercentage: ~
-    # -- Configures the scaling behavior of the target in both Up and Down directions.
-    behavior: {}
-    # scaleDown:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #     - type: Pods
-    #       value: 1
-    #       periodSeconds: 180
-    # scaleUp:
-    #   stabilizationWindowSeconds: 300
-    #   policies:
-    #     - type: Pods
-    #       value: 2
-    #       periodSeconds: 60
-    # -- Configures custom HPA metrics for the authentik worker
-    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
-    metrics: []
-
-  ## authentik worker Pod Disruption Budget
-  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-  pdb:
-    # -- Deploy a [PodDisruptionBudget] for the authentik worker
-    enabled: false
-    # -- Labels to be added to the authentik worker pdb
-    labels: {}
-    # -- Annotations to be added to the authentik worker pdb
-    annotations: {}
-    # -- Number of pods that are available after eviction as number or percentage (e.g. 50%)
-    # @default -- `""` (defaults to 0 if not specified)
-    minAvailable: ""
-    # -- Number of pods that are unavailable after eviction as number or percentage (e.g. 50%)
-    ## Has higher precedence over `worker.pdb.minAvailable`
-    maxUnavailable: ""
-
-  ## authentik worker image
-  ## This should match what is deployed in the server. Prefer using global.image
-  image:
-    # -- Repository to use for the authentik worker
-    # @default -- `""` (defaults to global.image.repository)
-    repository: "" # defaults to global.image.repository
-    # -- Tag to use for the authentik worker
-    # @default -- `""` (defaults to global.image.tag)
-    tag: "" # defaults to global.image.tag
-    # -- Digest to use for the authentik worker
-    # @default -- `""` (defaults to global.image.digest)
-    digest: "" # defaults to global.image.digest
-    # -- Image pull policy to use for the authentik worker
-    # @default -- `""` (defaults to global.image.pullPolicy)
-    pullPolicy: "" # defaults to global.image.pullPolicy
-
-  # -- Secrets with credentials to pull images from a private registry
-  # @default -- `[]` (defaults to global.imagePullSecrets)
-  imagePullSecrets: []
-  # -- Environment variables to pass to the authentik worker. Does not apply to GeoIP
-  # See configuration options at https://goauthentik.io/docs/installation/configuration/
-  # @default -- `[]` (See [values.yaml])
-  env: []
-  # - name: AUTHENTIK_VAR_NAME
-  #   value: VALUE
-  # - name: AUTHENTIK_VAR_OTHER
-  #   valueFrom:
-  #     secretKeyRef:
-  #       name: secret-name
-  #       key: secret-key
-  # - name: AUTHENTIK_VAR_ANOTHER
-  #   valueFrom:
-  #     configMapKeyRef:
-  #       name: config-map-name
-  #       key: config-map-key
-
-  # -- envFrom to pass to the authentik worker. Does not apply to GeoIP
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-  # - configMapRef:
-  #     name: config-map-name
-  # - secretRef:
-  #     name: secret-name
-
-  # -- Specify postStart and preStop lifecycle hooks for your authentik worker container
-  lifecycle: {}
-
-  # -- Additional containers to be added to the authentik worker pod
-  ## Note: Supports use of custom Helm templates
-  extraContainers: []
-  # - name: my-sidecar
-  #   image: nginx:latest
-
-  # -- Init containers to add to the authentik worker pod
-  ## Note: Supports use of custom Helm templates
-  initContainers: []
-  # - name: download-tools
-  #   image: alpine:3
-  #   command: [sh, -c]
-  #   args:
-  #     - echo init
-
-  # -- Additional volumeMounts to the authentik worker main container
-  volumeMounts: []
-  # - name: custom
-  #   mountPath: /custom
-
-  # -- Additional volumes to the authentik worker pod
-  volumes: []
-  # - name: custom
-  #   emptyDir: {}
-
-  # -- Annotations to be added to the authentik worker Deployment
-  deploymentAnnotations: {}
-
-  # -- Annotations to be added to the authentik worker pods
-  podAnnotations: {}
-
-  # -- Labels to be added to the authentik worker pods
-  podLabels: {}
-
-  # -- Resource limits and requests for the authentik worker
-  resources: {}
-  # requests:
-  #   cpu: 100m
-  #   memory: 512Mi
-  # limits:
-  #   memory: 512Mi
-
-  # -- Host Network for authentik worker pods
-  hostNetwork: false
-
-  # -- [DNS configuration]
-  dnsConfig: {}
-  # -- Alternative DNS policy for authentik worker pods
-  dnsPolicy: ""
-
-  # -- serviceAccount to use for authentik worker pods. If set, overrides the value used when serviceAccount.create is true
-  serviceAccountName: ~
-
-  # -- authentik worker pod-level security context
-  # @default -- `{}` (See [values.yaml])
-  securityContext: {}
-  # runAsUser: 1000
-  # runAsGroup: 1000
-  # fsGroup: 1000
-
-  # -- authentik worker container-level security context
-  # @default -- See [values.yaml]
-  containerSecurityContext: {}
-  # Not all of the following has been tested. Use at your own risk.
-  # runAsNonRoot: true
-  # readOnlyRootFilesystem: true
-  # allowPrivilegeEscalation: false
-  # seccompProfile:
-  #   type: RuntimeDefault
-  # capabilities:
-  #   drop:
-  #     - ALL
-
-  livenessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  readinessProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 3
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 5
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  startupProbe:
-    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
-    failureThreshold: 60
-    # -- Number of seconds after the container has started before [probe] is initiated
-    initialDelaySeconds: 30
-    # -- How often (in seconds) to perform the [probe]
-    periodSeconds: 10
-    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
-    successThreshold: 1
-    # -- Number of seconds after which the [probe] times out
-    timeoutSeconds: 1
-    ## Probe configuration
-    exec:
-      command:
-        - ak
-        - healthcheck
-
-  # -- terminationGracePeriodSeconds for container lifecycle hook
-  terminationGracePeriodSeconds: 30
-
-  # -- Priority class for the authentik worker pods
-  # @default -- `""` (defaults to global.priorityClassName)
-  priorityClassName: ""
-
-  # -- [Node selector]
-  # @default -- `{}` (defaults to global.nodeSelector)
-  nodeSelector: {}
-
-  # -- [Tolerations] for use with node taints
-  # @default -- `[]` (defaults to global.tolerations)
-  tolerations: []
-
-  # -- Assign custom [affinity] rules to the deployment
-  # @default -- `{}` (defaults to the global.affinity preset)
-  affinity: {}
-
-  # -- Assign custom [TopologySpreadConstraints] rules to the authentik worker
-  # @default -- `[]` (defaults to global.topologySpreadConstraints)
-  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
-  topologySpreadConstraints: []
-  # - maxSkew: 1
-  #   topologyKey: topology.kubernetes.io/zone
-  #   whenUnsatisfiable: DoNotSchedule
-
-  # -- Deployment strategy to be added to the authentik worker Deployment
-  # @default -- `{}` (defaults to global.deploymentStrategy)
-  deploymentStrategy: {}
-  # type: RollingUpdate
-  # rollingUpdate:
-  #   maxSurge: 25%
-  #   maxUnavailable: 25%
-
-
-serviceAccount:
-  # -- Create service account. Needed for managed outposts
-  create: true
-  # -- additional service account annotations
-  annotations: {}
-  serviceAccountSecret:
-    # As we use the authentik-remote-cluster chart as subchart, and that chart
-    # creates a service account secret by default which we don't need here,
-    # disable its creation
-    enabled: false
-  fullnameOverride: authentik
-
-
-geoip:
-  # -- enable GeoIP sidecars for the authentik server and worker pods
-  enabled: false
-
-  editionIds: "GeoLite2-City GeoLite2-ASN"
-  # -- GeoIP update frequency, in hours
-  updateInterval: 8
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  accountId: ""
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  licenseKey: ""
-  ## use existing secret instead of values above
-  existingSecret:
-    # -- name of an existing secret to use instead of values above
-    secretName: ""
-    # -- key in the secret containing the account ID
-    accountId: "account_id"
-    # -- key in the secret containing the license key
-    licenseKey: "license_key"
-
-  image:
-    # -- If defined, a repository for GeoIP images
-    repository: ghcr.io/maxmind/geoipupdate
-    # -- If defined, a tag for GeoIP images
-    tag: v7.1.0
-    # -- If defined, an image digest for GeoIP images
-    digest: ""
-    # -- If defined, an imagePullPolicy for GeoIP images
-    pullPolicy: IfNotPresent
-
-  # -- Environment variables to pass to the GeoIP containers
-  # @default -- `[]` (See [values.yaml])
-  env: []
-  # - name: GEOIPUPDATE_VAR_NAME
-  #   value: VALUE
-  # - name: GEOIPUPDATE_VAR_OTHER
-  #   valueFrom:
-  #     secretKeyRef:
-  #       name: secret-name
-  #       key: secret-key
-  # - name: GEOIPUPDATE_VAR_ANOTHER
-  #   valueFrom:
-  #     configMapKeyRef:
-  #       name: config-map-name
-  #       key: config-map-key
-
-  # -- envFrom to pass to the GeoIP containers
-  # @default -- `[]` (See [values.yaml])
-  envFrom: []
-  # - configMapRef:
-  #     name: config-map-name
-  # - secretRef:
-  #     name: secret-name
-
-  # -- Additional volumeMounts to the GeoIP containers. Make sure the volumes exist for the server and the worker.
-  volumeMounts: []
-  # - name: custom
-  #   mountPath: /custom
-
-  # -- Resource limits and requests for GeoIP containers
-  resources: {}
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # limits:
-  #   memory: 128Mi
-
-  # -- GeoIP container-level security context
-  # @default -- See [values.yaml]
-  containerSecurityContext: {}
-  # Not all of the following has been tested. Use at your own risk.
-  # runAsNonRoot: true
-  # readOnlyRootFilesystem: true
-  # allowPrivilegeEscalation: false
-  # seccompProfile:
-  #   type: RuntimeDefault
-  # capabilities:
-  #   drop:
-  #     - ALL
-
-
-prometheus:
-  rules:
-    enabled: false
-    # -- PrometheusRule namespace
-    namespace: ""
-    # -- PrometheusRule selector
-    selector: {}
-    # prometheus: kube-prometheus
-
-    # -- PrometheusRule labels
-    labels: {}
-    # -- PrometheusRule annotations
-    annotations: {}
-
-
-postgresql:
-  # -- enable the Bitnami PostgreSQL chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/postgresql/ for possible values.
- enabled: false - image: - repository: bitnami/postgresql - tag: 15.8.0-debian-12-r18 - auth: - username: authentik - database: authentik - # password: "" - primary: - extendedConfiguration: | - max_connections = 500 - resourcesPreset: "none" - # persistence: - # enabled: true - # storageClass: - # accessModes: - # - ReadWriteOnce - readReplicas: - resourcesPreset: "none" - backup: - resourcesPreset: "none" - passwordUpdateJob: - resourcesPreset: "none" - volumePermissions: - resourcesPreset: "none" - metrics: - resourcesPreset: "none" - - -redis: - # -- enable the Bitnami Redis chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/redis/ for possible values. - enabled: false - architecture: standalone - auth: - enabled: false - master: - resourcesPreset: "none" - replica: - resourcesPreset: "none" - sentinel: - resourcesPreset: "none" - metrics: - resourcesPreset: "none" - volumePermissions: - resourcesPreset: "none" - sysctl: - resourcesPreset: "none" - - -# -- additional resources to deploy. Those objects are templated. -additionalObjects: [] - diff --git a/k8s/helmfile.d/values/cert-manager/values.yml b/k8s/helmfile.d/values/cert-manager/values.yml deleted file mode 100644 index fe409db..0000000 --- a/k8s/helmfile.d/values/cert-manager/values.yml +++ /dev/null @@ -1,11 +0,0 @@ -crds: - enabled: true -replicaCount: 3 -extraArgs: - - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 - - --dns01-recursive-nameservers-only -podDnsPolicy: None -podDnsConfig: - nameservers: - - 1.1.1.1 - - 9.9.9.9 diff --git a/k8s/helmfile.d/values/certs/values.yaml.gotmpl b/k8s/helmfile.d/values/certs/values.yaml.gotmpl deleted file mode 100644 index a85c518..0000000 --- a/k8s/helmfile.d/values/certs/values.yaml.gotmpl +++ /dev/null @@ -1,28 +0,0 @@ -acmeEmail: {{ .Values.globals.certs.acmeEmail }} -cloudflareEmail: {{ .Values.globals.certs.cloudflareEmail }} - -# staging or production -issuerMode: {{ .Values.globals.certs.certIssuerMode }} - -issuerName: {{ .Values.globals.certs.issuerName }} -privateKeySecretRef: {{ .Values.globals.certs.privateKeySecretRef }} - -certManagerNamespace: {{ .Values.globals.certManager.namespace }} - -cloudflareSecretToken: {{ .Values.globals.certs.cloudflareSecretToken }} -cloudflareTokenSecretName: {{ .Values.globals.certs.cloudflareTokenSecretName }} - -dnsZones: - {{- range .Values.globals.certs.hlMnkeOrg.dnsZones }} - - {{ . | quote }} - {{- end}} - -certificateName: {{ .Values.globals.certs.hlMnkeOrg.certificateName }} -certificateSecretName: {{ .Values.globals.certs.hlMnkeOrg.certificateSecretName }} -certificateNamespace: {{ .Values.globals.certs.hlMnkeOrg.certificateNamespace }} - -commonName: {{ .Values.globals.certs.hlMnkeOrg.commonName }} -dnsNames: - {{- range .Values.globals.certs.hlMnkeOrg.dnsNames }} - - {{ . | quote }} - {{- end}} diff --git a/k8s/helmfile.d/values/ghost/values.yaml.gotmpl b/k8s/helmfile.d/values/ghost/values.yaml.gotmpl deleted file mode 100644 index 940b09b..0000000 --- a/k8s/helmfile.d/values/ghost/values.yaml.gotmpl +++ /dev/null @@ -1,876 +0,0 @@ -# Copyright Broadcom, Inc. All Rights Reserved. 
-# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - defaultStorageClass: {{ .Values.globals.ghost.storageClass }} - ## Security parameters - ## - security: - ## @param global.security.allowInsecureImages Allows skipping image verification - allowInsecureImages: false - ## Compatibility adaptations for Kubernetes platforms - ## - compatibility: - ## Compatibility adaptations for Openshift - ## - openshift: - ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto -## @section Common parameters - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] -## Enable diagnostic mode in the deployment -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity -## @section Ghost Image parameters - -## Bitnami Ghost image -## ref: https://hub.docker.com/r/bitnami/ghost/tags/ -## @param image.registry [default: REGISTRY_NAME] Ghost image registry -## @param image.repository [default: REPOSITORY_NAME/ghost] Ghost image repository -## @skip image.tag Ghost image tag (immutable tags are recommended) -## @param image.digest Ghost image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag -## @param image.pullPolicy Ghost image pull policy -## @param image.pullSecrets Ghost image pull secrets -## @param image.debug Enable image debug mode -## -image: - registry: docker.io - repository: bitnami/ghost - tag: 5.108.1-debian-12-r0 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. 
-  ## Secrets must be manually created in the namespace.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ## e.g:
-  ## pullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  pullSecrets: []
-  ## Enable debug mode
-  ##
-  debug: true
-## @section Ghost Configuration parameters
-## Ghost settings based on environment variables
-## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost#configuration
-
-## @param ghostUsername Ghost user name
-##
-ghostUsername: user
-## @param ghostPassword Ghost user password
-## Defaults to a random 10-character alphanumeric string if not set
-##
-ghostPassword: "{{ .Values.globals.ghost.ghostPassword }}"
-## @param existingSecret Name of existing secret containing Ghost credentials
-## NOTE: Must contain key `ghost-password`
-## NOTE: When it's set, the `ghostPassword` parameter is ignored
-##
-existingSecret: ""
-## @param ghostEmail Ghost user email
-##
-ghostEmail: {{ .Values.globals.ghost.ghostEmail }}
-## @param ghostBlogTitle Ghost Blog title
-##
-ghostBlogTitle: User's Blog
-## @param ghostHost Ghost host to create application URLs
-##
-ghostHost: {{ .Values.globals.ghost.primaryHost }}
-## @param ghostPath URL sub path where to serve the Ghost application
-##
-ghostPath: /
-## @param ghostEnableHttps Configure Ghost to build application URLs using https
-##
-## This controls whether application URLs (such as the home page button) use
-## the https scheme. Enabling it breaks things: clients and the reverse proxy
-## receive a 301 redirect to https from Ghost, but Ghost itself never serves
-## TLS, so the reverse proxy ends up failing with an internal server error.
-## We _do_ want HTTPS URLs in Ghost, so the flag has to be enabled, and yet,
-## for reasons I cannot explain, enabling it (which breaks every request) and
-## then disabling it again (which restores normal traffic) leaves Ghost still
-## generating HTTPS URLs. Maddening, but it works.
-## -## - Tony -ghostEnableHttps: false -## SMTP mail delivery configuration -## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost/#smtp-configuration -## @param smtpHost SMTP server host -## @param smtpPort SMTP server port -## @param smtpUser SMTP username -## @param smtpPassword SMTP user password -## @param smtpService SMTP service -## @param smtpProtocol SMTP protocol (ssl or tls) -## -smtpHost: "" -smtpPort: "" -smtpUser: "" -smtpPassword: "" -smtpService: "" -smtpProtocol: "" -## @param smtpExistingSecret The name of an existing secret with SMTP credentials -## NOTE: Must contain key `smtp-password` -## NOTE: When it's set, the `smtpPassword` parameter is ignored -## -smtpExistingSecret: "" -## @param allowEmptyPassword Allow the container to be started with blank passwords -## -allowEmptyPassword: false -## @param ghostSkipInstall Skip performing the initial bootstrapping for Ghost -## -ghostSkipInstall: false -## @param command Override default container command (useful when using custom images) -## -command: [] -## @param args Override default container args (useful when using custom images) -## -args: [] -## @param extraEnvVars Array with extra environment variables to add to the Ghost container -## e.g: -## extraEnvVars: -## - name: FOO -## value: "bar" -## -extraEnvVars: [] -## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars -## -extraEnvVarsCM: "" -## @param extraEnvVarsSecret Name of existing Secret containing extra env vars -## -extraEnvVarsSecret: "" -## @section Ghost deployment parameters - -## @param replicaCount Number of Ghost replicas to deploy -## NOTE: ReadWriteMany PVC(s) are required if replicaCount > 1 -## -replicaCount: 1 -## @param updateStrategy.type Ghost deployment strategy type -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -## NOTE: Set it to `Recreate` if you use a PV that cannot be mounted on multiple pods -## e.g: -## updateStrategy: -## type: RollingUpdate -## rollingUpdate: -## maxSurge: 25% -## maxUnavailable: 25% -## -updateStrategy: - type: RollingUpdate -## @param priorityClassName Ghost pod priority class name -## -priorityClassName: "" -## @param schedulerName Name of the k8s scheduler (other than default) -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" -## @param topologySpreadConstraints Topology Spread Constraints for pod assignment -## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## The value is evaluated as a template -## -topologySpreadConstraints: [] -## @param automountServiceAccountToken Mount Service Account token in pod -## -automountServiceAccountToken: false -## @param hostAliases Ghost pod host aliases -## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -## -hostAliases: [] -## @param extraVolumes Optionally specify extra list of additional volumes for Ghost pods -## -extraVolumes: [] -## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Ghost container(s) -## -extraVolumeMounts: [] -## @param sidecars Add additional sidecar containers to the Ghost pod -## e.g: -## sidecars: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -sidecars: [] -## @param initContainers Add additional init containers to the Ghost pods -## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## e.g: -## initContainers: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -initContainers: [] -## Pod Disruption Budget configuration -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb -## @param pdb.create Enable/disable a Pod Disruption Budget creation -## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled -## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. -## -pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @param lifecycleHooks Add lifecycle hooks to the Ghost deployment -## -lifecycleHooks: {} -## @param podLabels Extra labels for Ghost pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} -## @param podAnnotations Annotations for Ghost pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} -## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAffinityPreset: "" -## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAntiAffinityPreset: soft -## Node affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## -nodeAffinityPreset: - ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set - ## - key: "" - ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] -## @param affinity Affinity for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set -## -affinity: {} -## @param nodeSelector Node labels for pod assignment -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ -## -nodeSelector: {} -## @param tolerations Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] -## Ghost containers' resource requests and limits -## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
-## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 -## -resourcesPreset: "medium" -## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) -## Example: -## resources: -## requests: -## cpu: 2 -## memory: 512Mi -## limits: -## cpu: 3 -## memory: 1024Mi -## -resources: {} -## Container ports -## @param containerPorts.http Ghost HTTP container port -## @param containerPorts.https Ghost HTTPS container port -## -containerPorts: - http: 2368 - https: 2368 -## @param extraContainerPorts Optionally specify extra list of additional ports for WordPress container(s) -## e.g: -## extraContainerPorts: -## - name: myservice -## containerPort: 9090 -## -extraContainerPorts: [] -## Configure Pods Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param podSecurityContext.enabled Enabled Ghost pods' Security Context -## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy -## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface -## @param podSecurityContext.supplementalGroups Set filesystem extra groups -## @param podSecurityContext.fsGroup Set Ghost pod's Security Context fsGroup -## -podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 -## Configure Container Security Context (only main container) -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enabled containers' Security Context -## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container -## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser -## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup -## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot -## @param containerSecurityContext.privileged Set container's Security Context privileged -## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem -## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation -## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped -## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile -containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - # capabilities: - # drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" -## Configure extra options for Ghost containers' liveness, readiness and startup probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## @param startupProbe.enabled Enable startupProbe -## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe -## @param startupProbe.periodSeconds Period seconds for startupProbe -## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe -## @param startupProbe.failureThreshold Failure threshold for startupProbe -## @param startupProbe.successThreshold Success 
threshold for startupProbe -## -startupProbe: - enabled: false - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 -## @param livenessProbe.enabled Enable livenessProbe -## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe -## @param livenessProbe.periodSeconds Period seconds for livenessProbe -## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe -## @param livenessProbe.failureThreshold Failure threshold for livenessProbe -## @param livenessProbe.successThreshold Success threshold for livenessProbe -## -livenessProbe: - enabled: true - initialDelaySeconds: 120 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 -## @param readinessProbe.enabled Enable readinessProbe -## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe -## @param readinessProbe.periodSeconds Period seconds for readinessProbe -## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe -## @param readinessProbe.failureThreshold Failure threshold for readinessProbe -## @param readinessProbe.successThreshold Success threshold for readinessProbe -## -readinessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 5 - timeoutSeconds: 3 - failureThreshold: 6 - successThreshold: 1 -## @param customLivenessProbe Custom livenessProbe that overrides the default one -## -customLivenessProbe: {} -## @param customReadinessProbe Custom readinessProbe that overrides the default one -# -# The default httpGet probe fails, and I think it's because of this: -# - We enabled ghostEnableHttps -# - The httpGet probe probes for http://xyz:2368, which then redirects to -# https://xyz:2368 -# - However, Ghost itself does not provide TLS. That option just makes HTTP -# redirect to HTTPS -# - The probe is now expecting TLS, but Ghost is still sending regular HTTP -# and the probe thus fails -# -# So we're just gonna do a TCP port check. The alternative is curl'ing and -# expecting a 301 response, but that doesn't seem much better than the TCP -# check, especially since it's so simple. 
-customReadinessProbe: - exec: - command: - - "true" - # tcpSocket: - # port: 2368 - -## @section Traffic Exposure Parameters - -## Ghost service parameters -## -service: - ## @param service.type Ghost service type - ## - type: ClusterIP - ## @param service.ports.http Ghost service HTTP port - ## @param service.ports.https Ghost service HTTPS port - ## - ports: - http: 80 - https: 443 - ## Node ports to expose - ## @param service.nodePorts.http Node port for HTTP - ## @param service.nodePorts.https Node port for HTTPS - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - https: "" - ## @param service.clusterIP Ghost service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param service.loadBalancerIP Ghost service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param service.loadBalancerSourceRanges Ghost service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param service.externalTrafficPolicy Ghost service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param service.annotations Additional custom annotations for Ghost service - ## - annotations: {} - ## @param service.extraPorts Extra port to expose on Ghost service - ## - extraPorts: [] - ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - sessionAffinityConfig: {} -## Configure the ingress resource that allows you to access the Ghost installation -## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ -## -ingress: - ## @param ingress.enabled Enable ingress record generation for Ghost - ## - enabled: true - ## @param ingress.pathType Ingress path type - ## - pathType: ImplementationSpecific - ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) - ## - apiVersion: "" - ## @param ingress.hostname Default host for the ingress record - ## - hostname: {{ .Values.globals.ghost.primaryHost }} - ## @param ingress.path Default path for the ingress record - ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers - ## - path: / - ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
-  ## For a full list of possible ingress annotations, please see
-  ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
-  ## Use this parameter to set the required annotations for cert-manager, see
-  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-  ##
-  ## e.g:
-  ## annotations:
-  ##   kubernetes.io/ingress.class: nginx
-  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-  ##
-  annotations:
-    kubernetes.io/ingress.class: {{ .Values.globals.ghost.ingressClass }}
-    cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
-  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
-  ## TLS certificates will be retrieved from a TLS secret with name: `\{\{- printf "%s-tls" .Values.ingress.hostname \}\}`
-  ## You can:
-  ##   - Use the `ingress.secrets` parameter to create this TLS secret
-  ##   - Rely on cert-manager to create it by setting the corresponding annotations
-  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
-  ##
-  tls: true
-  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-  ##
-  selfSigned: false
-  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
-  ## e.g:
-  ## extraHosts:
-  ##   - name: ghost.local
-  ##     path: /
-  ##
-  extraHosts: []
-  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
-  ## e.g:
-  ## extraPaths:
-  ##   - path: /*
-  ##     backend:
-  ##       serviceName: ssl-redirect
-  ##       servicePort: use-annotation
-  ##
-  extraPaths: []
-  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-  ## e.g:
-  ## extraTls:
-  ##   - hosts:
-  ##       - ghost.local
-  ##     secretName: ghost.local-tls
-  ##
-  extraTls: []
-  ## @param ingress.secrets Custom TLS certificates as secrets
-  ## NOTE: 'key' and 'certificate' are expected in PEM format
-  ## NOTE: 'name' should line up with a 'secretName' set further up
-  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
-  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  ## e.g:
-  ## secrets:
-  ##   - name: ghost.local-tls
-  ##     key: |-
-  ##       REDACTED
-  ##       ...
-  ##       REDACTED
-  ##     certificate: |-
-  ##       -----BEGIN CERTIFICATE-----
-  ##       ...
-  ##       -----END CERTIFICATE-----
-  ##
-  secrets: []
-  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
-  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
- ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: {{ .Values.globals.ghost.ingressClass }} - ## @param ingress.extraRules Additional rules to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules - ## e.g: - ## extraRules: - ## - host: example.local - ## http: - ## path: / - ## backend: - ## service: - ## name: example-svc - ## port: - ## name: http - ## - extraRules: [] -## @section Persistence Parameters - -## Persistence Parameters -## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ -## -persistence: - ## @param persistence.enabled Enable persistence using Persistent Volume Claims - ## - enabled: true - ## @param persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "" - ## @param persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param persistence.accessModes [array] Persistent Volume access modes - ## - accessModes: - - ReadWriteMany - ## @param persistence.size Persistent Volume size - ## - size: 8Gi - ## @param persistence.existingClaim The name of an existing PVC to use for persistence - ## - existingClaim: "" - ## @param persistence.subPath The name of a volume's sub path to mount for persistence - ## - subPath: "" -## 'volumePermissions' init container parameters -## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values -## based on the podSecurityContext/containerSecurityContext parameters -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository - ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy - ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 12-debian-12-r35 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "none" - ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Init container Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param volumePermissions.securityContext.runAsUser Set init container's Security Context runAsUser - ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the - ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) - ## - securityContext: - seLinuxOptions: {} - runAsUser: 0 -## @section Database Parameters - -## MySQL chart configuration -## ref: https://github.com/bitnami/charts/blob/main/bitnami/mysql/values.yaml -## -mysql: - ## @param mysql.enabled Deploy a MySQL server to satisfy the applications database requirements - ## To use an external database set this to false and configure the `externalDatabase` parameters - ## - enabled: false - ## @param mysql.architecture MySQL architecture. Allowed values: `standalone` or `replication` - ## - architecture: standalone - ## MySQL Authentication parameters - ## @param mysql.auth.rootPassword MySQL root password - ## @param mysql.auth.database MySQL custom database - ## @param mysql.auth.username MySQL custom user name - ## @param mysql.auth.password MySQL custom user password - ## @param mysql.auth.existingSecret Existing secret with MySQL credentials - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run - ## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-on-first-run - ## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-user-on-first-run - auth: - rootPassword: "password" - database: bitnami_ghost - username: bn_ghost - password: "password" - existingSecret: "" - ## MySQL Primary configuration - ## - primary: - ## MySQL Primary Persistence parameters - ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ - ## @param mysql.primary.persistence.enabled Enable persistence on MySQL using PVC(s) - ## @param mysql.primary.persistence.storageClass Persistent Volume storage class - ## @param mysql.primary.persistence.accessModes [array] Persistent Volume access modes - ## @param mysql.primary.persistence.size Persistent Volume size - ## - persistence: - enabled: true - storageClass: "" - accessModes: - - ReadWriteOnce - size: 8Gi - ## MySQL primary container's resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## @param mysql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param mysql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} -## External Database Configuration -## All of these values are only used if `mysql.enabled=false` -## -externalDatabase: - ## @param externalDatabase.host External Database server host - ## - host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local - ## @param externalDatabase.port External Database server port - ## - port: 3306 - ## @param externalDatabase.user External Database username - ## - user: {{ .Values.globals.ghost.mysql.username }} - ## @param externalDatabase.password External Database user password - ## - password: {{ .Values.globals.ghost.mysql.password }} - ## @param externalDatabase.database External Database database name - ## - database: {{ .Values.globals.ghost.mysql.database }} - ## @param externalDatabase.existingSecret The name of an existing secret with database credentials - ## NOTE: Must contain key `mysql-password` - ## NOTE: When it's set, the `externalDatabase.password` parameter is ignored - ## - existingSecret: "" - ## @param externalDatabase.ssl External Database ssl - ## - ssl: false - ## @param externalDatabase.sslCaFile External Database ssl CA filepath - ## - sslCaFile: "" -## @section NetworkPolicy parameters - -## Network Policy configuration -## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ -## -networkPolicy: - ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
-  ##
-  allowExternalEgress: true
-  ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
-  ## e.g:
-  ## extraIngress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     from:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  extraIngress: []
-  ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
-  ## e.g:
-  ## extraEgress:
-  ##   - ports:
-  ##       - port: 1234
-  ##     to:
-  ##       - podSelector:
-  ##           - matchLabels:
-  ##               - role: frontend
-  ##       - podSelector:
-  ##           - matchExpressions:
-  ##               - key: role
-  ##                 operator: In
-  ##                 values:
-  ##                   - frontend
-  ##
-  extraEgress: []
-  ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
-  ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
-  ##
-  ingressNSMatchLabels: {}
-  ingressNSPodMatchLabels: {}
-
-## Pods Service Account
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-## @param serviceAccount.create Specifies whether a ServiceAccount should be created
-## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
-## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
-## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
-##
-serviceAccount:
-  create: true
-  name: ""
-  automountServiceAccountToken: false
-  annotations: {}
-
diff --git a/k8s/helmfile.d/values/gitea/values.yaml.gotmpl b/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
deleted file mode 100644
index c688966..0000000
--- a/k8s/helmfile.d/values/gitea/values.yaml.gotmpl
+++ /dev/null
@@ -1,782 +0,0 @@
-# Copyright Broadcom, Inc. All Rights Reserved.
-# SPDX-License-Identifier: APACHE-2.0
-
-## @section Global parameters
-## Global Docker image parameters
-## Please note that this will override the image parameters, including dependencies, configured to use the global value
-## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
-##
-
-## @param global.imageRegistry Global Docker image registry
-## @param global.imagePullSecrets Global Docker registry secret names as an array
-## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
-## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
-##
-global:
-  imageRegistry: ""
-  ## E.g.
-  ## imagePullSecrets:
-  ##   - myRegistryKeySecretName
-  ##
-  imagePullSecrets: []
-  defaultStorageClass: ""
-  storageClass: ""
-  ## Security parameters
-  ##
-  security:
-    ## @param global.security.allowInsecureImages Allows skipping image verification
-    allowInsecureImages: false
-  ## Compatibility adaptations for Kubernetes platforms
-  ##
-  compatibility:
-    ## Compatibility adaptations for Openshift
-    ##
-    openshift:
-      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs.
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto -## @section Common parameters -## - -## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) -## -kubeVersion: "" -## @param nameOverride String to partially override gitea.fullname template (will maintain the release name) -## -nameOverride: "" -## @param fullnameOverride String to fully override gitea.fullname template -## -fullnameOverride: "" -## @param namespaceOverride String to fully override common.names.namespace -## -namespaceOverride: "" -## @param commonAnnotations Common annotations to add to all Gitea resources (sub-charts are not considered). Evaluated as a template -## -commonAnnotations: {} -## @param commonLabels Common labels to add to all Gitea resources (sub-charts are not considered). Evaluated as a template -## -commonLabels: {} -## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template). -## -extraDeploy: [] -## @section Gitea parameters -## - -## Bitnami Gitea image version -## ref: https://hub.docker.com/r/bitnami/gitea/tags/ -## @param image.registry [default: REGISTRY_NAME] Gitea image registry -## @param image.repository [default: REPOSITORY_NAME/gitea] Gitea Image name -## @skip image.tag Gitea Image tag -## @param image.digest Gitea image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag -## @param image.pullPolicy Gitea image pull policy -## @param image.pullSecrets Specify docker-registry secret names as an array -## @param image.debug Specify if debug logs should be enabled -## -image: - registry: docker.io - repository: bitnami/gitea - tag: 1.23.1-debian-12-r3 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Set to true if you would like to see extra information on logs - ## - debug: false -## @param adminUsername User of the application -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration -## -adminUsername: bn_user -## @param adminPassword Application password -## Defaults to a random 10-character alphanumeric string if not set -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration -## -adminPassword: "" -## @param adminEmail Admin email -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration -## -adminEmail: user@example.com -## @param appName Gitea application name -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration -## -appName: example -## @param runMode Gitea application run mode -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration -## -runMode: prod -## @param exposeSSH Make the SSH server accessible -## -exposeSSH: true -## @param rootURL UI Root URL (for link generation) -## -rootURL: "" -## @param command Override default container command (useful when using custom images) -## -command: [] -## @param args Override default container args (useful when using custom images) -## -args: [] -## @param updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached -## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the -## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will -## terminate the single previous pod, so that the new, incoming pod can attach to the PV -## -updateStrategy: - type: RollingUpdate -## @param priorityClassName Gitea pods' priorityClassName -## -priorityClassName: "" -## @param schedulerName Name of the k8s scheduler (other than default) -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" -## @param topologySpreadConstraints Topology Spread Constraints for pod assignment -## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## The value is evaluated as a template -## -topologySpreadConstraints: [] -## @param automountServiceAccountToken Mount Service Account token in pod -## -automountServiceAccountToken: false -## @param hostAliases [array] Add deployment host aliases -## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -## -hostAliases: [] -## @param extraEnvVars Extra environment variables -## For example: -## -extraEnvVars: [] -# - name: BEARER_AUTH -# value: true -## @param extraEnvVarsCM ConfigMap containing extra env vars -## -extraEnvVarsCM: "" -## @param extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data) -## -extraEnvVarsSecret: "" -## @param extraVolumes Array of extra volumes to be added to the deployment (evaluated as template). Requires setting `extraVolumeMounts` -## -extraVolumes: [] -## @param extraVolumeMounts Array of extra volume mounts to be added to the container (evaluated as template). Normally used with `extraVolumes`.
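## For instance, the two lists pair up like this (an illustrative sketch; the
## ConfigMap name and mount path are assumptions, not chart defaults):
## extraVolumes:
##   - name: custom-config
##     configMap:
##       name: gitea-custom-config
## extraVolumeMounts:
##   - name: custom-config
##     mountPath: /bitnami/gitea/custom/extra
##     readOnly: true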
-## -extraVolumeMounts: [] -## @param initContainers Add additional init containers to the pod (evaluated as a template) -## -initContainers: [] -## Pod Disruption Budget configuration -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb -## @param pdb.create Enable/disable a Pod Disruption Budget creation -## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled -## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. -## -pdb: - create: true - minAvailable: "" - maxUnavailable: "" -## @param sidecars Attach additional containers to the pod (evaluated as a template) -## -sidecars: [] -## @param tolerations Tolerations for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] -## @param existingSecret Name of a secret with the application password -## -existingSecret: "" -## @param existingSecretKey Key inside the existing secret containing the password -## -existingSecretKey: "admin-password" -## SMTP mail delivery configuration -## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea/#smtp-configuration -## @param smtpHost SMTP host -## @param smtpPort SMTP port -## @param smtpUser SMTP user -## @param smtpPassword SMTP password -## -smtpHost: "" -smtpPort: "" -smtpUser: "" -smtpPassword: "" -## @param smtpExistingSecret The name of an existing secret with SMTP credentials -## NOTE: Must contain key `smtp-password` -## NOTE: When it's set, the `smtpPassword` parameter is ignored -## -smtpExistingSecret: "" -## @param containerPorts [object] Container ports -## -containerPorts: - http: 3000 - ssh: 2222 -## @param extraContainerPorts Optionally specify extra list of additional ports for Gitea container(s) -## e.g: -## extraContainerPorts: -## - name: myservice -## containerPort: 9090 -## -extraContainerPorts: [] -## Enable OpenID Configurations -## @param openid.enableSignIn Enable sign in with OpenID -## @param openid.enableSignUp Enable sign up with OpenID -openid: - enableSignIn: false - enableSignUp: false -## Enable persistence using Persistent Volume Claims -## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ -## -persistence: - ## @param persistence.enabled Enable persistence using PVC - ## - enabled: true - ## @param persistence.storageClass PVC Storage Class for Gitea volume - ## If defined, storageClassName: <storageClass> - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "" - ## @param persistence.accessModes PVC Access Mode for Gitea volume - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - ## - accessModes: - - ReadWriteOnce - ## @param persistence.size PVC Storage Request for Gitea volume - ## - size: 8Gi - ## @param persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param persistence.existingClaim A manually managed Persistent Volume Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - ## - existingClaim: "" - ## @param persistence.hostPath If defined, the gitea-data volume will mount to the specified hostPath.
- ## Requires persistence.enabled: true - ## Requires persistence.existingClaim: nil|false - ## Default: nil. - ## - hostPath: "" - ## @param persistence.annotations Persistent Volume Claim annotations - ## - annotations: {} - ## @param persistence.selector Selector to match an existing Persistent Volume for Gitea data PVC - ## If set, the PVC can't have a PV dynamically provisioned for it - ## E.g. - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} -## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAffinityPreset: "" -## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAntiAffinityPreset: soft -## Node affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. -## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. -## -nodeAffinityPreset: - type: "" - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] -## @param affinity Affinity for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set -## -affinity: {} -## @param nodeSelector Node labels for pod assignment. Evaluated as a template. -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ -## -nodeSelector: {} -## Gitea container's resource requests and limits -## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 -## -resourcesPreset: "micro" -## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) -## Example: -## resources: -## requests: -## cpu: 2 -## memory: 512Mi -## limits: -## cpu: 3 -## memory: 1024Mi -## -resources: {} -## Configure Pods Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param podSecurityContext.enabled Enable Gitea pods' Security Context -## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy -## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface -## @param podSecurityContext.supplementalGroups Set filesystem extra groups -## @param podSecurityContext.fsGroup Gitea pods' group ID -## -podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 -## Configure Container Security Context (only main container) -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enable containers' Security Context -## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container -## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser -## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup -## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot -## @param containerSecurityContext.privileged Set container's Security Context privileged -## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem -## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation -## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped -## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile -## -containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" -## Configure extra options for startup probe -## Gitea core exposes / to unauthenticated requests, making it a good -## default startup and readiness path. However, that may not always be the -## case. For example, if the image value is overridden to an image containing a -## module that alters that route, or an image that does not auto-install Gitea.
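## In that case the probe path can be pointed at a route the custom image does
## serve; a minimal sketch (the endpoint below is an assumption):
## startupProbe:
##   enabled: true
##   path: /api/healthz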
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## @param startupProbe.enabled Enable startupProbe -## @param startupProbe.path Request path for startupProbe -## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe -## @param startupProbe.periodSeconds Period seconds for startupProbe -## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe -## @param startupProbe.failureThreshold Failure threshold for startupProbe -## @param startupProbe.successThreshold Success threshold for startupProbe -## -startupProbe: - enabled: false - path: / - initialDelaySeconds: 600 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 5 - successThreshold: 1 -## Configure extra options for liveness probe -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## @param livenessProbe.enabled Enable livenessProbe -## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe -## @param livenessProbe.periodSeconds Period seconds for livenessProbe -## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe -## @param livenessProbe.failureThreshold Failure threshold for livenessProbe -## @param livenessProbe.successThreshold Success threshold for livenessProbe -## -livenessProbe: - enabled: true - initialDelaySeconds: 600 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 5 - successThreshold: 1 -## Configure extra options for readiness probe -## Gitea core exposes / to unauthenticated requests, making it a good -## default startup and readiness path. However, that may not always be the -## case. For example, if the image value is overridden to an image containing a -## module that alters that route, or an image that does not auto-install Gitea. -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## @param readinessProbe.enabled Enable readinessProbe -## @param readinessProbe.path Request path for readinessProbe -## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe -## @param readinessProbe.periodSeconds Period seconds for readinessProbe -## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe -## @param readinessProbe.failureThreshold Failure threshold for readinessProbe -## @param readinessProbe.successThreshold Success threshold for readinessProbe -## -readinessProbe: - enabled: true - path: / - initialDelaySeconds: 30 - periodSeconds: 5 - timeoutSeconds: 1 - failureThreshold: 5 - successThreshold: 1 -## @param customStartupProbe Override default startup probe -## -customStartupProbe: {} -## @param customLivenessProbe Override default liveness probe -## -customLivenessProbe: {} -## @param customReadinessProbe Override default readiness probe -## -customReadinessProbe: {} -## @param lifecycleHooks LifecycleHook to set additional configuration at startup Evaluated as a template -## -lifecycleHooks: {} -## @param podAnnotations Pod annotations -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} -## @param podLabels Add additional labels to the pod (evaluated as a template) -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} -## @section Traffic Exposure Parameters -## - -## Kubernetes configuration. 
For minikube, set this to NodePort, elsewhere use LoadBalancer -## -service: - ## @param service.type Kubernetes Service type - ## - type: LoadBalancer - ## @param service.ports.http Service HTTP port - ## @param service.ports.ssh Service SSH port - ## - ports: - http: 80 - ssh: 22 - ## @param service.loadBalancerSourceRanges Restricts access for LoadBalancer (only with `service.type: LoadBalancer`) - ## e.g: - ## loadBalancerSourceRanges: - ## - 0.0.0.0/0 - ## - loadBalancerSourceRanges: [] - ## @param service.loadBalancerIP loadBalancerIP for the Gitea Service (optional, cloud specific) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param service.nodePorts [object] Kubernetes node port - ## nodePorts: - ## http: - ## ssh: - ## - nodePorts: - http: "" - ssh: "" - ## @param service.externalTrafficPolicy Enable client source IP preservation - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param service.clusterIP Gitea service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param service.annotations Additional custom annotations for Gitea service - ## - annotations: {} - ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - -## Network Policy configuration -## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ -## -networkPolicy: - ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
- ## - allowExternalEgress: true - ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - -## Configure the ingress resource that allows you to access the -## Gitea installation. Set up the URL -## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ -## -ingress: - ## @param ingress.enabled Enable ingress controller resource - ## - enabled: false - ## @param ingress.pathType Ingress Path type - ## - pathType: ImplementationSpecific - ## @param ingress.apiVersion Override API Version (automatically detected if not set) - ## - apiVersion: "" - ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: "" - ## @param ingress.hostname Default host for the ingress resource - ## - hostname: "gitea.local" - ## @param ingress.path The Path to Gitea. You may need to set this to '/*' in order to use this - ## with ALB ingress controllers. - ## - path: / - ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. - ## For a full list of possible ingress annotations, please see - ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md - ## Use this parameter to set the required annotations for cert-manager, see - ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## - ## e.g: - ## annotations: - ## kubernetes.io/ingress.class: nginx - ## cert-manager.io/cluster-issuer: cluster-issuer-name - ## - annotations: {} - ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter - ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it - ## - tls: false - ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm - ## - selfSigned: false - ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
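## Pulling the pieces above together, a plausible sketch for a cluster running
## Traefik and cert-manager (hostname, class and issuer names are assumptions):
## ingress:
##   enabled: true
##   ingressClassName: traefik
##   hostname: gitea.example.com
##   annotations:
##     cert-manager.io/cluster-issuer: letsencrypt-issuer
##   tls: true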
- ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array - ## extraHosts: - ## - name: gitea.local - ## path: / - ## - extraHosts: [] - ## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host. - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. - ## extraPaths: - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - extraPaths: [] - ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. - ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - ## extraTls: - ## - hosts: - ## - gitea.local - ## secretName: gitea.local-tls - ## - extraTls: [] - ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets - ## key and certificate should start with -----BEGIN CERTIFICATE----- or - ## REDACTED - ## - ## name should line up with a tlsSecret set further up - ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set - ## - ## It is also possible to create and manage the certificates outside of this helm chart - ## Please see README.md for more information - ## Example: - ## - name: gitea.local-tls - ## key: - ## certificate: - ## - secrets: [] - ## @param ingress.extraRules Additional rules to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules - ## e.g: - ## extraRules: - ## - host: example.local - ## http: - ## path: / - ## backend: - ## service: - ## name: example-svc - ## port: - ## name: http - ## - extraRules: [] -## @section Other Parameters -## - -## Service account for Gitea to use. -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## -serviceAccount: - ## @param serviceAccount.create Enable creation of ServiceAccount for Gitea pod - ## - create: true - ## @param serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: false - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} -## @section Database parameters -## - -## PostgreSQL chart configuration -## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml -## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart -## @param postgresql.auth.username Name for a custom user to create -## @param postgresql.auth.password Password for the custom user to create -## @param postgresql.auth.database Name for a custom database to create -## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials -## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`) -## @param postgresql.service.ports.postgresql PostgreSQL service port -## -postgresql: - enabled: false - auth: - username: bn_gitea - password: "" - database: bitnami_gitea - existingSecret: "" - architecture: standalone - service: - ports: - postgresql: 5432 -## External PostgreSQL configuration -## All of these values are only used when postgresql.enabled is set to false -## @param externalDatabase.host Database host -## @param externalDatabase.port Database port number -## @param externalDatabase.user Non-root username for Gitea -## @param externalDatabase.password Password for the non-root username for Gitea -## @param externalDatabase.database Gitea database name -## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials -## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials -## -externalDatabase: - host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local - port: 5432 - user: {{ .Values.globals.gitea.postgres.username }} - database: {{ .Values.globals.gitea.postgres.database }} - password: {{ .Values.globals.gitea.postgres.password }} - existingSecret: "" - existingSecretPasswordKey: "db-password" -## @section Volume Permissions parameters -## - -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) - ## - enabled: false - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name - ## @skip volumePermissions.image.tag Init container volume-permissions image tag - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 12-debian-12-r35 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init containers' resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - diff --git a/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl b/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl deleted file mode 100644 index dbcda86..0000000 --- a/k8s/helmfile.d/values/gitlab/values.yaml.gotmpl +++ /dev/null @@ -1,1398 +0,0 @@ -## NOTICE - # - # Due to the scope and complexity of this chart, all possible values are - not documented in this file. Extensive documentation is available. - # - # Please read the docs: https://docs.gitlab.com/charts/ - # - # Because properties are regularly added, updated, or relocated, it is - # _strongly suggested_ to not "copy and paste" this YAML. Please provide - # Helm only those properties you need, and allow the defaults to be - # provided by the version of this chart at the time of deployment. - - ## Advanced Configuration - ## https://docs.gitlab.com/charts/advanced - # - # Documentation for advanced configuration, such as - # - External PostgreSQL - # - External Gitaly - # - External Redis - # - External NGINX - # - External Object Storage providers - # - PersistentVolume configuration - - ## The global properties are used to configure multiple charts at once. - ## https://docs.gitlab.com/charts/charts/globals - global: - common: - labels: {} - - image: {} - # Registry value override is only available for the following Charts: - # - Spamcheck - # - Mailroom - # If specifying a value here, be sure to also configure - # `gitlab.<chart>.image.repository` to a value that does not - # include the default registry domain `registry.gitlab.com`. - # Support for other charts is coming as an iterative rollout.
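# As a hedged sketch, a private-mirror override for the supported charts might
# look like this (the registry host and secret name are assumptions):
# global:
#   image:
#     registry: registry.example.com
#     pullSecrets:
#       - name: mirror-pull-secret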
- # See https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2859 - # for more information. - # registry: - - # pullPolicy: IfNotPresent - # pullSecrets: [] - # tagSuffix: "" - - ## Supplemental Pod labels. Will not be used for selectors. - pod: - labels: {} - - ## https://docs.gitlab.com/charts/installation/deployment#deploy-the-community-edition - edition: ee - - ## https://docs.gitlab.com/charts/charts/globals#gitlab-version - gitlabVersion: "17.8.1" - - ## https://docs.gitlab.com/charts/charts/globals#application-resource - application: - create: false - links: [] - allowClusterRoles: true - ## https://docs.gitlab.com/charts/charts/globals#configure-host-settings - hosts: - domain: example.com - hostSuffix: - https: true - externalIP: - ssh: - gitlab: {} - minio: {} - registry: {} - tls: {} - smartcard: {} - kas: {} - pages: {} - - ## https://docs.gitlab.com/charts/charts/globals#configure-ingress-settings - ingress: - apiVersion: "" - configureCertmanager: true - useNewIngressForCerts: false - provider: nginx - # class: - annotations: {} - enabled: true - tls: {} - # enabled: true - # secretName: - path: / - pathType: Prefix - - # Override the API version to use for HorizontalPodAutoscaler - hpa: - apiVersion: "" - - # Enable KEDA globally (https://keda.sh/) - keda: - enabled: false - - # Override the API version to use for PodDisruptionBudget - pdb: - apiVersion: "" - - # Override the API version to use for CronJob - batch: - cronJob: - apiVersion: "" - - # Override enablement of ServiceMonitor and PodMonitor objects. - monitoring: - enabled: false - - gitlab: - ## Enterprise license for this GitLab installation - ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#initial-enterprise-license - ## If allowing shared-secrets generation, this is OPTIONAL. - license: {} - # secret: RELEASE-gitlab-license - # key: license - - ## Initial root password for this GitLab installation - ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#initial-root-password - ## If allowing shared-secrets generation, this is OPTIONAL. 
- initialRootPassword: {} - # secret: RELEASE-gitlab-initial-root-password - # key: password - - ## https://docs.gitlab.com/charts/charts/globals#configure-postgresql-settings - psql: - connectTimeout: - keepalives: - keepalivesIdle: - keepalivesInterval: - keepalivesCount: - tcpUserTimeout: - password: {} - # useSecret: - # secret: - # key: - # file: - # host: postgresql.hostedsomewhere.else - # port: 123 - # username: gitlab - # database: gitlabhq_production - # applicationName: - # preparedStatements: false - # databaseTasks: true - main: {} - # host: postgresql.hostedsomewhere.else - # port: 123 - # username: gitlab - # database: gitlabhq_production - # applicationName: - # preparedStatements: false - # databaseTasks: true - ci: {} - # host: postgresql.hostedsomewhere.else - # port: 123 - # username: gitlab - # database: gitlabhq_production_ci - # applicationName: - # preparedStatements: false - # databaseTasks: false - - ## https://docs.gitlab.com/charts/charts/globals#configure-redis-settings - redis: - auth: - enabled: true - # secret: - # key: - # connectTimeout: 1 - # readTimeout: 1 - # writeTimeout: 1 - # host: redis.hostedsomewhere.else - # port: 6379 - # database: 0 - # user: webservice - # sentinels: - # - host: - # port: - sentinelAuth: - enabled: false - # secret: - # key: - - ## https://docs.gitlab.com/charts/charts/globals#configure-gitaly-settings - gitaly: - enabled: true - authToken: {} - # secret: - # key: - # serviceName: - internal: - names: [default] - external: [] - service: - name: gitaly - type: ClusterIP - externalPort: 8075 - internalPort: 8075 - tls: - externalPort: 8076 - internalPort: 8076 - tls: - enabled: false - # secretName: - - praefect: - enabled: false - ntpHost: pool.ntp.org - replaceInternalGitaly: true - authToken: {} - autoMigrate: true - dbSecret: {} - virtualStorages: - - name: default - gitalyReplicas: 3 - maxUnavailable: 1 - psql: - sslMode: disable - # serviceName: - service: - name: praefect - type: ClusterIP - externalPort: 8075 - internalPort: 8075 - tls: - externalPort: 8076 - internalPort: 8076 - tls: - enabled: false - # secretName: - - ## https://docs.gitlab.com/charts/charts/globals#configure-minio-settings - minio: - enabled: true - credentials: {} - # secret: - - ## https://docs.gitlab.com/charts/charts/globals#configure-appconfig-settings - ## Rails based portions of this chart share many settings - appConfig: - ## https://docs.gitlab.com/charts/charts/globals#general-application-settings - # cdnHost: - enableUsagePing: true - enableSeatLink: true - enableImpersonation: - applicationSettingsCacheSeconds: 60 - usernameChangingEnabled: true - issueClosingPattern: - defaultTheme: - defaultColorMode: - defaultSyntaxHighlightingTheme: - defaultProjectsFeatures: - issues: true - mergeRequests: true - wiki: true - snippets: true - builds: true - graphQlTimeout: - webhookTimeout: - maxRequestDurationSeconds: - - ## https://docs.gitlab.com/charts/charts/globals#cron-jobs-related-settings - cron_jobs: {} - ## Flag stuck CI builds as failed - # stuck_ci_jobs_worker: - # cron: "0 * * * *" - ## Schedule pipelines in the near future - # pipeline_schedule_worker: - # cron: "19 * * * *" - ## Remove expired build artifacts - # expire_build_artifacts_worker: - # cron: "*/7 * * * *" - ## Periodically run 'git fsck' on all repositories. 
- # repository_check_worker: - # cron: "20 * * * *" - ## Send admin emails once a week - # admin_email_worker: - # cron: "0 0 * * 0" - ## Remove outdated repository archives - # repository_archive_cache_worker: - # cron: "0 * * * *" - ## Verify custom GitLab Pages domains - # pages_domain_verification_cron_worker: - # cron: "*/15 * * * *" - # schedule_migrate_external_diffs_worker: - # cron: "15 * * * *" - ## Prune stale group runners on opted-in namespaces - # ci_runners_stale_group_runners_prune_worker_cron: - # cron: "30 * * * *" - ## Periodically update ci_runner_versions table with up-to-date versions and status - # ci_runner_versions_reconciliation_worker: - # cron: "@daily" - ## Periodically clean up stale ci_runner_machines records - # ci_runners_stale_machines_cleanup_worker: - # cron: "36 * * * *" - # ci_click_house_finished_pipelines_sync_worker: - # cron: "*/4 * * * *" - # args: [0, 1] - ### GitLab Geo - # Geo Primary only! - # geo_prune_event_log_worker: - # cron: "*/5 * * * *" - ## GitLab Geo repository sync worker - # geo_repository_sync_worker: - # cron: "*/5 * * * *" - ## GitLab Geo file download dispatch worker - # geo_file_download_dispatch_worker: - # cron: "*/10 * * * *" - ## GitLab Geo repository verification primary batch worker - # geo_repository_verification_primary_batch_worker: - # cron: "*/5 * * * *" - ## GitLab Geo repository verification secondary scheduler worker - # geo_repository_verification_secondary_scheduler_worker: - # cron: "*/5 * * * *" - ## GitLab Geo migrated local files clean up worker - # geo_migrated_local_files_clean_up_worker: - # cron: "15 */6 * * *" - ### LDAP - # ldap_sync_worker: - # cron: "30 1 * * *" - # ldap_group_sync_worker: - # cron: "0 * * * *" - ### Snapshot active user statistics - # historical_data_worker: - # cron: "0 12 * * *" - # loose_foreign_keys_cleanup_worker_cron: - # cron: "*/5 * * * *" - - ## https://docs.gitlab.com/charts/charts/globals#content-security-policy - contentSecurityPolicy: - enabled: false - report_only: true - # directives: {} - - ## https://docs.gitlab.com/charts/charts/globals#gravatarlibravatar-settings - gravatar: - plainUrl: - sslUrl: - - ## https://docs.gitlab.com/charts/charts/globals#hooking-analytics-services-to-the-gitlab-instance - extra: - googleAnalyticsId: - matomoUrl: - matomoSiteId: - matomoDisableCookies: - oneTrustId: - googleTagManagerNonceId: - bizible: - - ## https://docs.gitlab.com/charts/charts/globals#lfs-artifacts-uploads-packages-external-mr-diffs-and-dependency-proxy - object_store: - enabled: false - proxy_download: true - storage_options: {} - # server_side_encryption: - # server_side_encryption_kms_key_id - connection: {} - # secret: - # key: - lfs: - enabled: true - proxy_download: true - bucket: git-lfs - connection: {} - # secret: - # key: - artifacts: - enabled: true - proxy_download: true - bucket: gitlab-artifacts - connection: {} - # secret: - # key: - uploads: - enabled: true - proxy_download: true - bucket: gitlab-uploads - connection: {} - # secret: - # key: - packages: - enabled: true - proxy_download: true - bucket: gitlab-packages - connection: {} - externalDiffs: - enabled: false - when: - proxy_download: true - bucket: gitlab-mr-diffs - connection: {} - terraformState: - enabled: false - bucket: gitlab-terraform-state - connection: {} - ciSecureFiles: - enabled: false - bucket: gitlab-ci-secure-files - connection: {} - dependencyProxy: - enabled: false - proxy_download: true - bucket: gitlab-dependency-proxy - connection: {} - - backups: - bucket: gitlab-backups 
- tmpBucket: tmp - - ## https://docs.gitlab.com/charts/charts/globals#outgoing-email - ## Microsoft Graph Mailer settings - microsoft_graph_mailer: - enabled: false - user_id: "" - tenant: "" - client_id: "" - client_secret: - secret: "" - key: secret - azure_ad_endpoint: "https://login.microsoftonline.com" - graph_endpoint: "https://graph.microsoft.com" - - ## https://docs.gitlab.com/charts/installation/command-line-options.html#incoming-email-configuration - ## https://docs.gitlab.com/charts/charts/gitlab/mailroom/index.html#incoming-email - incomingEmail: - enabled: false - address: "" - host: "imap.gmail.com" - port: 993 - ssl: true - startTls: false - user: "" - password: - secret: "" - key: password - deleteAfterDelivery: true - expungeDeleted: false - logger: - logPath: "/dev/stdout" - mailbox: inbox - idleTimeout: 60 - inboxMethod: "imap" - clientSecret: - key: secret - pollInterval: 60 - deliveryMethod: webhook - authToken: {} - # secret: - # key: - - ## https://docs.gitlab.com/charts/charts/gitlab/mailroom/index.html#service-desk-email - serviceDeskEmail: - enabled: false - address: "" - host: "imap.gmail.com" - port: 993 - ssl: true - startTls: false - user: "" - password: - secret: "" - key: password - deleteAfterDelivery: true - expungeDeleted: false - logger: - logPath: "/dev/stdout" - mailbox: inbox - idleTimeout: 60 - inboxMethod: "imap" - clientSecret: - key: secret - pollInterval: 60 - deliveryMethod: webhook - authToken: {} - # secret: - # key: - - ## https://docs.gitlab.com/charts/charts/globals#ldap - ldap: - # prevent the use of LDAP for sign-in via web. - preventSignin: false - servers: {} - ## See documentation for complete example of a configured LDAP server - - duoAuth: - enabled: false - # hostname: - # integrationKey: - # secretKey: - # secret: - # key: - - ## https://docs.gitlab.com/charts/charts/globals#kas-settings - gitlab_kas: {} - # secret: - # key: - # enabled: - # externalUrl: - # internalUrl: - - ## https://docs.gitlab.com/charts/charts/globals#suggested-reviewers-settings - suggested_reviewers: {} - # secret: - # key: - - ## https://docs.gitlab.com/charts/charts/globals#omniauth - omniauth: - enabled: false - autoSignInWithProvider: - syncProfileFromProvider: [] - syncProfileAttributes: [email] - allowSingleSignOn: [saml] - blockAutoCreatedUsers: true - autoLinkLdapUser: false - autoLinkSamlUser: false - autoLinkUser: [] - externalProviders: [] - allowBypassTwoFactor: [] - providers: [] - # - secret: gitlab-google-oauth2 - # key: provider - - ## https://docs.gitlab.com/charts/charts/globals#kerberos - kerberos: - enabled: false - keytab: - # secret: - key: keytab - servicePrincipalName: "" - krb5Config: "" - dedicatedPort: - enabled: false - port: 8443 - https: true - simpleLdapLinkingAllowedRealms: [] - - ## https://docs.gitlab.com/charts/charts/globals#configure-appconfig-settings - sentry: - enabled: false - dsn: - clientside_dsn: - environment: - - gitlab_docs: - enabled: false - host: "" - - smartcard: - enabled: false - CASecret: - clientCertificateRequiredHost: - sanExtensions: false - requiredForGitAccess: false - - sidekiq: - routingRules: [] - - # Config that only applies to the defaults on initial install - initialDefaults: {} - # signupEnabled: - ## End of global.appConfig - - oauth: - gitlab-pages: {} - # secret: - # appIdKey: - # appSecretKey: - # redirectUri: - # authScope: - - ## https://docs.gitlab.com/charts/advanced/geo/ - geo: - enabled: false - # Valid values: primary, secondary - role: primary - ## Geo Secondary only - # nodeName 
allows multiple instances behind a load balancer. - nodeName: # defaults to `gitlab.gitlab.host` - # ingressClass: - # PostgreSQL connection details only needed for `secondary` - psql: - password: {} - # secret: - # key: - # host: postgresql.hostedsomewhere.else - # port: 123 - # username: gitlab_replicator - # database: gitlabhq_geo_production - # ssl: - # secret: - # clientKey: - # clientCertificate: - # serverCA: - registry: - replication: - enabled: false - primaryApiUrl: - ## Consumes global.registry.notificationSecret - - ## https://docs.gitlab.com/charts/charts/gitlab/kas/ - kas: - enabled: true - service: - apiExternalPort: 8153 # port for connections from the GitLab backend - tls: - enabled: false - verify: true - # secretName: - # caSecretName: - - ## https://docs.gitlab.com/charts/charts/gitlab/spamcheck/ - spamcheck: - enabled: false - - ## https://docs.gitlab.com/charts/charts/globals#configure-gitlab-shell - shell: - authToken: {} - # secret: - # key: - hostKeys: {} - # secret: - ## https://docs.gitlab.com/charts/charts/globals#tcp-proxy-protocol - tcp: - proxyProtocol: false - - ## Rails application secrets - ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#gitlab-rails-secret - ## If allowing shared-secrets generation, this is OPTIONAL. - railsSecrets: {} - # secret: - - ## Rails generic setting, applicable to all Rails-based containers - rails: - bootsnap: # Enable / disable Shopify/Bootsnap cache - enabled: true - sessionStore: - sessionCookieTokenPrefix: "" - - ## https://docs.gitlab.com/charts/charts/globals#configure-registry-settings - registry: - bucket: registry - - certificate: {} - # secret: - httpSecret: {} - # secret: - # key: - notificationSecret: {} - # secret: - # key: - tls: - enabled: false - # secretName: - redis: - cache: - password: {} - rateLimiting: - password: {} - # https://docs.docker.com/registry/notifications/#configuration - notifications: {} - # endpoints: - # - name: FooListener - # url: https://foolistener.com/event - # timeout: 500ms - # threshold: 10 # DEPRECATED: use maxretries instead https://gitlab.com/gitlab-org/container-registry/-/issues/1243. - # maxretries: 5 - # backoff: 1s - # headers: - # FooBar: ['1', '2'] - # Authorization: - # secret: gitlab-registry-authorization-header - # SpecificPassword: - # secret: gitlab-registry-specific-password - # key: password - # events: {} - - # Settings utilized by other services referencing registry: - enabled: true - host: - # port: 443 - api: - protocol: http - serviceName: registry - port: 5000 - tokenIssuer: gitlab-issuer - - pages: - enabled: false - accessControl: false - path: - host: - port: - https: # default true - externalHttp: [] - externalHttps: [] - artifactsServer: true - localStore: - enabled: false - # path: /srv/gitlab/shared/pages - objectStore: - enabled: true - bucket: gitlab-pages - # proxy_download: true - connection: {} - # secret: - # key: - apiSecret: {} - # secret: - # key: - authSecret: {} - # secret: - # key: - namespaceInPath: false - - ## GitLab Runner - ## Secret created according to https://docs.gitlab.com/charts/installation/secrets#gitlab-runner-secret - ## If allowing shared-secrets generation, this is OPTIONAL. 
- runner: - registrationToken: {} - # secret: - - ## https://docs.gitlab.com/charts/charts/globals#outgoing-email - ## Outgoing email server settings - smtp: - enabled: false - address: smtp.mailgun.org - port: 2525 - user_name: "" - ## https://docs.gitlab.com/charts/installation/secrets#smtp-password - password: - secret: "" - key: password - # domain: - authentication: "plain" - starttls_auto: false - openssl_verify_mode: "peer" - open_timeout: 30 - read_timeout: 60 - pool: false - - ## https://docs.gitlab.com/charts/charts/globals#outgoing-email - ## Email persona used in email sent by GitLab - email: - from: "" - display_name: GitLab - reply_to: "" - subject_suffix: "" - smime: - enabled: false - secretName: "" - keyName: "tls.key" - certName: "tls.crt" - - ## Timezone for containers. - time_zone: UTC - - ## Global Service Annotations and Labels - service: - labels: {} - annotations: {} - - ## Global Deployment Annotations - deployment: - annotations: {} - - - # Setting a global nodeAffinity only applies to the registry chart for now. - # See issue https://gitlab.com/gitlab-com/gl-infra/production-engineering/-/issues/25403 for more information - - nodeAffinity: - - antiAffinity: soft - affinity: - podAntiAffinity: - topologyKey: "kubernetes.io/hostname" - nodeAffinity: - key: topology.kubernetes.io/zone - values: [] - - # Priority class assigned to pods, may be overridden for individual components - # https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ - priorityClassName: "" - - ## https://docs.gitlab.com/charts/charts/globals#configure-workhorse-settings - ## Global settings related to Workhorse - workhorse: - serviceName: webservice-default - # scheme: - # host: - # port: - ## https://docs.gitlab.com/charts/installation/secrets#gitlab-workhorse-secret - # secret: - # key: - tls: - enabled: false - - ## https://docs.gitlab.com/charts/charts/globals#configure-webservice - webservice: - workerTimeout: 60 - - ## https://docs.gitlab.com/charts/charts/globals#custom-certificate-authorities - # configuration of certificates container & custom CA injection - certificates: - image: - repository: registry.gitlab.com/gitlab-org/build/cng/certificates - # Default tag is `global.gitlabVersion` or `master` if the former one is undefined. - # tag: master - # pullPolicy: IfNotPresent - # pullSecrets: [] - customCAs: [] - # - secret: custom-CA - # - secret: more-custom-CAs - # keys: - # - custom-ca-1.crt - # - configMap: custom-CA-cm - # - configMap: more-custom-CAs-cm - # keys: - # - custom-ca-2.crt - # - custom-ca-3.crt - - ## kubectl image used by hooks to carry out specific jobs - kubectl: - image: - repository: registry.gitlab.com/gitlab-org/build/cng/kubectl - # Default tag is `global.gitlabVersion` or `master` if the former one is undefined. - # tag: master - # pullPolicy: IfNotPresent - # pullSecrets: [] - securityContext: - # in most base images, this is `nobody:nogroup` - runAsUser: 65534 - fsGroup: 65534 - seccompProfile: - type: "RuntimeDefault" - gitlabBase: - image: - repository: registry.gitlab.com/gitlab-org/build/cng/gitlab-base - # Default tag is `global.gitlabVersion` or `master` if the former one is undefined. - # Charts using this image as init container support further overrides with `init.image.tag`. 
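# For example (illustrative only; the subchart path and tag are assumptions),
# a single component's init containers could be pinned with:
# gitlab:
#   webservice:
#     init:
#       image:
#         tag: v17.8.1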
- # tag: master - # pullPolicy: IfNotPresent - # pullSecrets: [] - - ## https://docs.gitlab.com/charts/charts/globals#service-accounts - serviceAccount: - enabled: false - create: true - annotations: {} - automountServiceAccountToken: false - ## Name to be used for serviceAccount, otherwise defaults to chart fullname - # name: - - ## https://docs.gitlab.com/charts/charts/globals/#tracing - tracing: - connection: - string: "" - urlTemplate: "" - - zoekt: - gateway: - basicAuth: {} - indexer: - internalApi: {} - - ## https://docs.gitlab.com/charts/charts/globals - extraEnv: {} - # SOME_KEY: some_value - # SOME_OTHER_KEY: some_other_value - - ## https://docs.gitlab.com/charts/charts/globals - extraEnvFrom: {} - # MY_NODE_NAME: - # fieldRef: - # fieldPath: spec.nodeName - # MY_CPU_REQUEST: - # resourceFieldRef: - # containerName: test-container - # resource: requests.cpu - # SECRET_THING: - # secretKeyRef: - # name: special-secret - # key: special_token - # # optional: boolean - # CONFIG_STRING: - # configMapKeyRef: - # name: useful-config - # key: some-string - # # optional: boolean - - ## https://docs.gitlab.com/charts/charts/globals/#jobs - job: - nameSuffixOverride: - - traefik: - apiVersion: "" # newer apiVersion: "traefik.io/v1alpha1" - -## End of global - -upgradeCheck: - enabled: true - image: {} - # repository: - # tag: - # pullPolicy: IfNotPresent - # pullSecrets: [] - securityContext: - # in alpine/debian/busybox based images, this is `nobody:nogroup` - runAsUser: 65534 - fsGroup: 65534 - seccompProfile: - type: "RuntimeDefault" - ## Allows overwriting the specific security context under which the container is running. - containerSecurityContext: - runAsUser: 65534 - allowPrivilegeEscalation: false - runAsNonRoot: true - capabilities: - drop: [ "ALL" ] - tolerations: [] - annotations: {} - configMapAnnotations: {} - resources: - requests: - cpu: 50m - priorityClassName: "" - -## Settings for the Let's Encrypt ACME Issuer -# certmanager-issuer: -# # The email address to register certificates requested from Let's Encrypt. -# # Required if using Let's Encrypt. -# email: email@example.com - -## Installation & configuration of jetstack/cert-manager -## See requirements.yaml for current version -certmanager: - installCRDs: true - nameOverride: certmanager - # Install cert-manager chart. Set to false if you already have cert-manager - # installed or if you are not using cert-manager.
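# If cert-manager is already installed in the cluster (for instance as a
# separately managed release), a deployment would plausibly set:
# certmanager:
#   install: false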
- install: true - # Other cert-manager configurations from upstream - # See https://github.com/jetstack/cert-manager/blob/master/deploy/charts/cert-manager/README#configuration - rbac: - create: true - -## https://docs.gitlab.com/charts/charts/nginx/ -## https://docs.gitlab.com/charts/architecture/decisions#nginx-ingress -## Installation & configuration of charts/ingress-nginx: -nginx-ingress: &nginx-ingress - enabled: true - tcpExternalConfig: "true" - controller: &nginx-ingress-controller - addHeaders: - Referrer-Policy: strict-origin-when-cross-origin - config: &nginx-ingress-controller-config - annotation-value-word-blocklist: "load_module,lua_package,_by_lua,location,root,proxy_pass,serviceaccount,{,},',\"" - hsts: "true" - hsts-include-subdomains: "false" - hsts-max-age: "63072000" - server-name-hash-bucket-size: "256" - use-http2: "true" - ssl-ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4" - ssl-protocols: "TLSv1.3 TLSv1.2" - server-tokens: "false" - # Configure smaller defaults for upstream-keepalive-*, see https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration - upstream-keepalive-connections: 100 # Limit of 100 held-open connections - upstream-keepalive-time: 30s # 30 second limit for connection reuse - upstream-keepalive-timeout: 5 # 5 second timeout to hold open idle connections - upstream-keepalive-requests: 1000 # 1000 requests per connection, before recycling - - service: - externalTrafficPolicy: "Local" - ingressClassByName: false - ingressClassResource: - name: '{{ include "ingress.class.name" $ | quote }}' - resources: - requests: - cpu: 100m - memory: 100Mi - publishService: - enabled: true - replicaCount: 2 - minAvailable: 1 - scope: - enabled: true - metrics: - enabled: true - service: - annotations: - gitlab.com/prometheus_scrape: "true" - gitlab.com/prometheus_port: "10254" - prometheus.io/scrape: "true" - prometheus.io/port: "10254" - admissionWebhooks: - enabled: false - defaultBackend: - resources: - requests: - cpu: 5m - memory: 5Mi - rbac: - create: true - # Needed for k8s 1.20 and 1.21 - # https://github.com/kubernetes/ingress-nginx/issues/7510 - # https://github.com/kubernetes/ingress-nginx/issues/7519 - scope: false - serviceAccount: - create: true - -# Ingress controller to handle requests forwarded from other Geo sites. -# Configuration differences compared to the main nginx ingress: -# - Pass X-Forwarded-For headers as is -# - Use a different IngressClass name -nginx-ingress-geo: - <<: *nginx-ingress - enabled: false - controller: - <<: *nginx-ingress-controller - config: - <<: *nginx-ingress-controller-config - # Pass incoming X-Forwarded-* headers to upstream. Required to handle requests - # from other Geo sites. - # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#use-forwarded-headers - use-forwarded-headers: true - electionID: ingress-controller-leader-geo - ingressClassResource: - name: '{{ include "gitlab.geo.ingress.class.name" $ | quote }}' - controllerValue: 'k8s.io/nginx-ingress-geo' - # A pre-defined/static external IP can be configured with global.hosts.externalGeoIP. 
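# e.g., with a pre-allocated address (illustrative value):
# global:
#   hosts:
#     externalGeoIP: 203.0.113.40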
- externalIpTpl: '{{ .Values.global.hosts.externalGeoIP }}' - -haproxy: - install: false - controller: - service: - type: LoadBalancer - tcpPorts: - - name: ssh - port: 22 - targetPort: 22 - extraArgs: - - --configmap-tcp-services=$(POD_NAMESPACE)/$(POD_NAMESPACE)-haproxy-tcp - -## Installation & configuration of stable/prometheus -## See requirements.yaml for current version -prometheus: - install: true - rbac: - create: true - alertmanager: - enabled: false - alertmanagerFiles: - alertmanager.yml: {} - kubeStateMetrics: - enabled: false - nodeExporter: - enabled: false - pushgateway: - enabled: false - server: - retention: 15d - strategy: - type: Recreate - image: - tag: v2.38.0 - containerSecurityContext: - runAsUser: 1000 - allowPrivilegeEscalation: false - runAsNonRoot: true - capabilities: - drop: [ "ALL" ] - seccompProfile: - type: "RuntimeDefault" - podSecurityPolicy: - enabled: false - configmapReload: - prometheus: - containerSecurityContext: - runAsUser: 1000 - allowPrivilegeEscalation: false - runAsNonRoot: true - capabilities: - drop: [ "ALL" ] - seccompProfile: - type: "RuntimeDefault" - serverFiles: - prometheus.yml: - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - job_name: kubernetes-apiservers - kubernetes_sd_configs: - - role: endpoints - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: - [ - __meta_kubernetes_namespace, - __meta_kubernetes_service_name, - __meta_kubernetes_endpoint_port_name, - ] - action: keep - regex: default;kubernetes;https - - job_name: kubernetes-pods - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_scrape] - action: keep - regex: true - - source_labels: - [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_scheme] - action: replace - regex: (https?) - target_label: __scheme__ - - source_labels: - [__meta_kubernetes_pod_annotation_gitlab_com_prometheus_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: - [ - __address__, - __meta_kubernetes_pod_annotation_gitlab_com_prometheus_port, - ] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - - job_name: kubernetes-service-endpoints - kubernetes_sd_configs: - - role: endpoints - relabel_configs: - - action: keep - regex: true - source_labels: - - __meta_kubernetes_service_annotation_gitlab_com_prometheus_scrape - - action: replace - regex: (https?) 
- source_labels: - - __meta_kubernetes_service_annotation_gitlab_com_prometheus_scheme - target_label: __scheme__ - - action: replace - regex: (.+) - source_labels: - - __meta_kubernetes_service_annotation_gitlab_com_prometheus_path - target_label: __metrics_path__ - - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - source_labels: - - __address__ - - __meta_kubernetes_service_annotation_gitlab_com_prometheus_port - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - action: replace - source_labels: - - __meta_kubernetes_namespace - target_label: kubernetes_namespace - - action: replace - source_labels: - - __meta_kubernetes_service_name - target_label: kubernetes_name - - action: replace - source_labels: - - __meta_kubernetes_pod_node_name - target_label: kubernetes_node - - job_name: kubernetes-services - metrics_path: /probe - params: - module: [http_2xx] - kubernetes_sd_configs: - - role: service - relabel_configs: - - source_labels: - [ - __meta_kubernetes_service_annotation_gitlab_com_prometheus_probe, - ] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: kubernetes_name - -## Configuration of Redis -## https://docs.gitlab.com/charts/architecture/decisions#redis -## https://docs.gitlab.com/charts/installation/deployment.html#redis -redis: - install: true - image: - tag: "6.2.16-debian-12-r1" - auth: - existingSecret: gitlab-redis-secret - existingSecretKey: redis-password - usePasswordFiles: true - architecture: standalone - cluster: - enabled: false - metrics: - enabled: true - image: - tag: "1.46.0-debian-11-r8" - -## Installation & configuration of stable/postgresql -## See requirements.yaml for current version -postgresql: - install: true - auth: - ## These need to be set, for the sake of bitnami/postgresql upgrade patterns. - ## They are overridden by use of `existingSecret` - password: bogus-satisfy-upgrade - postgresPassword: bogus-satisfy-upgrade - ## - usePasswordFiles: false - existingSecret: '{{ include "gitlab.psql.password.secret" . 
}}' - secretKeys: - adminPasswordKey: postgresql-postgres-password - userPasswordKey: '{{ include "gitlab.psql.password.key" $ }}' - image: - tag: 14.8.0 - primary: - initdb: - scriptsConfigMap: '{{ include "gitlab.psql.initdbscripts" $}}' - extraVolumeMounts: - - name: custom-init-scripts - mountPath: /docker-entrypoint-preinitdb.d/init_revision.sh - subPath: init_revision.sh - podAnnotations: - postgresql.gitlab/init-revision: "1" - metrics: - enabled: true - service: - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9187" - gitlab.com/prometheus_scrape: "true" - gitlab.com/prometheus_port: "9187" - - ## Optionally define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - -## Installation & configuration charts/registry -## https://docs.gitlab.com/charts/architecture/decisions#registry -## https://docs.gitlab.com/charts/charts/registry/ -# registry: -# enabled: false - -## Automatic shared secret generation -## https://docs.gitlab.com/charts/installation/secrets -## https://docs.gitlab.com/charts/charts/shared-secrets.html -shared-secrets: - enabled: true - rbac: - create: true - selfsign: - image: - # pullPolicy: IfNotPresent - # pullSecrets: [] - repository: registry.gitlab.com/gitlab-org/build/cng/cfssl-self-sign - # Default tag is `master`, overridable by `global.gitlabVersion`. - # tag: master - keyAlgorithm: "rsa" - keySize: "4096" - expiry: "3650d" - caSubject: "GitLab Helm Chart" - env: production - serviceAccount: - enabled: true - create: true - name: # Specify a pre-existing ServiceAccount name - resources: - requests: - cpu: 50m - securityContext: - # in debian/alpine based images, this is `nobody:nogroup` - runAsUser: 65534 - fsGroup: 65534 - seccompProfile: - type: "RuntimeDefault" - containerSecurityContext: - allowPrivilegeEscalation: false - runAsNonRoot: true - capabilities: - drop: [ "ALL" ] - tolerations: [] - podLabels: {} - annotations: {} - -## Installation & configuration of gitlab/gitlab-runner -## See requirements.yaml for current version -gitlab-runner: - install: true - rbac: - create: true - runners: - locked: false - # Set secret to an arbitrary value because the runner chart renders the gitlab-runner.secret template only if it is not empty. - # The parent/GitLab chart overrides the template to render the actual secret name. - secret: "nonempty" - config: | - [[runners]] - [runners.kubernetes] - image = "ubuntu:22.04" - {{- if .Values.global.minio.enabled }} - [runners.cache] - Type = "s3" - Path = "gitlab-runner" - Shared = true - [runners.cache.s3] - ServerAddress = {{ include "gitlab-runner.cache-tpl.s3ServerAddress" . 
}} - BucketName = "runner-cache" - BucketLocation = "us-east-1" - Insecure = false - {{ end }} - podAnnotations: - gitlab.com/prometheus_scrape: "true" - gitlab.com/prometheus_port: 9252 - podSecurityContext: - seccompProfile: - type: "RuntimeDefault" - -traefik: - install: false - ports: - gitlab-shell: - expose: true - port: 2222 - exposedPort: 22 - -## Settings for individual sub-charts under GitLab -## Note: Many of these settings are configurable via globals -gitlab: - ## https://docs.gitlab.com/charts/charts/gitlab/toolbox - toolbox: - replicas: 1 - antiAffinityLabels: - matchLabels: - app: gitaly - ## https://docs.gitlab.com/charts/charts/gitlab/migrations - # migrations: - # enabled: false - ## https://docs.gitlab.com/charts/charts/gitlab/webservice - # webservice: - # enabled: false - ## https://docs.gitlab.com/charts/charts/gitlab/sidekiq - # sidekiq: - # enabled: false - ## https://docs.gitlab.com/charts/charts/gitlab/gitaly - # gitaly: - ## https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell - # gitlab-shell: - # enabled: false - ## https://docs.gitlab.com/charts/charts/gitlab/gitlab-pages - # gitlab-pages: - ## https://docs.gitlab.com/charts/charts/gitlab/kas - # kas: - ## https://docs.gitlab.com/charts/charts/gitlab/praefect - # praefect: - -## Installation & configuration of gitlab/gitlab-zoekt -gitlab-zoekt: - install: false - gateway: - basicAuth: - enabled: true - secretName: '{{ include "gitlab.zoekt.gateway.basicAuth.secretName" $ }}' - indexer: - internalApi: - enabled: true - secretName: '{{ include "gitlab.zoekt.indexer.internalApi.secretName" $ }}' - secretKey: '{{ include "gitlab.zoekt.indexer.internalApi.secretKey" $ }}' - gitlabUrl: '{{ include "gitlab.zoekt.indexer.internalApi.gitlabUrl" $ }}' diff --git a/k8s/helmfile.d/values/globals/.gitignore b/k8s/helmfile.d/values/globals/.gitignore deleted file mode 100644 index 03cdeec..0000000 --- a/k8s/helmfile.d/values/globals/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yaml diff --git a/k8s/helmfile.d/values/globals/staging.yaml.gotmpl b/k8s/helmfile.d/values/globals/staging.yaml.gotmpl deleted file mode 100644 index 7d7df2c..0000000 --- a/k8s/helmfile.d/values/globals/staging.yaml.gotmpl +++ /dev/null @@ -1,216 +0,0 @@ -{{ $email := "tonydu121@hotmail.com" }} -{{ $domain := "mnke.org" }} -{{ $subdomain := "dolo" }} -{{ $appDomain := print $subdomain "." 
$domain }} -# This should be an IP in the MetalLB range -{{ $primaryLoadBalancerIP := "10.0.185.128" }} -{{ $environment := "staging" }} -{{ $ingressClass := "traefik" }} -{{ $nfsStorageClass := "nfs-client" }} -{{ $longhornStorageClass := "longhorn" }} - -{{ - $ghostDatabase := dict - "database" "ghost" - "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/username" ) - "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/password" ) -}} -{{ - $mysqlDatabases := list - $ghostDatabase -}} - -{{ - $authentikDatabase := dict - "database" "authentik" - "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/username" ) - "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/password" ) -}} -{{ - $harborDatabase := dict - "database" "harborcore" - "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/username" ) - "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/password" ) -}} -{{ - $giteaDatabase := dict - "database" "gitea" - "username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/username" ) - "password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/password" ) -}} -{{ - $postgresDatabases := list - $authentikDatabase - $harborDatabase - $giteaDatabase -}} - -globals: - email: {{ $email }} - environment: {{ $environment }} - - certManager: - namespace: cert-manager - - traefik: - namespace: traefik - ingressClass: {{ $ingressClass }} - loadBalancerIP: {{ $primaryLoadBalancerIP }} - - certs: - acmeEmail: {{ $email }} - cloudflareEmail: {{ $email }} - certIssuerMode: {{ $environment }} - - cloudflareSecretToken: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#cloudflare/secretToken" }} - cloudflareTokenSecretName: cloudflare-token-secret - - issuerName: letsencrypt - privateKeySecretRef: letsencrypt - - hlMnkeOrg: - certificateName: {{ $subdomain }}.{{ $domain }} - certificateSecretName: {{ $subdomain }}.{{ $domain }}-tls - certificateNamespace: default - commonName: "{{ $appDomain }}" - dnsZones: - - "{{ $domain }}" - dnsNames: - - "{{ $appDomain }}" - - "*.{{ $appDomain }}" - - longhorn: - namespace: longhorn-system - storageClass: {{ $longhornStorageClass }} - - nfsSubdirExternalProvisioner: - namespace: nfs-subdir-external-provisioner - replicaCount: 1 - nfs: - server: truenas.local - path: /mnt/emc14t9/k8s-pv - storageClass: {{ $nfsStorageClass }} - accessModes: ReadWriteMany - - rancher: - namespace: cattle-system - ingressClass: {{ $ingressClass }} - hostname: rancher.{{ $appDomain }} - replicas: 3 - bootstrapPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#rancher/bootstrapPassword" }} - - uptimeKuma: - namespace: uptime-kuma - ingressClass: {{ $ingressClass }} - hosts: - - uptime.{{ $appDomain }} - storageClass: {{ $longhornStorageClass }} - - mysql: - namespace: db - storageClass: {{ $longhornStorageClass }} - username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/username" }} - password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/password" }} - rootPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/rootPassword" }} - databases: - {{ $mysqlDatabases | toYaml | nindent 4 }} - - postgres: - namespace: db - storageClass: {{ $longhornStorageClass }} - username: {{ fetchSecretValue 
"ref+file://values/globals/secrets.yaml#postgres/username" }} - password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/password" }} - postgresPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/postgresPassword" }} - databases: - {{ $postgresDatabases | toYaml | nindent 4 }} - - phpmyadmin: - namespace: db - hostname: pma.{{ $appDomain }} - storageClass: {{ $longhornStorageClass }} - ingressClass: {{ $ingressClass }} - - pgadmin4: - namespace: db - hostname: pg.{{ $appDomain }} - ingressClass: {{ $ingressClass }} - storageClass: {{ $longhornStorageClass }} - storageSize: 2Gi - accessMode: ReadWriteOnce - # can be email or nickname - email: tony@mnke.org - password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#pgadmin4/password" }} - - redis: - namespace: redis - storageClass: {{ $longhornStorageClass }} - storageSize: 8Gi - accessMode: ReadWriteMany - password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#redis/password" }} - - ghost: - namespace: ghost - primaryHost: blog.mnke.org - storageClass: {{ $longhornStorageClass }} - ingressClass: {{ $ingressClass }} - ghostEmail: {{ $email }} - ghostPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/password" }} - mysql: - {{ $ghostDatabase | toYaml | nindent 6 }} - - authentik: - namespace: authentik - storageClass: {{ $longhornStorageClass }} - ingressClass: {{ $ingressClass }} - secretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/secretKey" }} - hostnames: - - auth.{{ $appDomain }} - - auth.{{ $domain }} - postgres: - {{ $authentikDatabase | toYaml | nindent 6 }} - - harbor: - namespace: harbor - hostname: harbor.{{ $appDomain }} - ingressClass: {{ $ingressClass }} - storageClass: {{ $nfsStorageClass }} - username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/username" }} - password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/password" }} - htpasswd: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/htpasswd" }} - registrySecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/registrySecret" }} - jobserviceSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/jobserviceSecret" }} - coreSecretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecretKey" }} - coreSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecret" }} - coreCsrfKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreCsrfKey" }} - coreTlsKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsKey" | quote }} - coreTlsCert: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsCert" | quote }} - - postgres: - {{ $harborDatabase | toYaml | nindent 6 }} - - kubePrometheusStack: - namespace: kube-prometheus-stack - ingressClass: {{ $ingressClass }} - storageClass: {{ $nfsStorageClass }} - thanosRuler: - storageSize: 4Gi - prometheus: - storageSize: 4Gi - grafana: - storageSize: 4Gi - adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#kubePrometheusStack/grafana/adminPassword" }} - hosts: - - gf.{{ $appDomain }} - - argocd: - namespace: argo-cd - ingressClass: {{ $ingressClass }} - storageClass: {{ $nfsStorageClass }} - hostname: argocd.{{ $appDomain }} - adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#argocd/adminPassword" }} - - gitea: - namespace: gitea - ingressClass: {{ 
$ingressClass }} - postgres: - {{ $giteaDatabase | toYaml | nindent 6 }} diff --git a/k8s/helmfile.d/values/harbor/values.yaml.gotmpl b/k8s/helmfile.d/values/harbor/values.yaml.gotmpl deleted file mode 100644 index a0e3988..0000000 --- a/k8s/helmfile.d/values/harbor/values.yaml.gotmpl +++ /dev/null @@ -1,3815 +0,0 @@ -# Copyright Broadcom, Inc. All Rights Reserved. -# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass -## - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) -## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - defaultStorageClass: {{ .Values.globals.harbor.storageClass }} - storageClass: "" - ## Security parameters - ## - security: - ## @param global.security.allowInsecureImages Allows skipping image verification - allowInsecureImages: false - ## Compatibility adaptations for Kubernetes platforms - ## - compatibility: - ## Compatibility adaptations for Openshift - ## - openshift: - ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto -## @section Common Parameters -## - -## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname template with a string -## -fullnameOverride: "" -## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) -## -kubeVersion: "" -## @param clusterDomain Kubernetes Cluster Domain -## -clusterDomain: cluster.local -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template). -## -extraDeploy: [] -## Enable diagnostic mode in the deployment(s)/statefulset(s) -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment(s)/statefulset(s) - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment(s)/statefulset(s) - ## - args: - - infinity -## @section Harbor common parameters -## - -## @param adminPassword The initial password of Harbor admin. Change it from the portal after launching Harbor -## -adminPassword: {{ .Values.globals.harbor.password }}
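# The `.Values.globals.harbor.*` references above are fed by the staging
# globals, which resolve secrets at render time through helmfile's vals
# integration (fetchSecretValue with ref+file:// URIs). A sketch of the
# expected shape of the git-ignored values/globals/secrets.yaml (values are
# hypothetical):
#
#   harbor:
#     username: admin        # hypothetical
#     password: changeme123  # hypothetical
#
# so that "ref+file://values/globals/secrets.yaml#harbor/password" resolves
# to "changeme123".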
-## @param externalURL The external URL for Harbor Core service -## It is used to -## 1) populate the docker/helm commands shown on the portal -## -## Format: protocol://domain[:port]. Usually: -## 1) if "exposureType" is "ingress", the "domain" should be -## the value of "ingress.hostname" -## 2) if "exposureType" is "proxy" and "service.type" is "ClusterIP", -## the "domain" should be the value of "service.clusterIP" -## 3) if "exposureType" is "proxy" and "service.type" is "NodePort", -## the "domain" should be the IP address of k8s node -## 4) if "exposureType" is "proxy" and "service.type" is "LoadBalancer", -## the "domain" should be the LoadBalancer IP -## -externalURL: https://{{ .Values.globals.harbor.hostname }} -## Note: If Harbor is exposed via Ingress, the NGINX server will not be used -## @param proxy.httpProxy The URL of the HTTP proxy server -## @param proxy.httpsProxy The URL of the HTTPS proxy server -## @param proxy.noProxy The URLs that the proxy settings do not apply to -## @param proxy.components The component list that the proxy settings apply to -## -proxy: - httpProxy: "" - httpsProxy: "" - noProxy: 127.0.0.1,localhost,.local,.internal - components: - - core - - jobservice - - trivy -## @param logLevel The log level used for Harbor services. Allowed values are [ fatal \| error \| warn \| info \| debug \| trace ] -## -logLevel: debug -## TLS settings -## Note: TLS cert files need to be provided for each component in advance. -## -internalTLS: - ## @param internalTLS.enabled Use TLS in all the supported containers: core, jobservice, portal, registry and trivy - ## - enabled: false - ## @param internalTLS.caBundleSecret Name of an existing secret with a custom CA that will be injected into the trust store for core, jobservice, registry, trivy components - ## The secret must contain the key "ca.crt" - ## - caBundleSecret: "" -## IP family parameters -## -ipFamily: - ## @param ipFamily.ipv6.enabled Enable listening on IPv6 ([::]) for NGINX-based components (NGINX,portal) - ## Note: enabling IPv6 will cause NGINX to crash on start on systems with IPv6 disabled (`ipv6.disable` kernel flag) - ## - ipv6: - enabled: false - ## @param ipFamily.ipv4.enabled Enable listening on IPv4 for NGINX-based components (NGINX,portal) - ## - ipv4: - enabled: true -## @section Traffic Exposure Parameters -## - -## @param exposureType The way to expose Harbor. Allowed values are [ ingress \| proxy ] -## Use "proxy" to deploy an NGINX proxy in front of Harbor services -## Use "ingress" to use an Ingress Controller as proxy -## -exposureType: ingress
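# With exposureType set to "ingress", Harbor is published through the
# cluster's ingress controller. A sketch of the effective core ingress
# settings for this environment (hostname, class and issuer taken from the
# staging globals; simplified, not verbatim chart output):
#
#   ingress:
#     core:
#       ingressClassName: traefik
#       hostname: harbor.dolo.mnke.org
#       annotations:
#         cert-manager.io/cluster-issuer: letsencrypt
#       tls: true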
-## Service parameters -## -service: - ## @param service.type NGINX proxy service type - ## - type: ClusterIP - ## @param service.ports.http NGINX proxy service HTTP port - ## @param service.ports.https NGINX proxy service HTTPS port - ## - ports: - http: 80 - https: 443 - ## Node ports to expose - ## @param service.nodePorts.http Node port for HTTP - ## @param service.nodePorts.https Node port for HTTPS - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - https: "" - ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin - ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ - ## - sessionAffinity: None - ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## @param service.clusterIP NGINX proxy service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param service.loadBalancerIP NGINX proxy service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param service.loadBalancerSourceRanges NGINX proxy service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param service.externalTrafficPolicy NGINX proxy service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param service.annotations Additional custom annotations for NGINX proxy service - ## - annotations: {} - ## @param service.extraPorts Extra port to expose on NGINX proxy service - ## - extraPorts: [] -ingress: - ## Configure the ingress resource that allows you to access Harbor Core - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ## - core: - ## @param ingress.core.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: {{ .Values.globals.harbor.ingressClass }} - ## @param ingress.core.pathType Ingress path type - ## - pathType: ImplementationSpecific - ## @param ingress.core.apiVersion Force Ingress API version (automatically detected if not set) - ## - apiVersion: "" - ## @param ingress.core.controller The ingress controller type. Currently supports `default`, `gce` and `ncp` - ## leave as `default` for most ingress controllers. 
- ## set to `gce` if using the GCE ingress controller - ## set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller - ## - controller: default - ## @param ingress.core.hostname Default host for the ingress record - ## - hostname: {{ .Values.globals.harbor.hostname }} - ## @param ingress.core.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. - ## Use this parameter to set the required annotations for cert-manager, see - ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## e.g: - ## annotations: - ## kubernetes.io/ingress.class: nginx - ## cert-manager.io/cluster-issuer: cluster-issuer-name - ## - annotations: - cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }} - kubernetes.io/ingress.class: {{ .Values.globals.harbor.ingressClass }} - ## @param ingress.core.tls Enable TLS configuration for the host defined at `ingress.core.hostname` parameter - ## TLS certificates will be retrieved from a TLS secret with name: - ## You can: - ## - Use the `ingress.core.secrets` parameter to create this TLS secret - ## - Rely on cert-manager to create it by setting the corresponding annotations - ## - Rely on Helm to create self-signed certificates by setting `ingress.core.selfSigned=true` - ## - tls: true - ## @param ingress.core.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm - ## - selfSigned: false - ## @param ingress.core.extraHosts An array with additional hostname(s) to be covered with the ingress record - ## e.g: - ## extraHosts: - ## - name: core.harbor.domain - ## path: / - ## - extraHosts: [] - ## @param ingress.core.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host - ## e.g: - ## extraPaths: - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - extraPaths: [] - ## @param ingress.core.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - ## e.g: - ## extraTls: - ## - hosts: - ## - core.harbor.domain - ## secretName: core.harbor.domain-tls - ## - extraTls: [] - ## @param ingress.core.secrets Custom TLS certificates as secrets - ## NOTE: 'key' and 'certificate' are expected in PEM format - ## NOTE: 'name' should line up with a 'secretName' set further up - ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates - ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days - ## It is also possible to create and manage the certificates outside of this helm chart - ## Please see README.md for more information - ## e.g: - ## secrets: - ## - name: core.harbor.domain-tls - ## key: |- - ## REDACTED - ## ... - ## REDACTED - ## certificate: |- - ## -----BEGIN CERTIFICATE----- - ## ... 
- ## -----END CERTIFICATE----- - ## - secrets: [] - ## @param ingress.core.extraRules Additional rules to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules - ## e.g: - ## extraRules: - ## - host: example.local - ## http: - ## path: / - ## backend: - ## service: - ## name: example-svc - ## port: - ## name: http - ## - extraRules: [] - ## -## @section Persistence Parameters -## - -## The persistence is enabled by default and a default StorageClass -## is needed in the k8s cluster to provision volumes dynamically. -## Specify another StorageClass in the "storageClass" or set "existingClaim" -## if you have already existing persistent volumes to use -## -## For storing images and charts, you can also use "azure", "gcs", "s3", -## "swift" or "oss". Set it in the "imageChartStorage" section -## -persistence: - ## @param persistence.enabled Enable the data persistence or not - ## - enabled: true - ## Resource Policy - ## @param persistence.resourcePolicy Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted - ## - resourcePolicy: "keep" - persistentVolumeClaim: - ## @param persistence.persistentVolumeClaim.registry.existingClaim Name of an existing PVC to use - ## @param persistence.persistentVolumeClaim.registry.storageClass PVC Storage Class for Harbor Registry data volume - ## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning - ## @param persistence.persistentVolumeClaim.registry.subPath The sub path used in the volume - ## @param persistence.persistentVolumeClaim.registry.accessModes The access mode of the volume - ## @param persistence.persistentVolumeClaim.registry.size The size of the volume - ## @param persistence.persistentVolumeClaim.registry.annotations Annotations for the PVC - ## @param persistence.persistentVolumeClaim.registry.selector Selector to match an existing Persistent Volume - ## - registry: - existingClaim: "" - storageClass: "" - subPath: "" - accessModes: - - ReadWriteOnce - size: 5Gi - annotations: {} - selector: {} - ## @param persistence.persistentVolumeClaim.jobservice.existingClaim Name of an existing PVC to use - ## @param persistence.persistentVolumeClaim.jobservice.storageClass PVC Storage Class for Harbor Jobservice data volume - ## Note: The default StorageClass will be used if not defined. Set it to `-` to disable dynamic provisioning - ## @param persistence.persistentVolumeClaim.jobservice.subPath The sub path used in the volume - ## @param persistence.persistentVolumeClaim.jobservice.accessModes The access mode of the volume - ## @param persistence.persistentVolumeClaim.jobservice.size The size of the volume - ## @param persistence.persistentVolumeClaim.jobservice.annotations Annotations for the PVC - ## @param persistence.persistentVolumeClaim.jobservice.selector Selector to match an existing Persistent Volume - ## - jobservice: - existingClaim: "" - storageClass: "" - subPath: "" - accessModes: - - ReadWriteOnce - size: 1Gi - annotations: {} - selector: {} - ## @param persistence.persistentVolumeClaim.trivy.storageClass PVC Storage Class for Trivy data volume - ## Note: The default StorageClass will be used if not defined. 
Set it to `-` to disable dynamic provisioning - ## @param persistence.persistentVolumeClaim.trivy.accessModes The access mode of the volume - ## @param persistence.persistentVolumeClaim.trivy.size The size of the volume - ## @param persistence.persistentVolumeClaim.trivy.annotations Annotations for the PVC - ## @param persistence.persistentVolumeClaim.trivy.selector Selector to match an existing Persistent Volume - ## - trivy: - storageClass: "" - accessModes: - - ReadWriteOnce - size: 5Gi - annotations: {} - selector: {} - ## Define which storage backend is used for registry to store - ## images and charts. - ## ref: https://github.com/docker/distribution/blob/master/docs/configuration.md#storage - ## - imageChartStorage: - ## @param persistence.imageChartStorage.caBundleSecret Specify the `caBundleSecret` if the storage service uses a self-signed certificate. The secret must contain keys named `ca.crt` which will be injected into the trust store of registry's containers. - ## - caBundleSecret: "" - ## @param persistence.imageChartStorage.disableredirect The configuration for managing redirects from content backends. For backends which do not supported it (such as using MinIO® for `s3` storage type), please set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more information about the detail - ## - disableredirect: false - ## @param persistence.imageChartStorage.type The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more information about the detail - ## - type: filesystem - ## Images/charts storage parameters when type is "filesystem" - ## @param persistence.imageChartStorage.filesystem.rootdirectory Filesystem storage type setting: Storage root directory - ## @param persistence.imageChartStorage.filesystem.maxthreads Filesystem storage type setting: Maximum threads directory - ## - filesystem: - rootdirectory: /storage - maxthreads: "" - ## Images/charts storage parameters when type is "azure" - ## @param persistence.imageChartStorage.azure.accountname Azure storage type setting: Name of the Azure account - ## @param persistence.imageChartStorage.azure.accountkey Azure storage type setting: Key of the Azure account - ## @param persistence.imageChartStorage.azure.container Azure storage type setting: Container - ## @param persistence.imageChartStorage.azure.storagePrefix Azure storage type setting: Storage prefix - ## @param persistence.imageChartStorage.azure.realm Azure storage type setting: Realm of the Azure account - ## - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - storagePrefix: /azure/harbor/charts - ## Example realm - ## realm: core.windows.net - ## - realm: "" - ## Images/charts storage parameters when type is "gcs" - ## @param persistence.imageChartStorage.gcs.bucket GCS storage type setting: Bucket name - ## @param persistence.imageChartStorage.gcs.encodedkey GCS storage type setting: Base64 encoded key - ## @param persistence.imageChartStorage.gcs.rootdirectory GCS storage type setting: Root directory name - ## @param persistence.imageChartStorage.gcs.chunksize GCS storage type setting: Chunk size name - ## - gcs: - bucket: bucketname - ## The base64 encoded json file which contains the gcs key 
(file's content) - ## - encodedkey: "" - rootdirectory: "" - chunksize: "" - ## Images/charts storage parameters when type is "s3" - ## ref: https://docs.docker.com/registry/storage-drivers/s3/ - ## @param persistence.imageChartStorage.s3.region S3 storage type setting: Region - ## @param persistence.imageChartStorage.s3.bucket S3 storage type setting: Bucket name - ## @param persistence.imageChartStorage.s3.accesskey S3 storage type setting: Access key name - ## @param persistence.imageChartStorage.s3.secretkey S3 storage type setting: Secret Key name - ## @param persistence.imageChartStorage.s3.regionendpoint S3 storage type setting: Region Endpoint - ## @param persistence.imageChartStorage.s3.encrypt S3 storage type setting: Encrypt - ## @param persistence.imageChartStorage.s3.keyid S3 storage type setting: Key ID - ## @param persistence.imageChartStorage.s3.secure S3 storage type setting: Secure - ## @param persistence.imageChartStorage.s3.skipverify S3 storage type setting: TLS skip verification - ## @param persistence.imageChartStorage.s3.v4auth S3 storage type setting: V4 authorization - ## @param persistence.imageChartStorage.s3.chunksize S3 storage type setting: Chunk size - ## @param persistence.imageChartStorage.s3.rootdirectory S3 storage type setting: Root directory name - ## @param persistence.imageChartStorage.s3.storageClass S3 storage type setting: Storage class - ## @param persistence.imageChartStorage.s3.sse S3 storage type setting: SSE name - ## @param persistence.imageChartStorage.s3.multipartcopythresholdsize S3 storage type setting: Threshold size for multipart copy - ## - s3: - region: us-west-1 - bucket: bucketname - accesskey: "" - secretkey: "" - regionendpoint: "" - encrypt: "" - keyid: "" - secure: "" - skipverify: "" - v4auth: "" - chunksize: "" - rootdirectory: "" - storageClass: "" - sse: "" - multipartcopythresholdsize: "" - ## Images/charts storage parameters when type is "swift" - ## @param persistence.imageChartStorage.swift.authurl Swift storage type setting: Authentication url - ## @param persistence.imageChartStorage.swift.username Swift storage type setting: Username - ## @param persistence.imageChartStorage.swift.password Swift storage type setting: Password - ## @param persistence.imageChartStorage.swift.container Swift storage type setting: Container - ## @param persistence.imageChartStorage.swift.region Swift storage type setting: Region - ## @param persistence.imageChartStorage.swift.tenant Swift storage type setting: Tenant - ## @param persistence.imageChartStorage.swift.tenantid Swift storage type setting: TenantID - ## @param persistence.imageChartStorage.swift.domain Swift storage type setting: Domain - ## @param persistence.imageChartStorage.swift.domainid Swift storage type setting: DomainID - ## @param persistence.imageChartStorage.swift.trustid Swift storage type setting: TrustID - ## @param persistence.imageChartStorage.swift.insecureskipverify Swift storage type setting: Verification - ## @param persistence.imageChartStorage.swift.chunksize Swift storage type setting: Chunk - ## @param persistence.imageChartStorage.swift.prefix Swift storage type setting: Prefix - ## @param persistence.imageChartStorage.swift.secretkey Swift storage type setting: Secret Key - ## @param persistence.imageChartStorage.swift.accesskey Swift storage type setting: Access Key - ## @param persistence.imageChartStorage.swift.authversion Swift storage type setting: Auth - ## @param persistence.imageChartStorage.swift.endpointtype Swift storage type setting: Endpoint - ## @param persistence.imageChartStorage.swift.tempurlcontainerkey Swift storage type setting: Temp URL container key - ## @param persistence.imageChartStorage.swift.tempurlmethods Swift storage type setting: Temp URL methods - ## - swift: - authurl: https://storage.myprovider.com/v3/auth - username: "" - password: "" - container: "" - region: "" - tenant: "" - tenantid: "" - domain: "" - domainid: "" - trustid: "" - insecureskipverify: "" - chunksize: "" - prefix: "" - secretkey: "" - accesskey: "" - authversion: "" - endpointtype: "" - tempurlcontainerkey: "" - tempurlmethods: ""
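# If the registry storage were moved off the filesystem to an S3-compatible
# backend, only the keys above change. A sketch for a self-hosted MinIO-style
# endpoint (bucket and endpoint are hypothetical):
#
#   persistence:
#     imageChartStorage:
#       type: s3
#       disableredirect: true   # per the redirect note above, needed for MinIO
#       s3:
#         region: us-east-1
#         bucket: harbor-registry                  # hypothetical
#         regionendpoint: http://minio.local:9000  # hypothetical
#         v4auth: "true"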
- ## Images/charts storage parameters when type is "oss" - ## @param persistence.imageChartStorage.oss.accesskeyid OSS storage type setting: Access key ID - ## @param persistence.imageChartStorage.oss.accesskeysecret OSS storage type setting: Access key secret name containing the token - ## @param persistence.imageChartStorage.oss.region OSS storage type setting: Region name - ## @param persistence.imageChartStorage.oss.bucket OSS storage type setting: Bucket name - ## @param persistence.imageChartStorage.oss.endpoint OSS storage type setting: Endpoint - ## @param persistence.imageChartStorage.oss.internal OSS storage type setting: Internal - ## @param persistence.imageChartStorage.oss.encrypt OSS storage type setting: Encrypt - ## @param persistence.imageChartStorage.oss.secure OSS storage type setting: Secure - ## @param persistence.imageChartStorage.oss.chunksize OSS storage type setting: Chunk - ## @param persistence.imageChartStorage.oss.rootdirectory OSS storage type setting: Directory - ## @param persistence.imageChartStorage.oss.secretkey OSS storage type setting: Secret key - ## - oss: - accesskeyid: "" - accesskeysecret: "" - region: "" - bucket: "" - endpoint: "" - internal: "" - encrypt: "" - secure: "" - chunksize: "" - rootdirectory: "" - secretkey: "" -## @section Tracing parameters -## - -## Tracing parameters: -## tracing: Configure tracing for Harbor, only one of tracing.jaeger.enabled and tracing.otel.enabled should be set -## -tracing: - ## @param tracing.enabled Enable tracing - ## - enabled: false - ## @param tracing.sampleRate Tracing sample rate from 0 to 1 - ## - sampleRate: 1 - ## @param tracing.namespace Used to differentiate traces between different harbor services - ## - namespace: "" - ## @param tracing.attributes A key value dict containing user defined attributes used to initialize the trace provider - ## e.g: - ## attributes: - ## application: harbor - ## - attributes: {} - ## @extra tracing.jaeger Configuration for exporting to jaeger. If using jaeger collector mode, use endpoint, username and password. If using jaeger agent mode, use agentHostname and agentPort. 
- ## e.g: - ## jaeger: - ## enabled: true - ## endpoint: http://hostname:14268/api/traces - ## username: "jaeger-username" - ## password: "jaeger-password" - ## @param tracing.jaeger.enabled Enable jaeger export - ## @param tracing.jaeger.endpoint Jaeger endpoint - ## @param tracing.jaeger.username Jaeger username - ## @param tracing.jaeger.password Jaeger password - ## @param tracing.jaeger.agentHost Jaeger agent hostname - ## @param tracing.jaeger.agentPort Jaeger agent port - ## - jaeger: - enabled: false - endpoint: "" - username: "" - password: "" - agentHost: "" - agentPort: "" - ## @extra tracing.otel Configuration for exporting to an otel endpoint - ## @param tracing.otel.enabled Enable otel export - ## @param tracing.otel.endpoint The hostname and port for an otel compatible backend - ## @param tracing.otel.urlpath Url path of otel endpoint - ## @param tracing.otel.compression Enable data compression - ## @param tracing.otel.timeout The timeout for data transfer - ## @param tracing.otel.insecure Ignore cert verification for otel backend - ## - otel: - enabled: false - endpoint: "hostname:4318" - urlpath: "/v1/traces" - compression: false - timeout: 10s - insecure: true -## @section Volume Permissions parameters -## - -## Init containers parameters: -## certificateVolume: Copy /etc/ssl/certs to a volume so that they can be updated when a read-only volume is in use. -## -certificateVolume: - ## Init container resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param certificateVolume.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if certificateVolume.resources is set (certificateVolume.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param certificateVolume.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} -## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume - ## - enabled: false - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository - ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 12-debian-12-r35 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. 
- ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Init container' Security Context - ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser - ## and not the below volumePermissions.containerSecurityContext.runAsUser - ## @param volumePermissions.containerSecurityContext.enabled Enable init container Security Context - ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 0 -## @section NGINX Parameters -## -nginx: - ## Bitnami NGINX image - ## ref: https://hub.docker.com/r/bitnami/nginx/tags/ - ## @param nginx.image.registry [default: REGISTRY_NAME] NGINX image registry - ## @param nginx.image.repository [default: REPOSITORY_NAME/nginx] NGINX image repository - ## @skip nginx.image.tag NGINX image tag (immutable tags are recommended) - ## @param nginx.image.digest NGINX image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param nginx.image.pullPolicy NGINX image pull policy - ## @param nginx.image.pullSecrets NGINX image pull secrets - ## @param nginx.image.debug Enable NGINX image debug mode - ## - image: - registry: docker.io - repository: bitnami/nginx - tag: 1.27.3-debian-12-r5 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## TLS parameters - ## - tls: - ## @param nginx.tls.enabled Enable TLS termination - ## - enabled: true - ## @param nginx.tls.existingSecret Existing secret name containing your own TLS certificates. - ## The secret must contain the keys: - ## `tls.crt` - the certificate (required), - ## `tls.key` - the private key (required), - ## `ca.crt` - CA certificate (optional) - ## Self-signed TLS certificates will be used otherwise. 
- ## - existingSecret: "" - ## @param nginx.tls.commonName The common name used to generate the self-signed TLS certificates - ## - commonName: core.harbor.domain - ## @param nginx.behindReverseProxy If NGINX is behind another reverse proxy, set to true - ## if the reverse proxy already provides the 'X-Forwarded-Proto' header field. - ## This is, for example, the case for the OpenShift HAProxy router. - ## - behindReverseProxy: false - ## @param nginx.command Override default container command (useful when using custom images) - ## - command: [] - ## @param nginx.args Override default container args (useful when using custom images) - ## - args: [] - ## @param nginx.extraEnvVars Array with extra environment variables to add NGINX pods - ## - extraEnvVars: [] - ## @param nginx.extraEnvVarsCM ConfigMap containing extra environment variables for NGINX pods - ## - extraEnvVarsCM: "" - ## @param nginx.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for NGINX pods - ## - extraEnvVarsSecret: "" - ## @param nginx.containerPorts.http NGINX HTTP container port - ## @param nginx.containerPorts.https NGINX HTTPS container port - ## - containerPorts: - http: 8080 - https: 8443 - ## @param nginx.replicaCount Number of NGINX replicas - ## - replicaCount: 1 - ## Configure extra options for NGINX containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param nginx.livenessProbe.enabled Enable livenessProbe on NGINX containers - ## @param nginx.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param nginx.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param nginx.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param nginx.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param nginx.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param nginx.readinessProbe.enabled Enable readinessProbe on NGINX containers - ## @param nginx.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param nginx.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param nginx.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param nginx.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param nginx.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param nginx.startupProbe.enabled Enable startupProbe on NGINX containers - ## @param nginx.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param nginx.startupProbe.periodSeconds Period seconds for startupProbe - ## @param nginx.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param nginx.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param nginx.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param nginx.customLivenessProbe Custom livenessProbe that overrides 
the default one - ## - customLivenessProbe: {} - ## @param nginx.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param nginx.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## NGINX resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if nginx.resources is set (nginx.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param nginx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure NGINX pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param nginx.podSecurityContext.enabled Enabled NGINX pods' Security Context - ## @param nginx.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param nginx.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param nginx.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param nginx.podSecurityContext.fsGroup Set NGINX pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure NGINX containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param nginx.containerSecurityContext.enabled Enabled containers' Security Context - ## @param nginx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param nginx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param nginx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param nginx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param nginx.containerSecurityContext.privileged Set container's Security Context privileged - ## @param nginx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param nginx.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param nginx.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param nginx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param nginx.updateStrategy.type NGINX deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the 
previous pod remains attached to the - ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param nginx.lifecycleHooks LifecycleHook for the NGINX container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param nginx.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Nginx ServiceAccount configuration - ## - serviceAccount: - ## @param nginx.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param nginx.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param nginx.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param nginx.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## @param nginx.hostAliases NGINX pods host aliases - ## - hostAliases: [] - ## @param nginx.podLabels Add additional labels to the NGINX pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param nginx.podAnnotations Annotations to add to the NGINX pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param nginx.podAffinityPreset NGINX Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param nginx.podAntiAffinityPreset NGINX Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## Allowed values: soft, hard - ## - nodeAffinityPreset: - ## @param nginx.nodeAffinityPreset.type NGINX Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param nginx.nodeAffinityPreset.key NGINX Node label key to match Ignored if `affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param nginx.nodeAffinityPreset.values NGINX Node label values to match. Ignored if `affinity` is set. - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param nginx.affinity NGINX Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param nginx.nodeSelector NGINX Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param nginx.tolerations NGINX Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param nginx.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param nginx.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param nginx.schedulerName Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param nginx.sidecars Add additional sidecar containers to the NGINX pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param nginx.initContainers Add additional init containers to the NGINX pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param nginx.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param nginx.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param nginx.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `nginx.pdb.minAvailable` and `nginx.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param nginx.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the NGINX pods - ## - extraVolumeMounts: [] - ## @param nginx.extraVolumes Optionally specify extra list of additional volumes for the NGINX pods - ## - extraVolumes: [] - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param nginx.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param nginx.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param nginx.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
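To make the NetworkPolicy parameters above concrete, here is a minimal sketch that locks the NGINX proxy down so that only correctly labelled client pods, or pods from a labelled namespace, can reach it; the parameter keys are the ones documented above, while the label values are hypothetical:

nginx:
  networkPolicy:
    enabled: true
    allowExternal: false                       # only pods with the correct client label get access
    ingressNSMatchLabels:
      kubernetes.io/metadata.name: traefik     # hypothetical: also admit traffic from this namespace...
    ingressNSPodMatchLabels:
      app.kubernetes.io/name: traefik          # hypothetical: ...but only from these pods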
- ## - allowExternalEgress: true - ## @param nginx.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param nginx.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param nginx.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param nginx.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section Harbor Portal Parameters -## -portal: - ## Bitnami Harbor Portal image - ## ref: https://hub.docker.com/r/bitnami/harbor-portal/tags/ - ## @param portal.image.registry [default: REGISTRY_NAME] Harbor Portal image registry - ## @param portal.image.repository [default: REPOSITORY_NAME/harbor-portal] Harbor Portal image repository - ## @skip portal.image.tag Harbor Portal image tag (immutable tags are recommended) - ## @param portal.image.digest Harbor Portal image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param portal.image.pullPolicy Harbor Portal image pull policy - ## @param portal.image.pullSecrets Harbor Portal image pull secrets - ## @param portal.image.debug Enable Harbor Portal image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-portal - tag: 2.12.2-debian-12-r0 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
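Since pull secrets are not created by the chart, a short sketch of wiring one in; the secret name is hypothetical and must already exist in the release namespace:

portal:
  image:
    # created beforehand, e.g.:
    #   kubectl create secret docker-registry my-registry-key \
    #     --docker-server=docker.io --docker-username=<user> --docker-password=<token>
    pullSecrets:
      - my-registry-key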
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## Use TLS in the container - ## - tls: - ## @param portal.tls.existingSecret Name of an existing secret with the certificates for internal TLS access - ## Requires `internalTLS.enabled` to be set to `true`` - ## Self-signed TLS certificates will be used otherwise - ## - existingSecret: "" - ## @param portal.command Override default container command (useful when using custom images) - ## - command: [] - ## @param portal.args Override default container args (useful when using custom images) - ## - args: [] - ## @param portal.extraEnvVars Array with extra environment variables to add Harbor Portal pods - ## - extraEnvVars: [] - ## @param portal.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Portal pods - ## - extraEnvVarsCM: "" - ## @param portal.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Portal pods - ## - extraEnvVarsSecret: "" - ## @param portal.containerPorts.http Harbor Portal HTTP container port - ## @param portal.containerPorts.https Harbor Portal HTTPS container port - ## - containerPorts: - http: 8080 - https: 8443 - ## @param portal.replicaCount Number of Harbor Portal replicas - ## - replicaCount: 1 - ## Configure extra options for Harbor Portal containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param portal.livenessProbe.enabled Enable livenessProbe on Harbor Portal containers - ## @param portal.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param portal.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param portal.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param portal.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param portal.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param portal.readinessProbe.enabled Enable readinessProbe on Harbor Portal containers - ## @param portal.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param portal.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param portal.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param portal.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param portal.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param portal.startupProbe.enabled Enable startupProbe on Harbor Portal containers - ## @param portal.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param portal.startupProbe.periodSeconds Period seconds for startupProbe - ## @param portal.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param portal.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param portal.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - 
initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param portal.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param portal.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param portal.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Portal resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param portal.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if portal.resources is set (portal.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param portal.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Harbor Portal pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param portal.podSecurityContext.enabled Enabled Harbor Portal pods' Security Context - ## @param portal.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param portal.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param portal.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param portal.podSecurityContext.fsGroup Set Harbor Portal pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Harbor Portal containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param portal.containerSecurityContext.enabled Enabled containers' Security Context - ## @param portal.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param portal.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param portal.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param portal.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param portal.containerSecurityContext.privileged Set container's Security Context privileged - ## @param portal.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param portal.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param portal.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param portal.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: 
["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param portal.updateStrategy.type Harbor Portal deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param portal.lifecycleHooks LifecycleHook for the Harbor Portal container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param portal.hostAliases Harbor Portal pods host aliases - ## - hostAliases: [] - ## @param portal.podLabels Add additional labels to the Harbor Portal pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param portal.podAnnotations Annotations to add to the Harbor Portal pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param portal.podAffinityPreset Harbor Portal Pod affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param portal.podAntiAffinityPreset Harbor Portal Pod anti-affinity preset. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param portal.nodeAffinityPreset.type Harbor Portal Node affinity preset type. Ignored if `portal.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param portal.nodeAffinityPreset.key Harbor Portal Node label key to match Ignored if `portal.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param portal.nodeAffinityPreset.values Harbor Portal Node label values to match. Ignored if `portal.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param portal.affinity Harbor Portal Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: portal.podAffinityPreset, portal.podAntiAffinityPreset, and portal.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param portal.nodeSelector Harbor Portal Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param portal.tolerations Harbor Portal Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param portal.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param portal.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param portal.schedulerName Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param portal.sidecars Add additional sidecar containers to the Harbor Portal pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param portal.initContainers Add additional init containers to the Harbor Portal pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param portal.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param portal.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param portal.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `portal.pdb.minAvailable` and `portal.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param portal.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Portal pods - ## - extraVolumeMounts: [] - ## @param portal.extraVolumes Optionally specify extra list of additional volumes for the Harbor Portal pods - ## - extraVolumes: [] - ## @param portal.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Portal ServiceAccount configuration - ## - serviceAccount: - ## @param portal.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param portal.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param portal.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param portal.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Harbor Portal service configuration - ## - service: - ## @param portal.service.ports.http Harbor Portal HTTP service port - ## @param portal.service.ports.https Harbor Portal HTTPS service port - ## - ports: - http: 80 - https: 443 - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param portal.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param portal.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. 
When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param portal.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param portal.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param portal.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param portal.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param portal.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section Harbor Core Parameters -## -core: - ## Bitnami Harbor Core image - ## ref: https://hub.docker.com/r/bitnami/harbor-core/tags/ - ## @param core.image.registry [default: REGISTRY_NAME] Harbor Core image registry - ## @param core.image.repository [default: REPOSITORY_NAME/harbor-core] Harbor Core image repository - ## @skip core.image.tag Harbor Core image tag (immutable tags are recommended) - ## @param core.image.digest Harbor Core image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param core.image.pullPolicy Harbor Core image pull policy - ## @param core.image.pullSecrets Harbor Core image pull secrets - ## @param core.image.debug Enable Harbor Core image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-core - tag: 2.12.2-debian-12-r1 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param core.sessionLifetime Explicitly set a session timeout (in seconds) overriding the backend default. - ## - sessionLifetime: "" - ## @param core.uaaSecret If using external UAA auth which has a self signed cert, you can provide a pre-created secret containing it under the key `ca.crt`. - ## - uaaSecret: "" - ## @param core.secretKey The key used for encryption. Must be a string of 16 chars - ## e.g: - ## secretKey: "not-a-secure-string" - ## - secretKey: {{ .Values.globals.harbor.coreSecretKey }} - ## @param core.secret Secret used when the core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. - ## - secret: {{ .Values.globals.harbor.coreSecret }} - ## @param core.tokenKey Key of the certificate used for token encryption/decryption. 
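Rather than inlining tokenKey/tokenCert as above, the key pair can live in a pre-created TLS secret referenced through core.secretName (documented just below, with the required tls.crt and tls.key keys); a sketch, with name and namespace hypothetical:

apiVersion: v1
kind: Secret
metadata:
  name: harbor-token-cert        # hypothetical; set core.secretName to this
  namespace: harbor              # hypothetical release namespace
type: kubernetes.io/tls
stringData:
  tls.crt: |                     # PEM certificate used for token encryption/decryption
    -----BEGIN CERTIFICATE-----
    ...
  tls.key: |                     # matching PEM private key
    -----BEGIN PRIVATE KEY-----
    ...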
- ## - tokenKey: {{ .Values.globals.harbor.coreTlsKey | quote }} - ## @param core.tokenCert Certificate used for token encryption/decryption. - ## - tokenCert: {{ .Values.globals.harbor.coreTlsCert | quote }} - ## @param core.secretName Fill the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain two keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set - ## - secretName: "" - ## @param core.existingSecret Existing secret for core - ## The secret must contain the keys: - ## `secret` (required), - ## `secretKey` (required), - ## - existingSecret: "" - ## @param core.existingEnvVarsSecret Existing secret for core envvars - ## The secret must contain the keys: - ## `CSRF_KEY` (optional - alternatively auto-generated), - ## `HARBOR_ADMIN_PASSWORD` (optional - alternatively auto-generated), - ## `POSTGRESQL_PASSWORD` (optional - alternatively uses weak upstream default. Read below if you set it. You must also set postgresql.auth.existingSecret to the same value as core.existingEnvVarsSecret for this to work!), - ## `postgres-password` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.) - ## `HARBOR_DATABASE_PASSWORD` (required if POSTGRESQL_PASSWORD is set & must be the same as POSTGRESQL_PASSWORD.) - ## `REGISTRY_CREDENTIAL_USERNAME` (optional - alternatively weak defaults), - ## `REGISTRY_CREDENTIAL_PASSWORD` (optional - alternatively weak defaults), - ## `_REDIS_URL_CORE` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/0") - ## `_REDIS_URL_REG` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/2") - ## - ## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret - ## Notes: - ## As a EnvVars secret, this secret also store redis config urls - ## The HARBOR_ADMIN_PASSWORD is only required at initial deployment, once the password is set in database, it is not used anymore - ## - existingEnvVarsSecret: "" - ## @param core.csrfKey The CSRF key. 
Will be generated automatically if it isn't specified - ## - csrfKey: {{ .Values.globals.harbor.coreCsrfKey }} - ## Use TLS in the container - ## - tls: - ## @param core.tls.existingSecret Name of an existing secret with the certificates for internal TLS access - ## Requires `internalTLS.enabled` to be set to `true`` - ## Self-signed TLS certificates will be used otherwise - ## - existingSecret: "" - ## @param core.command Override default container command (useful when using custom images) - ## - command: [] - ## @param core.args Override default container args (useful when using custom images) - ## - args: [] - ## @param core.extraEnvVars Array with extra environment variables to add Harbor Core pods - ## - extraEnvVars: [] - ## @param core.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Core pods - ## - extraEnvVarsCM: "" - ## @param core.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Core pods - ## - extraEnvVarsSecret: "" - ## @param core.configOverwriteJson String containing a JSON with configuration overrides - ## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings - ## - configOverwriteJson: "" - ## @param core.configOverwriteJsonSecret Secret containing the JSON configuration overrides - ## Source: https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#harbor-user-settings - ## - configOverwriteJsonSecret: "" - ## @param core.containerPorts.http Harbor Core HTTP container port - ## @param core.containerPorts.https Harbor Core HTTPS container port - ## @param core.containerPorts.metrics Harbor Core metrics container port - ## - containerPorts: - http: 8080 - https: 8443 - metrics: 8001 - ## @param core.replicaCount Number of Harbor Core replicas - ## - replicaCount: 1 - ## Configure extra options for Harbor Core containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param core.livenessProbe.enabled Enable livenessProbe on Harbor Core containers - ## @param core.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param core.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param core.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param core.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param core.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param core.readinessProbe.enabled Enable readinessProbe on Harbor Core containers - ## @param core.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param core.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param core.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param core.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param core.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param core.startupProbe.enabled Enable startupProbe on Harbor Core containers - ## @param core.startupProbe.initialDelaySeconds Initial delay 
seconds for startupProbe - ## @param core.startupProbe.periodSeconds Period seconds for startupProbe - ## @param core.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param core.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param core.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param core.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param core.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param core.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Core resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param core.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if core.resources is set (core.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param core.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Harbor Core pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param core.podSecurityContext.enabled Enabled Harbor Core pods' Security Context - ## @param core.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param core.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param core.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param core.podSecurityContext.fsGroup Set Harbor Core pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Harbor Core containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param core.containerSecurityContext.enabled Enabled containers' Security Context - ## @param core.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param core.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param core.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param core.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param core.containerSecurityContext.privileged Set container's Security Context privileged - ## @param core.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param core.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param core.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param 
core.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param core.updateStrategy.type Harbor Core deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param core.lifecycleHooks LifecycleHook for the Harbor Core container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param core.hostAliases Harbor Core pods host aliases - ## - hostAliases: [] - ## @param core.podLabels Add additional labels to the Harbor Core pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param core.podAnnotations Annotations to add to the Harbor Core pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param core.podAffinityPreset Harbor Core Pod affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param core.podAntiAffinityPreset Harbor Core Pod anti-affinity preset. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param core.nodeAffinityPreset.type Harbor Core Node affinity preset type. Ignored if `core.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param core.nodeAffinityPreset.key Harbor Core Node label key to match Ignored if `core.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param core.nodeAffinityPreset.values Harbor Core Node label values to match. Ignored if `core.affinity` is set. - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param core.affinity Harbor Core Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: core.podAffinityPreset, core.podAntiAffinityPreset, and core.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param core.nodeSelector Harbor Core Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param core.tolerations Harbor Core Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param core.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param core.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param core.schedulerName Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param core.sidecars Add additional sidecar containers to the Harbor Core pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param core.initContainers Add additional init containers to the Harbor Core pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param core.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param core.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param core.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `core.pdb.minAvailable` and `core.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param core.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Core pods - ## - extraVolumeMounts: [] - ## @param core.extraVolumes Optionally specify extra list of additional volumes for the Harbor Core pods - ## - extraVolumes: [] - ## @param core.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Core ServiceAccount configuration - ## - serviceAccount: - ## @param core.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param core.serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param core.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param core.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Harbor Core service configuration - ## - service: - ## @param core.service.ports.http Harbor Core HTTP service port - ## @param core.service.ports.https Harbor Core HTTPS service port - ## @param core.service.ports.metrics Harbor Core metrics service port - ## - ports: - http: 80 - https: 443 - metrics: 8001 - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param core.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param core.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param core.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param core.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param core.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param core.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param core.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section Harbor Jobservice Parameters -## -jobservice: - ## Bitnami Harbor Jobservice image - ## ref: https://hub.docker.com/r/bitnami/harbor-jobservice/tags/ - ## @param jobservice.image.registry [default: REGISTRY_NAME] Harbor Jobservice image registry - ## @param jobservice.image.repository [default: REPOSITORY_NAME/harbor-jobservice] Harbor Jobservice image repository - ## @skip jobservice.image.tag Harbor Jobservice image tag (immutable tags are recommended) - ## @param jobservice.image.digest Harbor Jobservice image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param jobservice.image.pullPolicy Harbor Jobservice image pull policy - ## @param jobservice.image.pullSecrets Harbor Jobservice image pull secrets - ## @param jobservice.image.debug Enable Harbor Jobservice image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-jobservice - tag: 2.12.2-debian-12-r1 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param jobservice.maxJobWorkers The max job workers - ## - maxJobWorkers: 10 - ## @param jobservice.redisNamespace Redis namespace for jobservice - ## - redisNamespace: harbor_job_service_namespace - ## @param jobservice.jobLogger The logger for jobs: `file`, `database` or `stdout` - ## - jobLogger: file - ## @param jobservice.secret Secret used when the job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. - ## If a secret key is not specified, Helm will generate one. - ## Must be a string of 16 chars. - ## - secret: {{ .Values.globals.harbor.jobserviceSecret }} - ## @param jobservice.existingSecret Existing secret for jobservice - ## The secret must contain the keys: - ## `secret` (required), - ## - existingSecret: "" - ## @param jobservice.existingEnvVarsSecret Existing secret for jobservice envvars - ## The secret must contain the keys: - ## `REGISTRY_CREDENTIAL_PASSWORD` (optional), - ## `JOB_SERVICE_POOL_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/1"), - ## - ## If you do not know how to start, let the chart generate a full secret for you before defining an existingEnvVarsSecret - existingEnvVarsSecret: "" - ## Use TLS in the container - ## - tls: - ## @param jobservice.tls.existingSecret Name of an existing secret with the certificates for internal TLS access - ## Requires `internalTLS.enabled` to be set to `true`` - ## Self-signed TLS certificates will be used otherwise - ## - existingSecret: "" - ## @param jobservice.command Override default container command (useful when using custom images) - ## - command: [] - ## @param jobservice.args Override default container args (useful when using custom images) - ## - args: [] - ## @param jobservice.extraEnvVars Array with extra environment variables to add Harbor Jobservice pods - ## - extraEnvVars: [] - ## @param jobservice.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Jobservice pods - ## - extraEnvVarsCM: "" - ## @param jobservice.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Jobservice pods - ## - extraEnvVarsSecret: "" - ## @param jobservice.containerPorts.http Harbor Jobservice HTTP container port - ## @param jobservice.containerPorts.https Harbor Jobservice HTTPS container port - ## @param jobservice.containerPorts.metrics Harbor Jobservice metrics container port - ## - containerPorts: - http: 8080 - https: 8443 - metrics: 8001 - ## @param jobservice.replicaCount Number of Harbor Jobservice replicas - ## - replicaCount: 1 - ## Configure 
extra options for Harbor Jobservice containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param jobservice.livenessProbe.enabled Enable livenessProbe on Harbor Jobservice containers - ## @param jobservice.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param jobservice.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param jobservice.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param jobservice.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param jobservice.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param jobservice.readinessProbe.enabled Enable readinessProbe on Harbor Jobservice containers - ## @param jobservice.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param jobservice.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param jobservice.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param jobservice.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param jobservice.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param jobservice.startupProbe.enabled Enable startupProbe on Harbor Jobservice containers - ## @param jobservice.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param jobservice.startupProbe.periodSeconds Period seconds for startupProbe - ## @param jobservice.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param jobservice.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param jobservice.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param jobservice.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param jobservice.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param jobservice.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Jobservice resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param jobservice.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobservice.resources is set (jobservice.resources is recommended for production). 
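For instance, instead of relying on the small preset, explicit requests and limits can be pinned; the sizings below are hypothetical, and once jobservice.resources is set the preset is ignored:

jobservice:
  resources:
    requests:
      cpu: 250m          # hypothetical sizing for a small cluster
      memory: 512Mi
    limits:
      cpu: "1"
      memory: 1Gi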
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param jobservice.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Harbor Jobservice pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param jobservice.podSecurityContext.enabled Enabled Harbor Jobservice pods' Security Context - ## @param jobservice.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param jobservice.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param jobservice.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param jobservice.podSecurityContext.fsGroup Set Harbor Jobservice pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Harbor Jobservice containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param jobservice.containerSecurityContext.enabled Enabled containers' Security Context - ## @param jobservice.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param jobservice.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param jobservice.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param jobservice.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param jobservice.containerSecurityContext.privileged Set container's Security Context privileged - ## @param jobservice.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param jobservice.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param jobservice.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param jobservice.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param jobservice.updateStrategy.type Harbor Jobservice deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. 
Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param jobservice.lifecycleHooks LifecycleHook for the Harbor Jobservice container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param jobservice.hostAliases Harbor Jobservice pods host aliases - ## - hostAliases: [] - ## @param jobservice.podLabels Add additional labels to the Harbor Jobservice pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param jobservice.podAnnotations Annotations to add to the Harbor Jobservice pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param jobservice.podAffinityPreset Harbor Jobservice Pod affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param jobservice.podAntiAffinityPreset Harbor Jobservice Pod anti-affinity preset. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param jobservice.nodeAffinityPreset.type Harbor Jobservice Node affinity preset type. Ignored if `jobservice.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param jobservice.nodeAffinityPreset.key Harbor Jobservice Node label key to match Ignored if `jobservice.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param jobservice.nodeAffinityPreset.values Harbor Jobservice Node label values to match. Ignored if `jobservice.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param jobservice.affinity Harbor Jobservice Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: jobservice.podAffinityPreset, jobservice.podAntiAffinityPreset, and jobservice.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param jobservice.nodeSelector Harbor Jobservice Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param jobservice.tolerations Harbor Jobservice Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param jobservice.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param jobservice.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param jobservice.schedulerName Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param jobservice.sidecars Add additional sidecar containers to the Harbor Jobservice pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param jobservice.initContainers Add additional init containers to the Harbor Jobservice pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param jobservice.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param jobservice.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param jobservice.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `jobservice.pdb.minAvailable` and `jobservice.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param jobservice.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Jobservice pods - ## - extraVolumeMounts: [] - ## @param jobservice.extraVolumes Optionally specify extra list of additional volumes for the Harbor Jobservice pods - ## - extraVolumes: [] - ## @param jobservice.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Jobservice ServiceAccount configuration - ## - serviceAccount: - ## @param jobservice.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param jobservice.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param jobservice.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param jobservice.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Harbor Jobservice service configuration - ## - service: - ## @param jobservice.service.ports.http Harbor Jobservice HTTP service port - ## @param jobservice.service.ports.https Harbor Jobservice HTTPS service port - ## @param jobservice.service.ports.metrics Harbor Jobservice HTTPS service port - ## - ports: - http: 80 - https: 443 - metrics: 8001 - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param jobservice.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param jobservice.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param jobservice.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
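Mirroring the ingress example earlier, the egress side can be tightened by dropping the blanket allowance and whitelisting destinations through extraEgress; the rule shape follows the chart's own e.g. block, and the port is hypothetical:

jobservice:
  networkPolicy:
    enabled: true
    allowExternalEgress: false     # no longer allow every port and destination
    extraEgress:
      - ports:
          - port: 5432             # hypothetical: an external PostgreSQL the jobs need
            protocol: TCP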
- ## - allowExternalEgress: true - ## @param jobservice.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param jobservice.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param jobservice.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param jobservice.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section Harbor Registry Parameters -## - -## Registry Parameters -## -registry: - ## @param registry.secret Secret is used to secure the upload state from client and registry storage backend. - ## See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http - ## If a secret key is not specified, Helm will generate one. - ## Must be a string of 16 chars. - ## - secret: {{ .Values.globals.harbor.registrySecret }} - ## @param registry.existingSecret Existing secret for registry - ## The secret must contain the keys: - ## `REGISTRY_HTPASSWD` (required - replaces insecure defaults), - ## `REGISTRY_HTTP_SECRET` (optional - generated on the fly if not specified), - ## `REGISTRY_REDIS_PASSWORD` (optional), - ## - existingSecret: "" - ## @param registry.relativeurls Make the registry return relative URLs in Location headers. The client is responsible for resolving the correct URL. - ## - relativeurls: false - ## @param registry.credentials.username The username for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd) - ## @param registry.credentials.password The password for accessing the registry instance, which is hosted by htpasswd auth mode. For more details see the [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. - ## @param registry.credentials.htpasswd The content of the htpasswd file, based on the values of `registry.credentials.username` and `registry.credentials.password`. Currently `helm` does not support bcrypt in the template script, so if the credentials are updated you need to generate this value manually by calling `htpasswd` - ## - credentials: - username: {{ .Values.globals.harbor.username }} - password: {{ .Values.globals.harbor.password }} - ## If you update the username or password of the registry, make sure to use the cli tool htpasswd to generate the bcrypt hash - ## e.g. "htpasswd -nbBC10 $username $password" - ## - htpasswd: {{ .Values.globals.harbor.htpasswd }} - middleware: - ## @param registry.middleware.enabled Middleware is used to add support for a CDN between backend storage and `docker pull` recipient.
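Tying the registry credentials above together, a sketch of generating the bcrypt entry out-of-band and feeding it to the chart (all values are placeholders):

    ## htpasswd -nbBC10 admin "s3cr3t-pass"   -> prints admin:$2y$10$...
    registry:
      credentials:
        username: admin
        password: "s3cr3t-pass"
        htpasswd: "admin:$2y$10$..."   # truncated illustrative bcrypt hash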
- ## - enabled: false - ## @param registry.middleware.type CDN type for the middleware - ## - type: cloudFront - ## @param registry.middleware.cloudFront.baseurl CloudFront CDN settings: Base URL - ## @param registry.middleware.cloudFront.keypairid CloudFront CDN settings: Keypair ID - ## @param registry.middleware.cloudFront.duration CloudFront CDN settings: Duration - ## @param registry.middleware.cloudFront.ipfilteredby CloudFront CDN settings: IP filters - ## @param registry.middleware.cloudFront.privateKeySecret CloudFront CDN settings: Secret name with the private key - ## - cloudFront: - baseurl: example.cloudfront.net - keypairid: KEYPAIRID - duration: 3000s - ipfilteredby: none - ## The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key - ## that allows access to CloudFront - ## - privateKeySecret: my-secret - ## Use TLS in the container - ## - tls: - ## @param registry.tls.existingSecret Name of an existing secret with the certificates for internal TLS access - ## Requires `internalTLS.enabled` to be set to `true` - ## Self-signed TLS certificates will be used otherwise - ## - existingSecret: "" - ## @param registry.replicaCount Number of Harbor Registry replicas - ## - replicaCount: 1 - ## Configure Harbor Registry pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param registry.podSecurityContext.enabled Enabled Harbor Registry pods' Security Context - ## @param registry.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param registry.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param registry.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param registry.podSecurityContext.fsGroup Set Harbor Registry pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## @param registry.updateStrategy.type Harbor Registry deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param registry.hostAliases Harbor Registry pods host aliases - ## - hostAliases: [] - ## @param registry.podLabels Add additional labels to the Harbor Registry pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param registry.podAnnotations Annotations to add to the Harbor Registry pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param registry.podAffinityPreset Harbor Registry Pod affinity preset. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param registry.podAntiAffinityPreset Harbor Registry Pod anti-affinity preset. Ignored if `registry.affinity` is set.
Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param registry.nodeAffinityPreset.type Harbor Registry Node affinity preset type. Ignored if `registry.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param registry.nodeAffinityPreset.key Harbor Registry Node label key to match Ignored if `registry.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param registry.nodeAffinityPreset.values Harbor Registry Node label values to match. Ignored if `registry.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param registry.affinity Harbor Registry Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: registry.podAffinityPreset, registry.podAntiAffinityPreset, and registry.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param registry.nodeSelector Harbor Registry Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param registry.tolerations Harbor Registry Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param registry.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param registry.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param registry.schedulerName Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param registry.sidecars Add additional sidecar containers to the Harbor Registry pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param registry.initContainers Add additional init containers to the Harbor Registry pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param registry.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param registry.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param registry.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `registry.pdb.minAvailable` and `registry.pdb.maxUnavailable` are empty. 
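For example, once the registry runs more than one replica, a conservative budget matching the parameters above could be sketched as (values illustrative):

    registry:
      replicaCount: 2
      pdb:
        create: true
        minAvailable: 1   # keep at least one registry pod through voluntary disruptions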
- ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param registry.extraVolumes Optionally specify extra list of additional volumes for the Harbor Registry pods - ## - extraVolumes: [] - ## @param registry.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Registry ServiceAccount configuration - ## - serviceAccount: - ## @param registry.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param registry.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param registry.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param registry.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param registry.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param registry.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param registry.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param registry.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param registry.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param registry.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param registry.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## Harbor Registry main container parameters - ## - server: - ## Bitnami Harbor Registry image - ## ref: https://hub.docker.com/r/bitnami/harbor-registry/tags/ - ## @param registry.server.image.registry [default: REGISTRY_NAME] Harbor Registry image registry - ## @param registry.server.image.repository [default: REPOSITORY_NAME/harbor-registry] Harbor Registry image repository - ## @skip registry.server.image.tag Harbor Registry image tag (immutable tags are recommended) - ## @param registry.server.image.digest Harbor Registry image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag - ## @param registry.server.image.pullPolicy Harbor Registry image pull policy - ## @param registry.server.image.pullSecrets Harbor Registry image pull secrets - ## @param registry.server.image.debug Enable Harbor Registry image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-registry - tag: 2.12.2-debian-12-r1 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param registry.server.command Override default container command (useful when using custom images) - ## - command: [] - ## @param registry.server.args Override default container args (useful when using custom images) - ## - args: [] - ## @param registry.server.extraEnvVars Array with extra environment variables to add Harbor Registry main containers - ## - extraEnvVars: [] - ## @param registry.server.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registry main containers - ## - extraEnvVarsCM: "" - ## @param registry.server.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registry main containers - ## - extraEnvVarsSecret: "" - ## @param registry.server.containerPorts.http Harbor Registry HTTP container port - ## @param registry.server.containerPorts.https Harbor Registry HTTPS container port - ## @param registry.server.containerPorts.debug Harbor Registry debug container port - ## @param registry.server.containerPorts.metrics Harbor Registry metrics container port - ## - containerPorts: - http: 5000 - https: 5443 - debug: 5001 - metrics: 8001 - ## Configure extra options for Harbor Registry main containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param registry.server.livenessProbe.enabled Enable livenessProbe on Harbor Registry main containers - ## @param registry.server.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param registry.server.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param registry.server.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param registry.server.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param registry.server.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param registry.server.readinessProbe.enabled Enable readinessProbe on Harbor Registry main containers - ## @param registry.server.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param registry.server.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param registry.server.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param registry.server.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param registry.server.readinessProbe.successThreshold 
Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param registry.server.startupProbe.enabled Enable startupProbe on Harbor Registry main containers - ## @param registry.server.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param registry.server.startupProbe.periodSeconds Period seconds for startupProbe - ## @param registry.server.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param registry.server.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param registry.server.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param registry.server.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param registry.server.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param registry.server.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Registry main resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param registry.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.server.resources is set (registry.server.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param registry.server.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Harbor Registry main containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param registry.server.containerSecurityContext.enabled Enabled containers' Security Context - ## @param registry.server.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param registry.server.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param registry.server.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param registry.server.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param registry.server.containerSecurityContext.privileged Set container's Security Context privileged - ## @param registry.server.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param registry.server.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param registry.server.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param registry.server.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - 
enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param registry.server.lifecycleHooks LifecycleHook for the Harbor Registry main container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param registry.server.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registry main pods - ## - extraVolumeMounts: [] - ## Harbor Registry service configuration - ## - service: - ## @param registry.server.service.ports.http Harbor Registry HTTP service port - ## @param registry.server.service.ports.https Harbor Registry HTTPS service port - ## @param registry.server.service.ports.metrics Harbor Registry metrics service port - ## - ports: - http: 5000 - https: 5443 - metrics: 8001 - ## Harbor Registryctl parameters - ## - controller: - ## Bitnami Harbor Registryctl image - ## ref: https://hub.docker.com/r/bitnami/harbor-registryctl/tags/ - ## @param registry.controller.image.registry [default: REGISTRY_NAME] Harbor Registryctl image registry - ## @param registry.controller.image.repository [default: REPOSITORY_NAME/harbor-registryctl] Harbor Registryctl image repository - ## @skip registry.controller.image.tag Harbor Registryctl image tag (immutable tags are recommended) - ## @param registry.controller.image.digest Harbor Registryctl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param registry.controller.image.pullPolicy Harbor Registryctl image pull policy - ## @param registry.controller.image.pullSecrets Harbor Registryctl image pull secrets - ## @param registry.controller.image.debug Enable Harbor Registryctl image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-registryctl - tag: 2.12.2-debian-12-r1 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
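Such a pull secret is typically created out-of-band; a hedged sketch, reusing the secret name from the chart's own example below (namespace and registry address are placeholders):

    ## kubectl create secret docker-registry myRegistryKeySecretName \
    ##   --namespace harbor \
    ##   --docker-server=registry.example.com \
    ##   --docker-username=bot --docker-password=<token>
    registry:
      controller:
        image:
          pullSecrets:
            - myRegistryKeySecretName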
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param registry.controller.command Override default container command (useful when using custom images) - ## - command: [] - ## @param registry.controller.args Override default container args (useful when using custom images) - ## - args: [] - ## @param registry.controller.extraEnvVars Array with extra environment variables to add Harbor Registryctl containers - ## - extraEnvVars: [] - ## @param registry.controller.extraEnvVarsCM ConfigMap containing extra environment variables for Harbor Registryctl containers - ## - extraEnvVarsCM: "" - ## @param registry.controller.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Harbor Registryctl containers - ## - extraEnvVarsSecret: "" - ## @param registry.controller.containerPorts.http Harbor Registryctl HTTP container port - ## @param registry.controller.containerPorts.https Harbor Registryctl HTTPS container port - ## - containerPorts: - http: 8080 - https: 8443 - ## Configure extra options for Harbor Registryctl containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param registry.controller.livenessProbe.enabled Enable livenessProbe on Harbor Registryctl containers - ## @param registry.controller.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param registry.controller.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param registry.controller.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param registry.controller.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param registry.controller.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param registry.controller.readinessProbe.enabled Enable readinessProbe on Harbor Registryctl containers - ## @param registry.controller.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param registry.controller.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param registry.controller.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param registry.controller.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param registry.controller.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param registry.controller.startupProbe.enabled Enable startupProbe on Harbor Registryctl containers - ## @param registry.controller.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param registry.controller.startupProbe.periodSeconds Period seconds for startupProbe - ## @param registry.controller.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param registry.controller.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param registry.controller.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: 
false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param registry.controller.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param registry.controller.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param registry.controller.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Registryctl resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param registry.controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if registry.controller.resources is set (registry.controller.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param registry.controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Harbor Registryctl containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param registry.controller.containerSecurityContext.enabled Enabled containers' Security Context - ## @param registry.controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param registry.controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param registry.controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param registry.controller.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param registry.controller.containerSecurityContext.privileged Set container's Security Context privileged - ## @param registry.controller.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param registry.controller.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param registry.controller.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param registry.controller.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param registry.controller.lifecycleHooks LifecycleHook for the Harbor Registryctl container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param registry.controller.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Harbor Registryctl pods - ## - extraVolumeMounts: [] - ## Harbor Registryctl service configuration - ## - service: - ## @param registry.controller.service.ports.http Harbor 
Registryctl HTTP service port - ## @param registry.controller.service.ports.https Harbor Registryctl HTTPS service port - ## - ports: - http: 8080 - https: 8443 -## @section Harbor Adapter Trivy Parameters -## -trivy: - ## Bitnami Harbor Adapter Trivy image - ## ref: https://hub.docker.com/r/bitnami/harbor-adapter-trivy/tags/ - ## @param trivy.image.registry [default: REGISTRY_NAME] Harbor Adapter Trivy image registry - ## @param trivy.image.repository [default: REPOSITORY_NAME/harbor-adapter-trivy] Harbor Adapter Trivy image repository - ## @skip trivy.image.tag Harbor Adapter Trivy image tag (immutable tags are recommended) - ## @param trivy.image.digest Harbor Adapter Trivy image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param trivy.image.pullPolicy Harbor Adapter Trivy image pull policy - ## @param trivy.image.pullSecrets Harbor Adapter Trivy image pull secrets - ## @param trivy.image.debug Enable Harbor Adapter Trivy image debug mode - ## - image: - registry: docker.io - repository: bitnami/harbor-adapter-trivy - tag: 2.12.2-debian-12-r0 - digest: "" - ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param trivy.enabled Enable Trivy - ## - enabled: true - ## @param trivy.debugMode The flag to enable Trivy debug mode - ## - debugMode: false - ## @param trivy.vulnType Comma-separated list of vulnerability types. Possible values `os` and `library`. - ## - vulnType: "os,library" - ## @param trivy.severity Comma-separated list of severities to be checked - ## - severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" - ## @param trivy.ignoreUnfixed The flag to display only fixed vulnerabilities - ## - ignoreUnfixed: false - ## @param trivy.insecure The flag to skip verifying registry certificate - ## - insecure: false - ## @param trivy.existingEnvVarsSecret Existing secret for trivy - ## The secret must contain the keys: - ## `SCANNER_TRIVY_GITHUB_TOKEN` (optional) - ## `SCANNER_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5") - ## `SCANNER_STORE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5") - ## `SCANNER_JOB_QUEUE_REDIS_URL` (required - if using the internal Redis - set to base64 of "redis://harbor-redis-master:6379/5") - ## - existingEnvVarsSecret: "" - ## @param trivy.gitHubToken The GitHub access token to download Trivy DB - ## - ## Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. - ## It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached - ## in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update - ## timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one. - ## Currently, the database is updated every 12 hours and published as a new release to GitHub. - ## - ## Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. 
Normally such rate limit is enough - ## for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 - ## requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult - ## https://developer.github.com/v3/#rate-limiting - ## - ## You can create a GitHub token by following the instructions in - ## https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line - ## - gitHubToken: "" - ## @param trivy.skipUpdate The flag to disable Trivy DB downloads from GitHub - ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues. - ## If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the - ## `/bitnami/harbor-adapter-trivy/.cache/trivy/db/trivy.db` path. - ## ref: https://trivy.dev/dev/docs/configuration/db/ - ## - skipUpdate: false - ## @param trivy.skipJavaDbUpdate The flag to disable Trivy JAVA DB downloads. - ## You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues. - ## If the value is set to `true` you have to manually download the `trivy-java.db` file and mount it in the - ## `/bitnami/harbor-adapter-trivy/.cache/trivy/java-db/trivy-java.db` path. - ## - skipJavaDbUpdate: false - ## @param trivy.dbRepository OCI repositor(ies) to retrieve the trivy vulnerability database from - dbRepository: "" - ## @param trivy.javaDbRepository OCI repositor(ies) to retrieve the Java trivy vulnerability database from - javaDbRepository: "" - ## @param trivy.cacheDir Directory to store the cache - ## - cacheDir: "/bitnami/harbor-adapter-trivy/.cache" - ## Use TLS in the container - ## - tls: - ## @param trivy.tls.existingSecret Name of an existing secret with the certificates for internal TLS access - ## Requires `internalTLS.enabled` to be set to `true`` - ## Self-signed TLS certificates will be used otherwise - ## - existingSecret: "" - ## @param trivy.command Override default container command (useful when using custom images) - ## - command: [] - ## @param trivy.args Override default container args (useful when using custom images) - ## - args: [] - ## @param trivy.extraEnvVars Array with extra environment variables to add Trivy pods - ## - extraEnvVars: [] - ## @param trivy.extraEnvVarsCM ConfigMap containing extra environment variables for Trivy pods - ## - extraEnvVarsCM: "" - ## @param trivy.extraEnvVarsSecret Secret containing extra environment variables (in case of sensitive data) for Trivy pods - ## - extraEnvVarsSecret: "" - ## @param trivy.containerPorts.http Trivy HTTP container port - ## @param trivy.containerPorts.https Trivy HTTPS container port - ## - containerPorts: - http: 8080 - https: 8443 - ## @param trivy.replicaCount Number of Trivy replicas - ## - replicaCount: 1 - ## Configure extra options for Trivy containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param trivy.livenessProbe.enabled Enable livenessProbe on Trivy containers - ## @param trivy.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param trivy.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param trivy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param trivy.livenessProbe.failureThreshold Failure 
threshold for livenessProbe - ## @param trivy.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param trivy.readinessProbe.enabled Enable readinessProbe on Trivy containers - ## @param trivy.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param trivy.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param trivy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param trivy.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param trivy.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param trivy.startupProbe.enabled Enable startupProbe on Trivy containers - ## @param trivy.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param trivy.startupProbe.periodSeconds Period seconds for startupProbe - ## @param trivy.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param trivy.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param trivy.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param trivy.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param trivy.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param trivy.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Trivy resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param trivy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if trivy.resources is set (trivy.resources is recommended for production). 
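Looking back at the Trivy DB flags documented above, a hedged sketch for CI or air-gapped clusters (the token is a placeholder):

    trivy:
      gitHubToken: "ghp_exampleToken0000"   # placeholder; lifts the GitHub rate limit to 5000 req/h
      ## or, fully offline: stop downloads and mount a pre-fetched trivy.db yourself
      # skipUpdate: true
      # skipJavaDbUpdate: true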
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "small" - ## @param trivy.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Trivy pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param trivy.podSecurityContext.enabled Enabled Trivy pods' Security Context - ## @param trivy.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param trivy.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param trivy.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param trivy.podSecurityContext.fsGroup Set Trivy pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Trivy containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param trivy.containerSecurityContext.enabled Enabled containers' Security Context - ## @param trivy.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param trivy.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param trivy.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param trivy.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param trivy.containerSecurityContext.privileged Set container's Security Context privileged - ## @param trivy.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param trivy.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param trivy.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param trivy.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param trivy.updateStrategy.type Trivy deployment strategy type - only really applicable for deployments with RWO PVs attached - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. 
Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param trivy.lifecycleHooks LifecycleHook for the Trivy container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param trivy.hostAliases Trivy pods host aliases - ## - hostAliases: [] - ## @param trivy.podLabels Add additional labels to the Trivy pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param trivy.podAnnotations Annotations to add to the Trivy pods (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param trivy.podAffinityPreset Trivy Pod affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param trivy.podAntiAffinityPreset Trivy Pod anti-affinity preset. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param trivy.nodeAffinityPreset.type Trivy Node affinity preset type. Ignored if `trivy.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param trivy.nodeAffinityPreset.key Trivy Node label key to match Ignored if `trivy.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param trivy.nodeAffinityPreset.values Trivy Node label values to match. Ignored if `trivy.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param trivy.affinity Trivy Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: trivy.podAffinityPreset, trivy.podAntiAffinityPreset, and trivy.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param trivy.nodeSelector Trivy Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param trivy.tolerations Trivy Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param trivy.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param trivy.priorityClassName Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param trivy.schedulerName Use an alternate scheduler, e.g. "stork". 
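As an example of the spread constraints mentioned above, distributing two Trivy replicas across nodes might be sketched as (the pod label is an assumption, not taken from the chart):

    trivy:
      replicaCount: 2
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              app.kubernetes.io/component: trivy   # assumed pod label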
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param trivy.sidecars Add additional sidecar containers to the Trivy pods - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param trivy.initContainers Add additional init containers to the Trivy pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param trivy.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param trivy.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param trivy.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `trivy.pdb.minAvailable` and `trivy.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param trivy.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Trivy pods - ## - extraVolumeMounts: [] - ## @param trivy.extraVolumes Optionally specify extra list of additional volumes for the Trivy pods - ## - extraVolumes: [] - ## @param trivy.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Trivy ServiceAccount configuration - ## - serviceAccount: - ## @param trivy.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param trivy.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param trivy.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param trivy.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Trivy service configuration - ## - service: - ## @param trivy.service.ports.http Trivy HTTP service port - ## @param trivy.service.ports.https Trivy HTTPS service port - ## - ports: - http: 8080 - https: 8443 - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param trivy.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param trivy.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param trivy.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
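On the egress side, a hedged counterpart that denies arbitrary egress while still allowing DNS could look like:

    trivy:
      networkPolicy:
        enabled: true
        allowExternalEgress: false
        extraEgress:
          - ports:
              - port: 53          # keep DNS lookups working
                protocol: UDP
          ## note: Trivy DB downloads would need a further egress rule unless skipUpdate is set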
- ## - allowExternalEgress: true - ## @param trivy.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param trivy.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param trivy.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param trivy.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section Harbor Exporter Parameters -## -exporter: - ## Bitnami Harbor Exporter image - ## ref: https://hub.docker.com/r/bitnami/harbor-exporter/tags/ - ## @param exporter.image.registry [default: REGISTRY_NAME] Harbor Exporter image registry - ## @param exporter.image.repository [default: REPOSITORY_NAME/harbor-exporter] Harbor Exporter image repository - ## @skip exporter.image.tag Harbor Exporter image tag - ## @param exporter.image.digest Harbor Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param exporter.image.pullPolicy Harbor exporter image pull policy - ## @param exporter.image.pullSecrets Specify docker-registry secret names as an array - ## @param exporter.image.debug Specify if debug logs should be enabled - ## - image: - registry: docker.io - repository: bitnami/harbor-exporter - tag: 2.12.2-debian-12-r1 - digest: "" - ## Specify an imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace.
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param exporter.command Override default container command (useful when using custom images) - ## - command: [] - ## @param exporter.args Override default container args (useful when using custom images) - ## - args: [] - ## @param exporter.extraEnvVars Array containing extra env vars - ## For example: - ## - name: HARBOR_DATABASE_SSLMODE - ## value: verify-ca - ## - extraEnvVars: [] - ## @param exporter.extraEnvVarsCM ConfigMap containing extra env vars - ## - extraEnvVarsCM: "" - ## @param exporter.extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data) - ## - extraEnvVarsSecret: "" - ## @param exporter.containerPorts.metrics Harbor Exporter HTTP container port - ## - containerPorts: - metrics: 8001 - ## @param exporter.replicaCount The replica count - ## - replicaCount: 1 - ## Harbor Exporter containers' liveness probe - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param exporter.livenessProbe.enabled Enable livenessProbe - ## @param exporter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param exporter.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param exporter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param exporter.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param exporter.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## Harbor Exporter containers' readiness probe - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param exporter.readinessProbe.enabled Enable readinessProbe - ## @param exporter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param exporter.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param exporter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param exporter.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param exporter.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param exporter.startupProbe.enabled Enable startupProbe on Harbor Exporter containers - ## @param exporter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param exporter.startupProbe.periodSeconds Period seconds for startupProbe - ## @param exporter.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param exporter.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param exporter.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param exporter.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param exporter.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## 
@param exporter.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## Harbor Exporter resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param exporter.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if exporter.resources is set (exporter.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param exporter.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Configure Exporter pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param exporter.podSecurityContext.enabled Enabled Exporter pods' Security Context - ## @param exporter.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param exporter.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param exporter.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param exporter.podSecurityContext.fsGroup Set Exporter pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Configure Exporter containers (only main one) Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param exporter.containerSecurityContext.enabled Enabled containers' Security Context - ## @param exporter.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param exporter.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param exporter.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param exporter.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param exporter.containerSecurityContext.privileged Set container's Security Context privileged - ## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param exporter.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param exporter.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param exporter.updateStrategy.type The update strategy for deployments with persistent volumes: RollingUpdate or Recreate. 
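A minimal sketch of selecting the Recreate option being described here:

    exporter:
      updateStrategy:
        type: Recreate   # free the RWO volume before the replacement pod starts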
Set it as Recreate when RWM (ReadWriteMany) for volumes isn't supported - ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the - ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will - ## terminate the single previous pod, so that the new, incoming pod can attach to the PV - ## - updateStrategy: - type: RollingUpdate - ## @param exporter.lifecycleHooks LifecycleHook to set additional configuration at startup, e.g. LDAP settings via REST API. Evaluated as a template - ## - lifecycleHooks: {} - ## @param exporter.hostAliases Exporter pods host aliases - ## - hostAliases: [] - ## @param exporter.podLabels Add additional labels to the pod (evaluated as a template) - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param exporter.podAnnotations Annotations to add to the exporter pod - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param exporter.podAffinityPreset Harbor Exporter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param exporter.podAntiAffinityPreset Harbor Exporter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param exporter.nodeAffinityPreset.type Harbor Exporter Node affinity preset type. Ignored if `exporter.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param exporter.nodeAffinityPreset.key Harbor Exporter Node label key to match. Ignored if `exporter.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param exporter.nodeAffinityPreset.values Harbor Exporter Node label values to match. Ignored if `exporter.affinity` is set. - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param exporter.affinity Harbor Exporter Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: `exporter.podAffinityPreset`, `exporter.podAntiAffinityPreset`, and `exporter.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param exporter.priorityClassName Exporter pods Priority Class Name - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param exporter.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param exporter.nodeSelector Harbor Exporter Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param exporter.tolerations Harbor Exporter Tolerations for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param exporter.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param exporter.initContainers Add additional init containers to the pod (evaluated as a template) - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param exporter.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param exporter.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param exporter.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `exporter.pdb.minAvailable` and `exporter.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param exporter.extraVolumeMounts - ## - extraVolumeMounts: [] - ## @param exporter.extraVolumes - ## - extraVolumes: [] - ## @param exporter.sidecars Attach additional containers to the pod (evaluated as a template) - ## - sidecars: [] - ## @param exporter.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## Harbor Exporter ServiceAccount configuration - ## - serviceAccount: - ## @param exporter.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param exporter.serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param exporter.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## - automountServiceAccountToken: false - ## @param exporter.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Exporter service configuration - ## - service: - ## @param exporter.service.ports.metrics Exporter HTTP service port - ## - ports: - metrics: 8001 - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param exporter.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param exporter.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param exporter.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param exporter.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param exporter.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param exporter.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param exporter.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} -## @section PostgreSQL Parameters -## - -## PostgreSQL chart configuration -## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml -## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart -## @param postgresql.auth.enablePostgresUser Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user -## @param postgresql.auth.postgresPassword Password for the "postgres" admin user -## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials -## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`) -## @param postgresql.primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) -## @param postgresql.primary.initdb.scripts [object] Initdb scripts to create Harbor databases -## -postgresql: - enabled: false - ## Override PostgreSQL default image as 14.x is not supported https://goharbor.io/docs/2.4.0/install-config/ - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql - ## @param postgresql.image.registry [default: REGISTRY_NAME] PostgreSQL image registry - ## @param postgresql.image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository - ## @skip postgresql.image.tag PostgreSQL image tag (immutable tags are recommended) - ## @param postgresql.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## - image: - registry: docker.io - repository: bitnami/postgresql - tag: 14.15.0-debian-12-r8 - digest: "" - auth: - enablePostgresUser: true - postgresPassword: not-secure-database-password - existingSecret: "" - architecture: standalone - primary: - extendedConfiguration: | - max_connections = 1024 - initdb: - scripts: - initial-registry.sql: | - CREATE DATABASE registry ENCODING 'UTF8'; - \c registry; - CREATE TABLE schema_migrations(version bigint not null primary key, dirty boolean not null); - ## PostgreSQL Primary resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param postgresql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param postgresql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} -## External PostgreSQL configuration -## All of these values are only used when postgresql.enabled is set to false -## @param externalDatabase.host Database host -## @param externalDatabase.port Database port number -## @param externalDatabase.user Non-root username for Harbor -## @param externalDatabase.password Password for the non-root username for Harbor -## @param externalDatabase.sslmode External database ssl mode -## @param externalDatabase.coreDatabase External database name for core -## @param externalDatabase.existingSecret The name of an existing secret with database credentials -## @param externalDatabase.existingSecretPasswordKey Password key on the existing secret -## -externalDatabase: - host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local - port: 5432 - user: {{ .Values.globals.harbor.postgres.username }} - password: {{ .Values.globals.harbor.postgres.password }} - sslmode: disable - coreDatabase: {{ .Values.globals.harbor.postgres.database }} - existingSecret: "" - existingSecretPasswordKey: "db-password" - -## @section Redis® parameters -## - -## Redis® chart configuration -## ref: https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml -## @param redis.enabled Switch to enable or disable the Redis® helm chart -## @param redis.auth.enabled Enable password authentication -## @param redis.auth.password Redis® password -## @param redis.auth.existingSecret The name of an existing secret with Redis® credentials -## @param redis.architecture Redis® architecture. Allowed values: `standalone` or `replication` -## @param redis.sentinel.enabled Use Redis® Sentinel on Redis® pods. -## @param redis.sentinel.masterSet Master set name -## @param redis.sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel -## -redis: - enabled: false - auth: - enabled: false - ## Redis® password (both master and slave). Defaults to a random 10-character alphanumeric string if not set and auth.enabled is true. - ## It should always be set using the password value or in the existingSecret to avoid issues - ## with Harbor. - ## The password value is ignored if existingSecret is set - ## - password: "" - existingSecret: "" - architecture: standalone - sentinel: - enabled: false - masterSet: mymaster - service: - ports: - sentinel: 26379 - master: - ## Redis® master resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param redis.master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). 
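A minimal sketch (not part of the deleted file) of how the externalDatabase block above renders once helmfile substitutes the globals; the namespace, user and database names are assumptions and the password is redacted:

externalDatabase:
  host: postgres-postgresql.databases.svc.cluster.local   # assumes globals.postgres.namespace = databases
  port: 5432
  user: harbor                  # assumed globals.harbor.postgres.username
  password: "<redacted>"        # globals.harbor.postgres.password
  sslmode: disable
  coreDatabase: harbor          # assumed globals.harbor.postgres.database
  existingSecret: ""
  existingSecretPasswordKey: "db-password"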
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param redis.master.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} -## External Redis® configuration -## All of these values are only used when redis.enabled is set to false -## @param externalRedis.host Redis® host -## @param externalRedis.port Redis® port number -## @param externalRedis.password Redis® password -## @param externalRedis.coreDatabaseIndex Index for core database -## @param externalRedis.jobserviceDatabaseIndex Index for jobservice database -## @param externalRedis.registryDatabaseIndex Index for registry database -## @param externalRedis.trivyAdapterDatabaseIndex Index for trivy adapter database -## -externalRedis: - host: redis-master.{{ .Values.globals.redis.namespace }}.svc.cluster.local - port: 6379 - password: {{ .Values.globals.redis.password }} - coreDatabaseIndex: "0" - jobserviceDatabaseIndex: "1" - registryDatabaseIndex: "2" - trivyAdapterDatabaseIndex: "5" - ## Redis® sentinel configuration - ## @param externalRedis.sentinel.enabled If external redis with sentinel is used, set it to `true` - ## @param externalRedis.sentinel.masterSet Name of sentinel masterSet if sentinel is used - ## @param externalRedis.sentinel.hosts Sentinel hosts and ports in the format `host1:port1,host2:port2` - ## - sentinel: - enabled: false - masterSet: "mymaster" - hosts: "" -## @section Harbor metrics parameters -## -metrics: - ## @param metrics.enabled Whether or not to enable metrics for different services - ## - enabled: true - ## @param metrics.path Path where metrics are exposed - ## - path: /metrics - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`) - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running - ## - namespace: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus - ## - labels: {} - ## @param metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - selector: {} - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus (a sketch of the rendered object follows below). 
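The serviceMonitor block above maps onto a Prometheus Operator ServiceMonitor object. A minimal sketch of what the chart would render with metrics.serviceMonitor.enabled set to true; the name, namespace and selector labels are illustrative assumptions, not part of the deleted file:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: harbor-exporter              # illustrative
  namespace: monitoring              # metrics.serviceMonitor.namespace
  labels: {}                         # metrics.serviceMonitor.labels
spec:
  jobLabel: app.kubernetes.io/name   # metrics.serviceMonitor.jobLabel
  selector:
    matchLabels:
      app.kubernetes.io/name: harbor # must match the exporter Service's labels
  endpoints:
    - port: http-metrics
      path: /metrics                 # metrics.path
      interval: 30s                  # metrics.serviceMonitor.interval
      honorLabels: false             # metrics.serviceMonitor.honorLabels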
- ## - jobLabel: "" - diff --git a/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl b/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl deleted file mode 100644 index 5911482..0000000 --- a/k8s/helmfile.d/values/init-dbs/values.yaml.gotmpl +++ /dev/null @@ -1,34 +0,0 @@ -# Secrets for pulling an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# This is to override the chart name. -nameOverride: "" -fullnameOverride: "" - -postgres: - image: - ref: postgres - # This sets the pull policy for images. - pullPolicy: IfNotPresent - host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local - username: postgres - password: {{ .Values.globals.postgres.postgresPassword }} - databases: - {{- range .Values.globals.postgres.databases }} - - database: {{ .database }} - username: {{ .username }} - password: {{ .password }} - {{- end }} -mysql: - image: - ref: mysql - # This sets the pull policy for images. - pullPolicy: IfNotPresent - host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local - username: root - password: {{ .Values.globals.mysql.rootPassword }} - databases: - {{- range .Values.globals.mysql.databases }} - - database: {{ .database }} - username: {{ .username }} - password: {{ .password }} - {{- end }} diff --git a/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl b/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl deleted file mode 100644 index be64576..0000000 --- a/k8s/helmfile.d/values/kube-prometheus-stack/values.yaml.gotmpl +++ /dev/null @@ -1,5142 +0,0 @@ -# Default values for kube-prometheus-stack. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -## Provide a name in place of kube-prometheus-stack for `app:` labels -## -nameOverride: "" - -## Override the deployment namespace -## -namespaceOverride: "" - -## Provide a k8s version to the auto dashboard import script, example: kubeTargetVersionOverride: 1.26.6 -## -kubeTargetVersionOverride: "" - -## Allow kubeVersion to be overridden while creating the ingress -## -kubeVersionOverride: "" - -## Provide a name to substitute for the full names of resources -## -fullnameOverride: "" - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -## Install Prometheus Operator CRDs -## -crds: - enabled: true - ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs. - ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks. - ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs. - ## This feature is in preview, off by default and may change in the future. - upgradeJob: - enabled: false - image: - busybox: - registry: docker.io - repository: busybox - tag: "latest" - sha: "" - pullPolicy: IfNotPresent - kubectl: - registry: registry.k8s.io - repository: kubectl - tag: "" # defaults to the Kubernetes version - sha: "" - pullPolicy: IfNotPresent - - env: {} - ## Define resources requests and limits for single Pods. - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - - ## Additional volumes - ## - extraVolumes: [] - - ## Additional volume mounts - ## - extraVolumeMounts: [] - - ## Define which Nodes the Pods are scheduled on. 
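Back in the init-dbs values deleted above, each {{- range }} loop emits one list entry per database declared in the globals file. A minimal sketch of the rendered postgres section, assuming a globals file that defines a single gitea database (names and namespace are assumptions, passwords redacted):

postgres:
  image:
    ref: postgres
    pullPolicy: IfNotPresent
  host: postgres-postgresql.databases.svc.cluster.local   # assumes globals.postgres.namespace = databases
  username: postgres
  password: "<postgres admin password>"
  databases:
    - database: gitea             # one entry per globals.postgres.databases item (assumed)
      username: gitea
      password: "<gitea db password>"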
- ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Assign custom affinity rules to the upgrade-crd job - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## If specified, the pod's tolerations. - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## If specified, the pod's topology spread constraints. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app: alertmanager - - # ## Labels to add to the upgrade-crd job - # ## - labels: {} - - ## Annotations to add to the upgrade-crd job - ## - annotations: {} - - ## Labels to add to the upgrade-crd pod - ## - podLabels: {} - - ## Annotations to add to the upgrade-crd pod - ## - podAnnotations: {} - - ## Service account for upgrade crd job to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - annotations: {} - labels: {} - automountServiceAccountToken: true - - ## Container-specific security context configuration - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - containerSecurityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. 
*v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - fsGroup: 65534 - runAsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault - -## custom Rules to override "for" and "severity" in defaultRules -## -customRules: {} - # AlertmanagerFailedReload: - # for: 3m - # AlertmanagerMembersInconsistent: - # for: 5m - # severity: "warning" - -## Create default rules for monitoring the cluster -## -defaultRules: - create: true - rules: - alertmanager: true - etcd: true - configReloaders: true - general: true - k8sContainerCpuUsageSecondsTotal: true - k8sContainerMemoryCache: true - k8sContainerMemoryRss: true - k8sContainerMemorySwap: true - k8sContainerResource: true - k8sContainerMemoryWorkingSetBytes: true - k8sPodOwner: true - kubeApiserverAvailability: true - kubeApiserverBurnrate: true - kubeApiserverHistogram: true - kubeApiserverSlos: true - kubeControllerManager: true - kubelet: true - kubeProxy: true - kubePrometheusGeneral: true - kubePrometheusNodeRecording: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - kubeSchedulerAlerting: true - kubeSchedulerRecording: true - kubeStateMetrics: true - network: true - node: true - nodeExporterAlerting: true - nodeExporterRecording: true - prometheus: true - prometheusOperator: true - windows: true - - ## Reduce app namespace alert scope - appNamespacesTarget: "" - - ## Set keep_firing_for for all alerts - keepFiringFor: "" - - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - - ## Additional labels for PrometheusRule alerts - additionalRuleLabels: {} - - ## Additional annotations for PrometheusRule alerts - additionalRuleAnnotations: {} - - ## Additional labels for specific PrometheusRule alert groups - additionalRuleGroupLabels: - alertmanager: {} - etcd: {} - configReloaders: {} - general: {} - k8sContainerCpuUsageSecondsTotal: {} - k8sContainerMemoryCache: {} - k8sContainerMemoryRss: {} - k8sContainerMemorySwap: {} - k8sContainerResource: {} - k8sPodOwner: {} - kubeApiserverAvailability: {} - kubeApiserverBurnrate: {} - kubeApiserverHistogram: {} - kubeApiserverSlos: {} - kubeControllerManager: {} - kubelet: {} - kubeProxy: {} - kubePrometheusGeneral: {} - kubePrometheusNodeRecording: {} - kubernetesApps: {} - kubernetesResources: {} - kubernetesStorage: {} - kubernetesSystem: {} - kubeSchedulerAlerting: {} - kubeSchedulerRecording: {} - kubeStateMetrics: {} - network: {} - node: {} - nodeExporterAlerting: {} - nodeExporterRecording: {} - prometheus: {} - prometheusOperator: {} - - ## Additional annotations for specific PrometheusRule alerts groups - additionalRuleGroupAnnotations: - alertmanager: {} - etcd: {} - configReloaders: {} - general: {} - k8sContainerCpuUsageSecondsTotal: {} - k8sContainerMemoryCache: {} - k8sContainerMemoryRss: {} - k8sContainerMemorySwap: {} - k8sContainerResource: {} - k8sPodOwner: {} - kubeApiserverAvailability: {} - kubeApiserverBurnrate: {} - kubeApiserverHistogram: {} - kubeApiserverSlos: {} - kubeControllerManager: {} - kubelet: {} - kubeProxy: {} - kubePrometheusGeneral: {} - kubePrometheusNodeRecording: {} - kubernetesApps: {} - kubernetesResources: {} - kubernetesStorage: {} - kubernetesSystem: {} - kubeSchedulerAlerting: {} - kubeSchedulerRecording: {} - kubeStateMetrics: {} - network: {} - node: {} - nodeExporterAlerting: {} - nodeExporterRecording: {} - prometheus: {} - 
prometheusOperator: {} - - additionalAggregationLabels: [] - - ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. - runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" - - node: - fsSelector: 'fstype!=""' - # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"' - - ## Disabled PrometheusRule alerts - disabled: {} - # KubeAPIDown: true - # NodeRAIDDegraded: true - -## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. -## -# additionalPrometheusRules: [] -# - name: my-rule-file -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## Provide custom recording or alerting rules to be deployed into the cluster. -## -additionalPrometheusRulesMap: {} -# rule-name: -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## -global: - rbac: - create: true - - ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs - ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles - createAggregateClusterRoles: false - pspEnabled: false - pspAnnotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Global image registry to use if it needs to be overriden for some specific use cases (e.g local registries, custom images, ...) - ## - imageRegistry: "" - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - # or - # - "image-pull-secret" - -windowsMonitoring: - ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter') - enabled: false - -## Configuration for prometheus-windows-exporter -## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter -## -prometheus-windows-exporter: - ## Enable ServiceMonitor and set Kubernetes label to use as a job label - ## - prometheus: - monitor: - enabled: true - jobLabel: jobLabel - - releaseLabel: true - - ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards - ## - podLabels: - jobLabel: windows-exporter - - ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards - ## - config: |- - collectors: - enabled: '[defaults],memory,container' - -## Configuration for alertmanager -## ref: https://prometheus.io/docs/alerting/alertmanager/ -## -alertmanager: - - ## Deploy alertmanager - ## - enabled: true - - ## Annotations for Alertmanager - ## - annotations: {} - - ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 - ## - apiVersion: v2 - - ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features. 
- ## - enableFeatures: [] - - ## Create dashboard configmap even if alertmanager deployment has been disabled - ## - forceDeployDashboards: false - - ## Service account for Alertmanager to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - annotations: {} - automountServiceAccountToken: true - - ## Configure pod disruption budgets for Alertmanager - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ## Alertmanager configuration directives - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - config: - global: - resolve_timeout: 5m - inhibit_rules: - - source_matchers: - - 'severity = critical' - target_matchers: - - 'severity =~ warning|info' - equal: - - 'namespace' - - 'alertname' - - source_matchers: - - 'severity = warning' - target_matchers: - - 'severity = info' - equal: - - 'namespace' - - 'alertname' - - source_matchers: - - 'alertname = InfoInhibitor' - target_matchers: - - 'severity = info' - equal: - - 'namespace' - - target_matchers: - - 'alertname = InfoInhibitor' - route: - group_by: ['namespace'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - receiver: 'null' - matchers: - - alertname = "Watchdog" - receivers: - - name: 'null' - templates: - - '/etc/alertmanager/config/*.tmpl' - - ## Alertmanager configuration directives (as string type, preferred over the config hash map) - ## stringConfig will be used only, if tplConfig is true - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - stringConfig: "" - - ## Pass the Alertmanager configuration directives through Helm's templating - ## engine. If the Alertmanager configuration contains Alertmanager templates, - ## they'll need to be properly escaped so that they are not interpreted by - ## Helm - ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function - ## https://prometheus.io/docs/alerting/configuration/#tmpl_string - ## https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - tplConfig: false - - ## Alertmanager template files to format alerts - ## By default, templateFiles are placed in /etc/alertmanager/config/ and if - ## they have a .tmpl file suffix will be loaded. See config.templates above - ## to change, add other suffixes. If adding other suffixes, be sure to update - ## config.templates above to include those suffixes. 
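The config map above only routes the Watchdog heartbeat to the 'null' receiver. For illustration, a real receiver would be declared and matched like this; the receiver name and webhook URL are assumptions, not part of the deleted file:

route:
  routes:
    - receiver: 'null'
      matchers:
        - alertname = "Watchdog"
    - receiver: 'team-pager'             # assumed receiver name
      matchers:
        - severity = "critical"
receivers:
  - name: 'null'
  - name: 'team-pager'
    webhook_configs:
      - url: http://alert-forwarder.monitoring.svc:8080/   # illustrative endpoint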
- ## ref: https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - ## - templateFiles: {} - # - - ingress: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - - labels: {} - - ## Override ingress to a different defined port on the service - # servicePort: 8081 - ## Override ingress to a different service then the default, this is useful if you need to - ## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0) - # serviceName: kube-prometheus-stack-alertmanager-0 - - ## Hosts must be provided if Ingress is enabled. - ## - hosts: [] - # - alertmanager.domain.com - - ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## TLS configuration for Alertmanager Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: alertmanager-general-tls - # hosts: - # - alertmanager.example.com - - # -- BETA: Configure the gateway routes for the chart here. - # More routes can be added by adding a dictionary key like the 'main' route. - # Be aware that this is an early beta of this feature, - # kube-prometheus-stack does not guarantee this works and is subject to change. - # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk - # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2) - route: - main: - # -- Enables or disables the route - enabled: false - - # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2 - apiVersion: gateway.networking.k8s.io/v1 - # -- Set the route kind - # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute - kind: HTTPRoute - - annotations: {} - labels: {} - - hostnames: [] - # - my-filter.example.com - parentRefs: [] - # - name: acme-gw - - matches: - - path: - type: PathPrefix - value: / - - ## Filters define the filters that are applied to requests that match this rule. 
- filters: [] - - ## Additional custom rules that can be added to the route - additionalRules: [] - - ## Configuration for Alertmanager secret - ## - secret: - annotations: {} - - ## Configuration for creating an Ingress that will map to each Alertmanager replica service - ## alertmanager.servicePerReplica must be enabled - ## - ingressPerReplica: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - - ## Final form of the hostname for each per replica ingress is - ## - ## Prefix for the per replica ingress that will have `-$replicaNumber` - ## appended to the end - hostPrefix: "" - ## Domain that will be used for the per replica ingress - hostDomain: "" - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## Secret name containing the TLS certificate for alertmanager per replica ingress - ## Secret must be manually created in the namespace - tlsSecretName: "" - - ## Separated secret for each per replica Ingress. Can be used together with cert-manager - ## - tlsSecretPerReplica: - enabled: false - ## Final form of the secret for each per replica ingress is - ## - prefix: "alertmanager" - - ## Configuration for Alertmanager service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - - ## Port for Alertmanager Service to listen on - ## - port: 9093 - ## To be used with a proxy extraContainer port - ## - targetPort: 9093 - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30903 - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - - ## Additional ports to open for Alertmanager service - ## - additionalPorts: [] - # - name: oauth-proxy - # port: 8081 - # targetPort: 8081 - # - name: oauth-metrics - # port: 8082 - # targetPort: 8082 - - externalIPs: [] - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## If you want to make sure that connections from a particular client are passed to the same Pod each time - ## Accepts 'ClientIP' or 'None' - ## - sessionAffinity: None - - ## If you want to modify the ClientIP sessionAffinity timeout - ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" - ## - sessionAffinityConfig: - clientIP: - timeoutSeconds: 10800 - - ## Service type - ## - type: ClusterIP - - ## Configuration for creating a separate Service for each statefulset Alertmanager replica - ## - servicePerReplica: - enabled: false - annotations: {} - - ## Port for Alertmanager Service per replica to listen on - ## - port: 9093 - - ## To be used with a proxy extraContainer port - targetPort: 9093 - - ## Port to expose on each node - ## Only used if servicePerReplica.type is 'NodePort' - ## - nodePort: 30904 - - ## 
Loadbalancer source IP ranges - ## Only used if servicePerReplica.type is "LoadBalancer" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## Configuration for creating a ServiceMonitor for AlertManager - ## - serviceMonitor: - ## If true, a ServiceMonitor will be created for the AlertManager service. - ## - selfMonitor: true - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Additional labels - ## - additionalLabels: {} - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. - scheme: "" - - ## enableHttp2: Whether to enable HTTP2. - ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint - enableHttp2: true - - ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. - ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig - tlsConfig: {} - - bearerTokenFile: - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional Endpoints - ## - additionalEndpoints: [] - # - port: oauth-metrics - # path: /metrics - - ## Settings affecting alertmanagerSpec - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec - ## - alertmanagerSpec: - ## Statefulset's persistent volume claim retention policy - ## whenDeleted and whenScaled determine whether - ## statefulset's PVCs are deleted (true) or retained (false) - ## on scaling down and deleting statefulset, respectively. - ## Requires Kubernetes version 1.27.0+. - ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - persistentVolumeClaimRetentionPolicy: {} - # whenDeleted: Retain - # whenScaled: Retain - - ## Standard object's metadata. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata - ## Metadata Labels and Annotations get propagated to the Alertmanager pods. - ## - podMetadata: {} - - ## Image of Alertmanager - ## - image: - registry: quay.io - repository: prometheus/alertmanager - tag: v0.28.0 - sha: "" - - ## If true then the user will be responsible for providing a secret with alertmanager configuration - ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used - ## - useExistingSecret: false - - ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the - ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - ## - secrets: [] - - ## If false then the user will opt out of automounting API credentials. - ## - automountServiceAccountToken: true - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. - ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - ## - configMaps: [] - - ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for - ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config. - ## - # configSecret: - - ## WebTLSConfig defines the TLS parameters for HTTPS - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec - web: {} - - ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with. - ## - alertmanagerConfigSelector: {} - ## Example which selects all alertmanagerConfig resources - ## with label "alertconfig" with values any of "example-config" or "example-config-2" - # alertmanagerConfigSelector: - # matchExpressions: - # - key: alertconfig - # operator: In - # values: - # - example-config - # - example-config-2 - # - ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config" - # alertmanagerConfigSelector: - # matchLabels: - # role: example-config - - ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. - ## - alertmanagerConfigNamespaceSelector: {} - ## Example which selects all namespaces - ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2" - # alertmanagerConfigNamespaceSelector: - # matchExpressions: - # - key: alertmanagerconfig - # operator: In - # values: - # - example-namespace - # - example-namespace-2 - - ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled" - # alertmanagerConfigNamespaceSelector: - # matchLabels: - # alertmanagerconfig: enabled - - ## AlertmanagerConfig to be used as top level configuration - ## - alertmanagerConfiguration: {} - ## Example which selects a global alertmanagerconfig - # alertmanagerConfiguration: - # name: global-alertmanager-Configuration - - ## Defines the strategy used by AlertmanagerConfig objects to match alerts. e.g.: - ## - alertmanagerConfigMatcherStrategy: {} - ## Example which uses the OnNamespace strategy - # alertmanagerConfigMatcherStrategy: - # type: OnNamespace - - ## Define Log Format - # Use logfmt (default) or json logging - logFormat: logfmt - - ## Log level for Alertmanager to be configured with. 
- ## - logLevel: info - - ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the - ## running cluster equal to the expected size. - replicas: 1 - - ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression - ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). - ## - retention: 120h - - ## Storage is the definition of how storage will be used by the Alertmanager instances. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md - ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - - - ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false - ## - externalUrl: - - ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, - ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. - ## - routePrefix: / - - ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS. - scheme: "" - - ## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS. - ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig - tlsConfig: {} - - ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. - ## - paused: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Define resources requests and limits for single Pods. - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # requests: - # memory: 400Mi - - ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. - ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. - ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. - ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. - ## - podAntiAffinity: "soft" - - ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. - ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone - ## - podAntiAffinityTopologyKey: kubernetes.io/hostname - - ## Assign custom affinity rules to the alertmanager instance - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## If specified, the pod's tolerations. - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## If specified, the pod's topology spread constraints. 
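As configured above, storage: {} leaves Alertmanager on ephemeral storage; persisting it takes a volumeClaimTemplate along these lines. A sketch only — the storageClassName is an assumption based on the storage classes provisioned elsewhere in this repo:

storage:
  volumeClaimTemplate:
    spec:
      storageClassName: longhorn       # assumed; any ReadWriteOnce-capable class works
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi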
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app: alertmanager - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - seccompProfile: - type: RuntimeDefault - - ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. - ## Note this is only for the Alertmanager UI, not the gossip communication. - ## - listenLocal: false - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. - ## - containers: [] - # containers: - # - name: oauth-proxy - # image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1 - # args: - # - --upstream=http://127.0.0.1:9093 - # - --http-address=0.0.0.0:8081 - # - --metrics-address=0.0.0.0:8082 - # - ... - # ports: - # - containerPort: 8081 - # name: oauth-proxy - # protocol: TCP - # - containerPort: 8082 - # name: oauth-metrics - # protocol: TCP - # resources: {} - - # Additional volumes on the output StatefulSet definition. - volumes: [] - - # Additional VolumeMounts on the output StatefulSet definition. - volumeMounts: [] - - ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes - ## (permissions, dir tree) on mounted volumes before starting prometheus - initContainers: [] - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. - ## - additionalPeers: [] - - ## PortName to use for Alert Manager. - ## - portName: "http-web" - - ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 - ## - clusterAdvertiseAddress: false - - ## clusterGossipInterval determines interval between gossip attempts. - ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) - clusterGossipInterval: "" - - ## clusterPeerTimeout determines timeout for cluster peering. - ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) - clusterPeerTimeout: "" - - ## clusterPushpullInterval determines interval between pushpull attempts. - ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) - clusterPushpullInterval: "" - - ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster. - clusterLabel: "" - - ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. - ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. 
- forceEnableClusterMode: false - - ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to - ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). - minReadySeconds: 0 - - ## Additional configuration which is not covered by the properties above. (passed through tpl) - additionalConfig: {} - - ## Additional configuration which is not covered by the properties above. - ## Useful, if you need advanced templating inside alertmanagerSpec. - ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl) - additionalConfigString: "" - - ## ExtraSecret can be used to store various data in an extra secret - ## (use it for example to store hashed basic auth credentials) - extraSecret: - ## if not set, name will be auto generated - # name: "" - annotations: {} - data: {} - # auth: | - # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 - # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. - -## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml -## -grafana: - enabled: true - namespaceOverride: "" - - ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled - ## - forceDeployDatasources: false - - ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled - ## - forceDeployDashboards: false - - ## Deploy default dashboards - ## - defaultDashboardsEnabled: true - - ## Timezone for the default dashboards - ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg - ## - defaultDashboardsTimezone: browser - - ## Editable flag for the default dashboards - ## - defaultDashboardsEditable: true - - adminPassword: {{ .Values.globals.kubePrometheusStack.grafana.adminPassword }} - - rbac: - ## If true, Grafana PSPs will be created - ## - pspEnabled: false - - ingress: - ## If true, Grafana Ingress will be created - ## - enabled: true - - ## IngressClassName for Grafana Ingress. - ## Should be provided if Ingress is enabled. - ## - # ingressClassName: nginx - - ## Annotations for Grafana Ingress - ## - annotations: - kubernetes.io/ingress.class: {{ .Values.globals.kubePrometheusStack.ingressClass }} - cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }} - - ## Labels to be added to the Ingress - ## - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enabled. - ## - # hosts: - # - grafana.domain.com - hosts: - {{- range .Values.globals.kubePrometheusStack.grafana.hosts }} - - {{ . }} - {{- end }} - - ## Path for grafana ingress - path: / - - ## TLS configuration for grafana Ingress - ## Secret must be manually created in the namespace - ## - tls: - {{- range .Values.globals.kubePrometheusStack.grafana.hosts }} - - secretName: {{ . }}-tls - hosts: - - {{ . 
}} - {{- end }} - # - secretName: grafana-general-tls - # hosts: - # - grafana.example.com - - # # To make Grafana persistent (Using Statefulset) - # # - persistence: - enabled: true - type: sts - storageClassName: {{ .Values.globals.kubePrometheusStack.storageClass }} - accessModes: - - ReadWriteOnce - size: {{ .Values.globals.kubePrometheusStack.grafana.storageSize }} - finalizers: - - kubernetes.io/pvc-protection - - serviceAccount: - create: true - autoMount: true - - sidecar: - dashboards: - enabled: true - label: grafana_dashboard - labelValue: "1" - # Allow discovery in all namespaces for dashboards - searchNamespace: ALL - - # Support for new table panels, when enabled grafana auto migrates the old table panels to newer table panels - enableNewTablePanelSyntax: false - - ## Annotations for Grafana dashboard configmaps - ## - annotations: {} - multicluster: - global: - enabled: false - etcd: - enabled: false - provider: - allowUiUpdates: false - datasources: - enabled: true - defaultDatasourceEnabled: true - isDefaultDatasource: true - - name: Prometheus - uid: prometheus - - ## URL of prometheus datasource - ## - # url: http://prometheus-stack-prometheus:9090/ - - ## Prometheus request timeout in seconds - # timeout: 30 - - # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default - # defaultDatasourceScrapeInterval: 15s - - ## Annotations for Grafana datasource configmaps - ## - annotations: {} - - ## Set method for HTTP to send query to datasource - httpMethod: POST - - ## Create datasource for each Pod of Prometheus StatefulSet; - ## this uses headless service `prometheus-operated` which is - ## created by Prometheus Operator - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286 - createPrometheusReplicasDatasources: false - label: grafana_datasource - labelValue: "1" - - ## Field with internal link pointing to existing data source in Grafana. - ## Can be provisioned via additionalDataSources - exemplarTraceIdDestinations: {} - # datasourceUid: Jaeger - # traceIdLabelName: trace_id - alertmanager: - enabled: true - name: Alertmanager - uid: alertmanager - handleGrafanaManagedAlerts: false - implementation: prometheus - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # configMap: certs-configmap - # readOnly: true - - deleteDatasources: [] - # - name: example-datasource - # orgId: 1 - - ## Configure additional grafana datasources (passed through tpl) - ## ref: http://docs.grafana.org/administration/provisioning/#datasources - additionalDataSources: [] - # - name: prometheus-sample - # access: proxy - # basicAuth: true - # secureJsonData: - # basicAuthPassword: pass - # basicAuthUser: daco - # editable: false - # jsonData: - # tlsSkipVerify: true - # orgId: 1 - # type: prometheus - # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 - # version: 1 - - # Flag to mark provisioned data sources for deletion if they are no longer configured. - # It takes no effect if data sources are already listed in the deleteDatasources section. 
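The dashboard sidecar configured above imports any ConfigMap carrying the grafana_dashboard label, from any namespace since searchNamespace is ALL. A minimal sketch — the name and dashboard JSON are placeholders, not part of the deleted file:

apiVersion: v1
kind: ConfigMap
metadata:
  name: my-dashboard                 # placeholder
  labels:
    grafana_dashboard: "1"           # sidecar.dashboards.label / labelValue
data:
  my-dashboard.json: |-
    { "title": "My dashboard", "panels": [], "schemaVersion": 39 }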
- # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-config-file - prune: false - - ## Passed to grafana subchart and used by servicemonitor below - ## - service: - portName: http-web - ipFamilies: [] - ipFamilyPolicy: "" - - serviceMonitor: - # If true, a ServiceMonitor CRD is created for a prometheus operator - # https://github.com/coreos/prometheus-operator - # - enabled: true - - # Path to use for scraping metrics. Might be different if server.root_url is set - # in grafana.ini - path: "/metrics" - - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - - # labels for the ServiceMonitor - labels: {} - - # Scrape interval. If not set, the Prometheus default scrape interval is used. - # - interval: "" - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Flag to disable all the kubernetes component scrapers -## -kubernetesServiceMonitors: - enabled: true - -## Component scraping the kube api server -## -kubeApiServer: - enabled: true - tlsConfig: - serverName: kubernetes - insecureSkipVerify: false - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - jobLabel: component - selector: - matchLabels: - component: apiserver - provider: kubernetes - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: - # Drop excessively noisy apiserver buckets. - - action: drop - regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|30|40|45|50)(\.0)? 
- sourceLabels: - - __name__ - - le - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping the kubelet and kubelet-hosted cAdvisor -## -kubelet: - enabled: true - namespace: kube-system - - serviceMonitor: - ## Enable scraping /metrics from kubelet's service - kubelet: true - - ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator. - ## - attachMetadata: - node: false - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## If true, Prometheus use (respect) labels provided by exporter. - ## - honorLabels: true - - ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape. - ## - honorTimestamps: true - - ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false. - ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor. - ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849 - trackTimestampsStaleness: true - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## Enable scraping the kubelet over https. For requirements to enable this see - ## https://github.com/prometheus-operator/prometheus-operator/issues/926 - ## - https: true - - ## Skip TLS certificate validation when scraping. 
- ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs - ## - insecureSkipVerify: true - - ## Enable scraping /metrics/probes from kubelet's service - ## - probes: true - - ## Enable scraping /metrics/resource from kubelet's service - ## This is disabled by default because container metrics are already exposed by cAdvisor - ## - resource: false - # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource - resourcePath: "/metrics/resource/v1alpha1" - ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor - ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored - ## if kubelet.serviceMonitor.interval is not empty. - resourceInterval: 10s - - ## Enable scraping /metrics/cadvisor from kubelet's service - ## - cAdvisor: true - ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor - ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored - ## if kubelet.serviceMonitor.interval is not empty. - cAdvisorInterval: 10s - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - cAdvisorMetricRelabelings: - # Drop less useful container CPU metrics. - - sourceLabels: [__name__] - action: drop - regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)' - # Drop less useful container / always zero filesystem metrics. - - sourceLabels: [__name__] - action: drop - regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)' - # Drop less useful / always zero container memory metrics. - - sourceLabels: [__name__] - action: drop - regex: 'container_memory_(mapped_file|swap)' - # Drop less useful container process metrics. - - sourceLabels: [__name__] - action: drop - regex: 'container_(file_descriptors|tasks_state|threads_max)' - # Drop container_memory_failures_total{scope="hierarchy"} metrics, - # we only need the container scope. - - sourceLabels: [__name__, scope] - action: drop - regex: 'container_memory_failures_total;hierarchy' - # Drop container_network_... metrics that match various interfaces that - # correspond to CNI and similar interfaces. This avoids capturing network - # metrics for host network containers. - - sourceLabels: [__name__, interface] - action: drop - regex: 'container_network_;(cali|cilium|cni|lxc|nodelocaldns|tunl)' - # Drop container spec metrics that overlap with kube-state-metrics. - - sourceLabels: [__name__] - action: drop - regex: 'container_spec' - # Drop cgroup metrics with no pod. - - sourceLabels: [id, pod] - action: drop - regex: '.+;' - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - probesMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - ## metrics_path is required to match upstream rules and charts - cAdvisorRelabelings: - - action: replace - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - probesRelabelings: - - action: replace - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - resourceRelabelings: - - action: replace - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: - # Reduce bucket cardinality of kubelet storage operations. - - action: drop - sourceLabels: [__name__, le] - regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)? - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - ## metrics_path is required to match upstream rules and charts - relabelings: - - action: replace - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. 
- ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping the kube controller manager -## -kubeControllerManager: - enabled: true - - ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeControllerManager.endpoints only the port and targetPort are used - ## - service: - enabled: true - ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change - ## of default port in Kubernetes 1.22. - ## - port: null - targetPort: null - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # component: kube-controller-manager - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## port: Name of the port the metrics will be scraped from - ## - port: http-metrics - - jobLabel: jobLabel - selector: {} - # matchLabels: - # component: kube-controller-manager - - ## Enable scraping kube-controller-manager over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. - ## If null or unset, the value is determined dynamically based on target Kubernetes version. - ## - https: null - - # Skip TLS certificate validation when scraping - insecureSkipVerify: null - - # Name of the server to use when validating TLS certificate - serverName: null - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping coreDns. 
Use either this or kubeDns -## -coreDns: - enabled: true - service: - enabled: true - port: 9153 - targetPort: 9153 - - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # k8s-app: kube-dns - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## port: Name of the port the metrics will be scraped from - ## - port: http-metrics - - jobLabel: jobLabel - selector: {} - # matchLabels: - # k8s-app: kube-dns - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping kubeDns. Use either this or coreDns -## -kubeDns: - enabled: false - service: - dnsmasq: - port: 10054 - targetPort: 10054 - skydns: - port: 10055 - targetPort: 10055 - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
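coreDns and kubeDns are mutually exclusive scrapers for the cluster DNS service. On clusters still running kube-dns rather than CoreDNS, the pair of toggles flips; a sketch assuming such a cluster:

    coreDns:
      enabled: false  # turn off the CoreDNS scraper...
    kubeDns:
      enabled: true   # ...and scrape kube-dns (dnsmasq and skydns ports) instead
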
- ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - jobLabel: jobLabel - selector: {} - # matchLabels: - # k8s-app: kube-dns - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - dnsmasqMetricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - dnsmasqRelabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping etcd -## -kubeEtcd: - enabled: true - - ## If your etcd is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used - ## - service: - enabled: true - port: 2381 - targetPort: 2381 - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # component: etcd - - ## Configure secure access to the etcd cluster by loading a secret into prometheus and - ## specifying security configuration below. For example, with a secret named etcd-client-cert - ## - ## serviceMonitor: - ## scheme: https - ## insecureSkipVerify: false - ## serverName: localhost - ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client - ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - ## - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. 
Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - scheme: http - insecureSkipVerify: false - serverName: "" - caFile: "" - certFile: "" - keyFile: "" - - ## port: Name of the port the metrics will be scraped from - ## - port: http-metrics - - jobLabel: jobLabel - selector: {} - # matchLabels: - # component: etcd - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping kube scheduler -## -kubeScheduler: - enabled: true - - ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeScheduler.endpoints only the port and targetPort are used - ## - service: - enabled: true - ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change - ## of default port in Kubernetes 1.23. - ## - port: null - targetPort: null - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # component: kube-scheduler - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - ## Enable scraping kube-scheduler over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. - ## If null or unset, the value is determined dynamically based on target Kubernetes version. 
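The kubeEtcd comment block above already outlines the secure variant: load a client-certificate secret into Prometheus and point the ServiceMonitor at the mounted files. Spelled out as a sketch, where etcd-client-cert is the example secret name from that comment and prometheus.prometheusSpec.secrets performs the mount:

    kubeEtcd:
      serviceMonitor:
        scheme: https
        insecureSkipVerify: false
        serverName: localhost
        caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
        certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
        keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
    prometheus:
      prometheusSpec:
        secrets:
          - etcd-client-cert  # mounted at /etc/prometheus/secrets/etcd-client-cert/
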
- ## - https: null - - ## port: Name of the port the metrics will be scraped from - ## - port: http-metrics - - jobLabel: jobLabel - selector: {} - # matchLabels: - # component: kube-scheduler - - ## Skip TLS certificate validation when scraping - insecureSkipVerify: null - - ## Name of the server to use when validating TLS certificate - serverName: null - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping kube proxy -## -kubeProxy: - enabled: true - - ## If your kube proxy is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - service: - enabled: true - port: 10249 - targetPort: 10249 - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - # selector: - # k8s-app: kube-proxy - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## port: Name of the port the metrics will be scraped from - ## - port: http-metrics - - jobLabel: jobLabel - selector: {} - # matchLabels: - # k8s-app: kube-proxy - - ## Enable scraping kube-proxy over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## Additional labels - ## - additionalLabels: {} - # foo: bar - - ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor - targetLabels: [] - -## Component scraping kube state metrics -## -kubeStateMetrics: - enabled: true - -## Configuration for kube-state-metrics subchart -## -kube-state-metrics: - namespaceOverride: "" - rbac: - create: true - releaseLabel: true - prometheus: - monitor: - enabled: true - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. - ## - scrapeTimeout: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - # Keep labels from scraped data, overriding server-side labels - ## - honorLabels: true - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - selfMonitor: - enabled: false - -## Deploy node exporter as a daemonset to all nodes -## -nodeExporter: - enabled: true - operatingSystems: - linux: - enabled: true - aix: - enabled: true - darwin: - enabled: true - - ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled - ## - forceDeployDashboards: false - -## Configuration for prometheus-node-exporter subchart -## -prometheus-node-exporter: - namespaceOverride: "" - podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards - ## - jobLabel: node-exporter - releaseLabel: true - extraArgs: - - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) - - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ - service: - portName: http-metrics - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - labels: - jobLabel: node-exporter - - prometheus: - monitor: - enabled: true - - jobLabel: jobLabel - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. - ## - scrapeTimeout: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - sourceLabels: [__name__] - # separator: ; - # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. - ## - # attachMetadata: - # node: false - - rbac: - ## If true, create PSPs for node-exporter - ## - pspEnabled: false - -## Manages Prometheus and Alertmanager components -## -prometheusOperator: - enabled: true - - fullnameOverride: "" - - ## Number of old replicasets to retain ## - ## The default value is 10, 0 will garbage-collect old replicasets ## - revisionHistoryLimit: 10 - - ## Strategy of the deployment - ## - strategy: {} - - ## Prometheus-Operator v0.39.0 and later support TLS natively. - ## - tls: - enabled: true - # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants - tlsMinVersion: VersionTLS13 - # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. - internalPort: 10250 - - ## Liveness probe for the prometheusOperator deployment - ## - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - ## Readiness probe for the prometheusOperator deployment - ## - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted - ## rules from making their way into prometheus and potentially preventing the container from starting - admissionWebhooks: - ## Valid values: Fail, Ignore, IgnoreOnInstallOnly - ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail" - failurePolicy: "" - ## The default timeoutSeconds is 10 and the maximum value is 30. - timeoutSeconds: 10 - enabled: true - ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. - ## If unspecified, system trust roots on the apiserver are used. - caBundle: "" - ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. - ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own - ## certs ahead of time if you wish. 
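The admission webhook validates PrometheusRule objects at admission time so malformed rules never reach Prometheus. When webhook availability during installs or upgrades is a concern, the failure policy can be relaxed using the values documented above; a hedged sketch:

    prometheusOperator:
      admissionWebhooks:
        enabled: true
        failurePolicy: IgnoreOnInstallOnly  # Ignore on first install, Fail afterwards
        timeoutSeconds: 10                  # the documented maximum is 30
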
- ## - annotations: {} - # argocd.argoproj.io/hook: PreSync - # argocd.argoproj.io/hook-delete-policy: HookSucceeded - - namespaceSelector: {} - objectSelector: {} - - mutatingWebhookConfiguration: - annotations: {} - # argocd.argoproj.io/hook: PreSync - - validatingWebhookConfiguration: - annotations: {} - # argocd.argoproj.io/hook: PreSync - - deployment: - enabled: false - - ## Number of replicas - ## - replicas: 1 - - ## Strategy of the deployment - ## - strategy: {} - - # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - podDisruptionBudget: {} - # maxUnavailable: 1 - # minAvailable: 1 - - ## Number of old replicasets to retain ## - ## The default value is 10, 0 will garbage-collect old replicasets ## - revisionHistoryLimit: 10 - - ## Prometheus-Operator v0.39.0 and later support TLS natively. - ## - tls: - enabled: true - # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants - tlsMinVersion: VersionTLS13 - # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. - internalPort: 10250 - - ## Service account for Prometheus Operator Webhook to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - annotations: {} - automountServiceAccountToken: false - create: true - name: "" - - ## Configuration for Prometheus operator Webhook service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 31080 - - nodePortTls: 31443 - - ## Additional ports to open for Prometheus operator Webhook service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - ## - additionalPorts: [] - - ## Loadbalancer IP - ## Only use if service.type is "LoadBalancer" - ## - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## NodePort, ClusterIP, LoadBalancer - ## - type: ClusterIP - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # ## Labels to add to the operator webhook deployment - # ## - labels: {} - - ## Annotations to add to the operator webhook deployment - ## - annotations: {} - - ## Labels to add to the operator webhook pod - ## - podLabels: {} - - ## Annotations to add to the operator webhook pod - ## - podAnnotations: {} - - ## Assign a PriorityClassName to pods if set - # priorityClassName: "" - - ## Define Log Format - # Use logfmt (default) or json logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - ## Prometheus-operator webhook image - ## - image: - registry: quay.io - repository: prometheus-operator/admission-webhook - # if not set appVersion field from Chart.yaml is used - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Define Log Format - # Use logfmt (default) or json logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - - ## Liveness probe - ## - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 
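With deployment.enabled: false above, the webhook is served from the operator pod itself; running it as a standalone deployment decouples webhook availability from operator restarts. A minimal sketch (replica count illustrative):

    prometheusOperator:
      admissionWebhooks:
        deployment:
          enabled: true
          replicas: 2  # keep one instance reachable during rollouts
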
- - ## Readiness probe - ## - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - ## Resource limits & requests - ## - resources: {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## Assign custom affinity rules to the prometheus operator - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - securityContext: - fsGroup: 65534 - runAsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault - - ## Container-specific security context configuration - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - containerSecurityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - - ## If false then the user will opt out of automounting API credentials. - ## - automountServiceAccountToken: true - - patch: - enabled: true - image: - registry: registry.k8s.io - repository: ingress-nginx/kube-webhook-certgen - tag: v1.5.1 # latest tag: https://github.com/kubernetes/ingress-nginx/blob/main/images/kube-webhook-certgen/TAG - sha: "" - pullPolicy: IfNotPresent - resources: {} - ## Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - ttlSecondsAfterFinished: 60 - annotations: {} - # argocd.argoproj.io/hook: PreSync - # argocd.argoproj.io/hook-delete-policy: HookSucceeded - podAnnotations: {} - nodeSelector: {} - affinity: {} - tolerations: [] - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 2000 - seccompProfile: - type: RuntimeDefault - ## Service account for Prometheus Operator Webhook Job Patch to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - annotations: {} - automountServiceAccountToken: true - - # Security context for create job container - createSecretJob: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - - # Security context for patch job container - patchWebhookJob: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - - # Use certmanager to generate webhook certs - certManager: - enabled: false - # self-signed root certificate - rootCert: - duration: "" # default to be 5y - admissionCert: - duration: "" # default to be 1y - # issuerRef: - # name: "issuer" - # kind: "ClusterIssuer" - - ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). - ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration - ## - namespaces: {} - # releaseNamespace: true - # additional: - # - kube-system - - ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). - ## - denyNamespaces: [] - - ## Filter namespaces to look for prometheus-operator custom resources - ## - alertmanagerInstanceNamespaces: [] - alertmanagerConfigNamespaces: [] - prometheusInstanceNamespaces: [] - thanosRulerInstanceNamespaces: [] - - ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. - ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) - ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 - ## - # clusterDomain: "cluster.local" - - networkPolicy: - ## Enable creation of NetworkPolicy resources. - ## - enabled: false - - ## Flavor of the network policy to use. - # Can be: - # * kubernetes for networking.k8s.io/v1/NetworkPolicy - # * cilium for cilium.io/v2/CiliumNetworkPolicy - flavor: kubernetes - - # cilium: - # egress: - - ## match labels used in selector - # matchLabels: {} - - ## Service account for Prometheus Operator to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - automountServiceAccountToken: true - annotations: {} - - # -- terminationGracePeriodSeconds for container lifecycle hook - terminationGracePeriodSeconds: 30 - # -- Specify lifecycle hooks for the controller - lifecycle: {} - ## Configuration for Prometheus operator service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30080 - - nodePortTls: 30443 - - ## Additional ports to open for Prometheus operator service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - ## - additionalPorts: [] - - ## Loadbalancer IP - ## Only use if service.type is "LoadBalancer" - ## - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## NodePort, ClusterIP, LoadBalancer - ## - type: ClusterIP - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # ## Labels to add to the operator deployment - # ## - labels: {} - - ## Annotations to add to the operator deployment - ## - annotations: {} - - ## Labels to add to the operator pod - ## - podLabels: {} - - ## Annotations to add to the operator pod - ## - podAnnotations: {} - - ## Assign a PriorityClassName to pods if set - # priorityClassName: "" - - ## Define Log Format - # Use logfmt (default) or json logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - kubeletService: - ## If true, the operator will create and maintain a service for scraping kubelets - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md - ## - enabled: true - namespace: kube-system - selector: "" - name: "" - - ## Create Endpoints objects for kubelet targets. - kubeletEndpointsEnabled: true - ## Create EndpointSlice objects for kubelet targets. - kubeletEndpointSliceEnabled: false - - ## Extra arguments to pass to prometheusOperator - # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/operator.md - extraArgs: [] - # - --labels="cluster=talos-cluster" - - ## Create a servicemonitor for the operator - ## - serviceMonitor: - ## If true, create a serviceMonitor for prometheus operator - ## - selfMonitor: true - - ## Labels for ServiceMonitor - additionalLabels: {} - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
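kubeletService above is what backs the kubelet scrape configuration earlier in this file: the operator maintains a Service in kube-system whose endpoints are the kubelets, and the kubelet ServiceMonitor targets it. A sketch that switches target publication from legacy Endpoints objects to EndpointSlices, assuming the two toggles are meant to be flipped together:

    prometheusOperator:
      kubeletService:
        enabled: true
        namespace: kube-system
      kubeletEndpointsEnabled: false     # stop creating Endpoints objects...
      kubeletEndpointSliceEnabled: true  # ...and publish EndpointSlices instead
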
- ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. - scrapeTimeout: "" - - ## Metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^()$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Resource limits & requests - ## - resources: {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - - ## Operator Environment - ## env: - ## VARIABLE: value - env: - GOGC: "30" - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## Assign custom affinity rules to the prometheus operator - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - securityContext: - fsGroup: 65534 - runAsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault - - ## Container-specific security context configuration - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - containerSecurityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - - # Enable vertical pod autoscaler support for prometheus-operator - verticalPodAutoscaler: - enabled: false - - # Recommender responsible for generating recommendation for the object. - # List should be empty (then the default recommender will generate the recommendation) - # or contain exactly one recommender. - # recommenders: - # - name: custom-recommender-performance - - # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory - controlledResources: [] - # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. 
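Taken together with the bounds listed just below, the verticalPodAutoscaler block lets the cluster right-size the operator pod instead of pinning static requests. A hedged sketch assembled from the fields in this block (resource bounds are illustrative, not recommendations):

    prometheusOperator:
      verticalPodAutoscaler:
        enabled: true
        controlledResources: ["cpu", "memory"]
        maxAllowed:
          cpu: 200m
          memory: 200Mi
        updatePolicy:
          updateMode: Auto
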
- # controlledValues: RequestsAndLimits - - # Define the max allowed resources for the pod - maxAllowed: {} - # cpu: 200m - # memory: 100Mi - # Define the min allowed resources for the pod - minAllowed: {} - # cpu: 200m - # memory: 100Mi - - updatePolicy: - # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction - # minReplicas: 1 - # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates - # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". - updateMode: Auto - - ## Prometheus-operator image - ## - image: - registry: quay.io - repository: prometheus-operator/prometheus-operator - # if not set appVersion field from Chart.yaml is used - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Prometheus image to use for prometheuses managed by the operator - ## - # prometheusDefaultBaseImage: prometheus/prometheus - - ## Prometheus image registry to use for prometheuses managed by the operator - ## - # prometheusDefaultBaseImageRegistry: quay.io - - ## Alertmanager image to use for alertmanagers managed by the operator - ## - # alertmanagerDefaultBaseImage: prometheus/alertmanager - - ## Alertmanager image registry to use for alertmanagers managed by the operator - ## - # alertmanagerDefaultBaseImageRegistry: quay.io - - ## Prometheus-config-reloader - ## - prometheusConfigReloader: - image: - registry: quay.io - repository: prometheus-operator/prometheus-config-reloader - # if not set appVersion field from Chart.yaml is used - tag: "" - sha: "" - - # add prometheus config reloader liveness and readiness probe. Default: false - enableProbe: false - - # resource config for prometheusConfigReloader - resources: {} - # requests: - # cpu: 200m - # memory: 50Mi - # limits: - # cpu: 200m - # memory: 50Mi - - ## Thanos side-car image when configured - ## - thanosImage: - registry: quay.io - repository: thanos/thanos - tag: v0.37.2 - sha: "" - - ## Set a Label Selector to filter watched prometheus and prometheusAgent - ## - prometheusInstanceSelector: "" - - ## Set a Label Selector to filter watched alertmanager - ## - alertmanagerInstanceSelector: "" - - ## Set a Label Selector to filter watched thanosRuler - thanosRulerInstanceSelector: "" - - ## Set a Field Selector to filter watched secrets - ## - secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1" - - ## If false then the user will opt out of automounting API credentials. - ## - automountServiceAccountToken: true - - ## Additional volumes - ## - extraVolumes: [] - - ## Additional volume mounts - ## - extraVolumeMounts: [] - -## Deploy a Prometheus instance -## -prometheus: - enabled: true - - ## Toggle prometheus into agent mode - ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/designs/prometheus-agent.md - ## - agentMode: false - - ## Annotations for Prometheus - ## - annotations: {} - - ## Configure network policy for the prometheus - networkPolicy: - enabled: false - - ## Flavor of the network policy to use. 
-    # Can be:
-    # * kubernetes for networking.k8s.io/v1/NetworkPolicy
-    # * cilium for cilium.io/v2/CiliumNetworkPolicy
-    flavor: kubernetes
-
-    # cilium:
-    #   endpointSelector:
-    #   egress:
-    #   ingress:
-
-    # egress:
-    # - {}
-    # ingress:
-    # - {}
-    # podSelector:
-    #   matchLabels:
-    #     app: prometheus
-
-  ## Service account for Prometheuses to use.
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-  ##
-  serviceAccount:
-    create: true
-    name: ""
-    annotations: {}
-    automountServiceAccountToken: true
-
-  # Service for Thanos service discovery on the sidecar
-  # Enabling this lets Thanos Query use
-  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local`
-  # to discover the Thanos sidecar on Prometheus nodes
-  # (remember to substitute ${kube-prometheus-stack.fullname} and ${namespace}; do not just copy and paste!)
-  thanosService:
-    enabled: false
-    annotations: {}
-    labels: {}
-
-    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
-    ##
-    externalTrafficPolicy: Cluster
-
-    ## Service type
-    ##
-    type: ClusterIP
-
-    ## Service dual stack
-    ##
-    ipDualStack:
-      enabled: false
-      ipFamilies: ["IPv6", "IPv4"]
-      ipFamilyPolicy: "PreferDualStack"
-
-    ## gRPC port config
-    portName: grpc
-    port: 10901
-    targetPort: "grpc"
-
-    ## HTTP port config (for metrics)
-    httpPortName: http
-    httpPort: 10902
-    targetHttpPort: "http"
-
-    ## ClusterIP to assign
-    # Default is to make this a headless service ("None")
-    clusterIP: "None"
-
-    ## Port to expose on each node, if service type is NodePort
-    ##
-    nodePort: 30901
-    httpNodePort: 30902
-
-  # ServiceMonitor to scrape Sidecar metrics
-  # Needs thanosService to be enabled as well
-  thanosServiceMonitor:
-    enabled: false
-    interval: ""
-
-    ## Additional labels
-    ##
-    additionalLabels: {}
-
-    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
-    scheme: ""
-
-    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
-    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## Metric relabel configs to apply to samples before ingestion.
-    metricRelabelings: []
-
-    ## relabel configs to apply to samples before ingestion.
-    relabelings: []
-
-  # Service for external access to sidecar
-  # Enabling this creates a service to expose thanos-sidecar outside the cluster.
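-  # If exposed this way, it is worth pairing the `type: LoadBalancer` setting below with
-  # `loadBalancerSourceRanges` so the gRPC endpoint is not left open to the world. A hedged
-  # sketch (the CIDR is an illustrative assumption, not a value used in this deployment):
-  # loadBalancerSourceRanges:
-  #   - 10.0.0.0/8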
- thanosServiceExternal: - enabled: false - annotations: {} - labels: {} - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## gRPC port config - portName: grpc - port: 10901 - targetPort: "grpc" - - ## HTTP port config (for metrics) - httpPortName: http - httpPort: 10902 - targetHttpPort: "http" - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: LoadBalancer - - ## Port to expose on each node - ## - nodePort: 30901 - httpNodePort: 30902 - - ## Configuration for Prometheus service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - - ## Port for Prometheus Service to listen on - ## - port: 9090 - - ## To be used with a proxy extraContainer port - targetPort: 9090 - - ## Port for Prometheus Reloader to listen on - ## - reloaderWebPort: 8080 - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30090 - - ## Loadbalancer IP - ## Only use if service.type is "LoadBalancer" - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## Additional ports to open for Prometheus service - ## - additionalPorts: [] - # additionalPorts: - # - name: oauth-proxy - # port: 8081 - # targetPort: 8081 - # - name: oauth-metrics - # port: 8082 - # targetPort: 8082 - - ## Consider that all endpoints are considered "ready" even if the Pods themselves are not - ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec - publishNotReadyAddresses: false - - ## If you want to make sure that connections from a particular client are passed to the same Pod each time - ## Accepts 'ClientIP' or 'None' - ## - sessionAffinity: None - - ## If you want to modify the ClientIP sessionAffinity timeout - ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" - ## - sessionAffinityConfig: - clientIP: - timeoutSeconds: 10800 - - ## Configuration for creating a separate Service for each statefulset Prometheus replica - ## - servicePerReplica: - enabled: false - annotations: {} - - ## Port for Prometheus Service per replica to listen on - ## - port: 9090 - - ## To be used with a proxy extraContainer port - targetPort: 9090 - - ## Port to expose on each node - ## Only used if servicePerReplica.type is 'NodePort' - ## - nodePort: 30091 - - ## Loadbalancer source IP ranges - ## Only used if servicePerReplica.type is "LoadBalancer" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## Service dual stack - ## - ipDualStack: - enabled: false - ipFamilies: ["IPv6", "IPv4"] - ipFamilyPolicy: "PreferDualStack" - - ## Configure pod disruption budgets for Prometheus - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - # Ingress exposes thanos 
sidecar outside the cluster - thanosIngress: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - servicePort: 10901 - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30901 - - ## Hosts must be provided if Ingress is enabled. - ## - hosts: [] - # - thanos-gateway.domain.com - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## TLS configuration for Thanos Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: thanos-gateway-tls - # hosts: - # - thanos-gateway.domain.com - # - - ## ExtraSecret can be used to store various data in an extra secret - ## (use it for example to store hashed basic auth credentials) - extraSecret: - ## if not set, name will be auto generated - # name: "" - annotations: {} - data: {} - # auth: | - # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 - # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. - - ingress: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - - ## Redirect ingress to an additional defined port on the service - # servicePort: 8081 - - ## Hostnames. - ## Must be provided if Ingress is enabled. - ## - # hosts: - # - prometheus.domain.com - hosts: [] - - ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## TLS configuration for Prometheus Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-general-tls - # hosts: - # - prometheus.example.com - - # -- BETA: Configure the gateway routes for the chart here. - # More routes can be added by adding a dictionary key like the 'main' route. - # Be aware that this is an early beta of this feature, - # kube-prometheus-stack does not guarantee this works and is subject to change. - # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk - # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2) - route: - main: - # -- Enables or disables the route - enabled: false - - # -- Set the route apiVersion, e.g. 
gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2 - apiVersion: gateway.networking.k8s.io/v1 - # -- Set the route kind - # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute - kind: HTTPRoute - - annotations: {} - labels: {} - - hostnames: [] - # - my-filter.example.com - parentRefs: [] - # - name: acme-gw - - matches: - - path: - type: PathPrefix - value: / - - ## Filters define the filters that are applied to requests that match this rule. - filters: [] - - ## Additional custom rules that can be added to the route - additionalRules: [] - - ## Configuration for creating an Ingress that will map to each Prometheus replica service - ## prometheus.servicePerReplica must be enabled - ## - ingressPerReplica: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - - ## Final form of the hostname for each per replica ingress is - ## - ## Prefix for the per replica ingress that will have `-$replicaNumber` - ## appended to the end - hostPrefix: "" - ## Domain that will be used for the per replica ingress - hostDomain: "" - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## Secret name containing the TLS certificate for Prometheus per replica ingress - ## Secret must be manually created in the namespace - tlsSecretName: "" - - ## Separated secret for each per replica Ingress. Can be used together with cert-manager - ## - tlsSecretPerReplica: - enabled: false - ## Final form of the secret for each per replica ingress is - ## - prefix: "prometheus" - - ## Configure additional options for default pod security policy for Prometheus - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - podSecurityPolicy: - allowedCapabilities: [] - allowedHostPaths: [] - volumes: [] - - serviceMonitor: - ## If true, create a serviceMonitor for prometheus - ## - selfMonitor: true - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Additional labels - ## - additionalLabels: {} - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 - - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. - scheme: "" - - ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 
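-    ## For illustration, a file-based mTLS setup might look like the commented sketch below.
-    ## The istio certificate paths are assumptions borrowed from the scrapeClasses example
-    ## further down; they are not configured anywhere in this file:
-    # tlsConfig:
-    #   caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
-    #   certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
-    #   keyFile: /etc/prometheus/secrets/istio.default/key.pem
-    #   insecureSkipVerify: false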
-    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
-    tlsConfig: {}
-
-    bearerTokenFile:
-
-    ## Metric relabel configs to apply to samples before ingestion.
-    ##
-    metricRelabelings: []
-    # - action: keep
-    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
-    #   sourceLabels: [__name__]
-
-    ## Relabel configs to apply to samples before ingestion.
-    ##
-    relabelings: []
-    # - sourceLabels: [__meta_kubernetes_pod_node_name]
-    #   separator: ;
-    #   regex: ^()$
-    #   targetLabel: nodename
-    #   replacement: $1
-    #   action: replace
-
-    ## Additional Endpoints
-    ##
-    additionalEndpoints: []
-    # - port: oauth-metrics
-    #   path: /metrics
-
-  ## Settings affecting prometheusSpec
-  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
-  ##
-  prometheusSpec:
-    ## Statefulset's persistent volume claim retention policy
-    ## whenDeleted and whenScaled determine whether
-    ## statefulset's PVCs are deleted (true) or retained (false)
-    ## on scaling down and deleting statefulset, respectively.
-    ## Requires Kubernetes version 1.27.0+.
-    ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
-    persistentVolumeClaimRetentionPolicy: {}
-    # whenDeleted: Retain
-    # whenScaled: Retain
-
-    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
-    ##
-    ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod.
-    ## If the field isn’t set, the operator mounts the service account token by default.
-    ## Warning: be aware that, by default, Prometheus requires the service account token for Kubernetes service discovery.
-    ## It is possible to use a strategic merge patch to project the service account token into the ‘prometheus’ container.
-    automountServiceAccountToken: true
-
-    disableCompaction: false
-    ## APIServerConfig
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
-    ##
-    apiserverConfig: {}
-
-    ## Allows setting additional arguments for the Prometheus container
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
-    additionalArgs: []
-
-    ## Interval between consecutive scrapes.
-    ## Defaults to 30s.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
-    ##
-    scrapeInterval: ""
-
-    ## Number of seconds to wait for a target to respond before erroring
-    ##
-    scrapeTimeout: ""
-
-    ## List of scrape classes to expose to scraping objects such as
-    ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
-    ##
-    scrapeClasses: []
-    # - name: istio-mtls
-    #   default: false
-    #   tlsConfig:
-    #     caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
-    #     certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
-
-    ## Interval between consecutive evaluations.
-    ##
-    evaluationInterval: ""
-
-    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
-    ##
-    listenLocal: false
-
-    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
-    ## This is disabled by default.
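-    ## When enabled, the TSDB admin endpoints (e.g. /api/v1/admin/tsdb/delete_series,
-    ## /api/v1/admin/tsdb/clean_tombstones and /api/v1/admin/tsdb/snapshot) become
-    ## reachable on the Prometheus web port, so gate access to them accordingly.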
- ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis - ## - enableAdminAPI: false - - ## Sets version of Prometheus overriding the Prometheus version as derived - ## from the image tag. Useful in cases where the tag does not follow semver v2. - version: "" - - ## WebTLSConfig defines the TLS parameters for HTTPS - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig - web: {} - - ## Exemplars related settings that are runtime reloadable. - ## It requires to enable the exemplar storage feature to be effective. - exemplars: {} - ## Maximum number of exemplars stored in memory for all series. - ## If not set, Prometheus uses its default value. - ## A value of zero or less than zero disables the storage. - # maxSize: 100000 - - # EnableFeatures API enables access to Prometheus disabled features. - # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ - enableFeatures: [] - # - exemplar-storage - - ## Image of Prometheus. - ## - image: - registry: quay.io - repository: prometheus/prometheus - tag: v3.1.0 - sha: "" - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## If specified, the pod's topology spread constraints. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app: prometheus - - ## Alertmanagers to which alerts will be sent - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints - ## - ## Default configuration will connect to the alertmanager deployed as part of this release - ## - alertingEndpoints: [] - # - name: "" - # namespace: "" - # port: http - # scheme: http - # pathPrefix: "" - # tlsConfig: {} - # bearerTokenFile: "" - # apiVersion: v2 - - ## External labels to add to any time series or alerts when communicating with external systems - ## - externalLabels: {} - - ## enable --web.enable-remote-write-receiver flag on prometheus-server - ## - enableRemoteWriteReceiver: false - - ## Name of the external label used to denote replica name - ## - replicaExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote replica name - ## - replicaExternalLabelNameClear: false - - ## Name of the external label used to denote Prometheus instance name - ## - prometheusExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote Prometheus instance name - ## - prometheusExternalLabelNameClear: false - - ## External URL at which Prometheus will be reachable. - ## - externalUrl: "" - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not - ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated - ## with the new list of secrets. 
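-    ## For example (a hedged sketch — the Secret name `etcd-client-cert` is an assumption, and
-    ## such a Secret must already exist in this namespace), mounting etcd client certificates
-    ## so that a scrape config can reference them under /etc/prometheus/secrets/etcd-client-cert/:
-    # secrets:
-    #   - etcd-client-cert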
- ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. - ## - configMaps: [] - - ## QuerySpec defines the query command line flags when starting Prometheus. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec - ## - query: {} - - ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery. - ruleNamespaceSelector: {} - ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel" - # ruleNamespaceSelector: - # matchLabels: - # prometheus: somelabel - - ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the PrometheusRule resources created - ## - ruleSelectorNilUsesHelmValues: true - - ## PrometheusRules to be selected for target discovery. - ## If {}, select all PrometheusRules - ## - ruleSelector: {} - ## Example which select all PrometheusRules resources - ## with label "prometheus" with values any of "example-rules" or "example-rules-2" - # ruleSelector: - # matchExpressions: - # - key: prometheus - # operator: In - # values: - # - example-rules - # - example-rules-2 - # - ## Example which select all PrometheusRules resources with label "role" set to "example-rules" - # ruleSelector: - # matchLabels: - # role: example-rules - - ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the servicemonitors created - ## - serviceMonitorSelectorNilUsesHelmValues: true - - ## ServiceMonitors to be selected for target discovery. - ## If {}, select all ServiceMonitors - ## - serviceMonitorSelector: {} - ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" - # serviceMonitorSelector: - # matchLabels: - # prometheus: somelabel - - ## Namespaces to be selected for ServiceMonitor discovery. - ## - serviceMonitorNamespaceSelector: {} - ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" - # serviceMonitorNamespaceSelector: - # matchLabels: - # prometheus: somelabel - - ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the podmonitors created - ## - podMonitorSelectorNilUsesHelmValues: true - - ## PodMonitors to be selected for target discovery. - ## If {}, select all PodMonitors - ## - podMonitorSelector: {} - ## Example which selects PodMonitors with label "prometheus" set to "somelabel" - # podMonitorSelector: - # matchLabels: - # prometheus: somelabel - - ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery. 
- podMonitorNamespaceSelector: {} - ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel" - # podMonitorNamespaceSelector: - # matchLabels: - # prometheus: somelabel - - ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the probes created - ## - probeSelectorNilUsesHelmValues: true - - ## Probes to be selected for target discovery. - ## If {}, select all Probes - ## - probeSelector: {} - ## Example which selects Probes with label "prometheus" set to "somelabel" - # probeSelector: - # matchLabels: - # prometheus: somelabel - - ## If nil, select own namespace. Namespaces to be selected for Probe discovery. - probeNamespaceSelector: {} - ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel" - # probeNamespaceSelector: - # matchLabels: - # prometheus: somelabel - - ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the scrapeConfigs created - ## - ## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec - ## (keeping downward compatibility with older versions of CRD) - ## - scrapeConfigSelectorNilUsesHelmValues: true - - ## scrapeConfigs to be selected for target discovery. - ## If {}, select all scrapeConfigs - ## - scrapeConfigSelector: {} - ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel" - # scrapeConfigSelector: - # matchLabels: - # prometheus: somelabel - - ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery. - ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD) - scrapeConfigNamespaceSelector: {} - ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel" - # scrapeConfigNamespaceSelector: - # matchLabels: - # prometheus: somelabel - - ## How long to retain metrics - ## - retention: 10d - - ## Maximum size of metrics - ## - retentionSize: "" - - ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration - ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb - tsdb: - outOfOrderTimeWindow: 0s - - ## Enable compression of the write-ahead log using Snappy. - ## - walCompression: true - - ## If true, the Operator won't process any Prometheus configuration changes - ## - paused: false - - ## Number of replicas of each shard to deploy for a Prometheus deployment. - ## Number of replicas multiplied by shards is the total number of Pods created. - ## - replicas: 1 - - ## EXPERIMENTAL: Number of shards to distribute targets onto. - ## Number of replicas multiplied by shards is the total number of Pods created. - ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. - ## Increasing shards will not reshard data either but it will continue to be available from the same instances. - ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. - ## Sharding is done on the content of the `__address__` target meta-label. 
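-    ## As a quick worked example of the pod math described above: replicas: 2 with
-    ## shards: 3 yields 2 x 3 = 6 Prometheus pods, each shard scraping a disjoint
-    ## subset of targets hashed on the `__address__` meta-label.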
-    ##
-    shards: 1
-
-    ## Log level for Prometheus to be configured with
-    ##
-    logLevel: info
-
-    ## Log format for Prometheus to be configured with
-    ##
-    logFormat: logfmt
-
-    ## Prefix used to register routes, overriding externalUrl route.
-    ## Useful for proxies that rewrite URLs.
-    ##
-    routePrefix: /
-
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata labels and annotations get propagated to the prometheus pods.
-    ##
-    podMetadata: {}
-    # labels:
-    #   app: prometheus
-    #   k8s-app: prometheus
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    podAntiAffinity: "soft"
-
-    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the prometheus instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## The remote_read spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
-    remoteRead: []
-    # - url: http://remote1/read
-    ## additionalRemoteRead is appended to remoteRead
-    additionalRemoteRead: []
-
-    ## The remote_write spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
-    remoteWrite: []
-    # - url: http://remote1/push
-    ## additionalRemoteWrite is appended to remoteWrite
-    additionalRemoteWrite: []
-
-    ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
-    remoteWriteDashboards: false
-
-    ## Resource limits & requests
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Prometheus StorageSpec for persistent data
-    storageSpec:
-      ## Using PersistentVolumeClaim
-      ##
-      volumeClaimTemplate:
-        spec:
-          storageClassName: {{ .Values.globals.kubePrometheusStack.storageClass }}
-          accessModes: ["ReadWriteOnce"]
-          resources:
-            requests:
-              storage: {{ .Values.globals.kubePrometheusStack.prometheus.storageSize }}
-        selector: {}
-
-      ## Using tmpfs volume
-      ##
-      # emptyDir:
-      #   medium: Memory
-
-    # Additional volumes on the output StatefulSet definition.
-    volumes: []
-
-    # Additional VolumeMounts on the output StatefulSet definition.
-    volumeMounts: []
-
-    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
-    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
-    ## as specified in the official Prometheus documentation:
-    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
-    ## appended, the user is responsible for making sure they are valid. Note that using this feature may break
-    ## upgrades of Prometheus. It is advised to review the Prometheus release notes to ensure that no incompatible
-    ## scrape configs are going to break Prometheus after the upgrade.
-    ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
-    ##
-    ## The scrape configuration example below will find master nodes (provided they are named mst), relabel the
-    ## port to 2379, and allow etcd scraping, provided etcd is running on all Kubernetes master nodes
-    ##
-    additionalScrapeConfigs: []
-    # - job_name: kube-etcd
-    #   kubernetes_sd_configs:
-    #     - role: node
-    #   scheme: https
-    #   tls_config:
-    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-    #   relabel_configs:
-    #   - action: labelmap
-    #     regex: __meta_kubernetes_node_label_(.+)
-    #   - source_labels: [__address__]
-    #     action: replace
-    #     target_label: __address__
-    #     regex: ([^:;]+):(\d+)
-    #     replacement: ${1}:2379
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: keep
-    #     regex: mst
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: replace
-    #     target_label: node
-    #     regex: ()
-    #     replacement: ${1}
-    #   metric_relabel_configs:
-    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
-    #     action: labeldrop
-    #
-    ## If the scrape config contains a repetitive section, you may want to use a template.
-    ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
-    # additionalScrapeConfigs: |
-    #   - job_name: "node-exporter"
-    #     gce_sd_configs:
-    #     relabel_configs:
-    #     ...
-
-
-    ## If additional scrape configurations are already deployed in a single secret file, you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalScrapeConfigs
-    additionalScrapeConfigsSecret: {}
-    # enabled: false
-    # name:
-    # key:
-
-    ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
-    ## when deploying via spinnaker, to disable versioning on the secret: strategy.spinnaker.io/versioned: 'false'
-    additionalPrometheusSecretsAnnotations: {}
-
-    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form specified
-    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
-    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
-    ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
-    ## feature may break upgrades of Prometheus. It is advised to review the Prometheus release
-    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
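-    ## For instance, a static entry such as the following could be appended (a hedged sketch;
-    ## the host and port are illustrative assumptions, not values used in this deployment):
-    # - scheme: http
-    #   static_configs:
-    #     - targets:
-    #         - "alertmanager.example.com:9093"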
- ## - additionalAlertManagerConfigs: [] - # - consul_sd_configs: - # - server: consul.dev.test:8500 - # scheme: http - # datacenter: dev - # tag_separator: ',' - # services: - # - metrics-prometheus-alertmanager - - ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage - ## them separately from the helm deployment, you can use this section. - ## Expected values are the secret name and key - ## Cannot be used with additionalAlertManagerConfigs - additionalAlertManagerConfigsSecret: {} - # name: - # key: - # optional: false - - ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended - ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the - ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. - ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the - ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel - ## configs are going to break Prometheus after the upgrade. - ## - additionalAlertRelabelConfigs: [] - # - separator: ; - # regex: prometheus_replica - # replacement: $1 - # action: labeldrop - - ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage - ## them separately from the helm deployment, you can use this section. - ## Expected values are the secret name and key - ## Cannot be used with additionalAlertRelabelConfigs - additionalAlertRelabelConfigsSecret: {} - # name: - # key: - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md - ## - securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - seccompProfile: - type: RuntimeDefault - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. - ## This section is experimental, it may change significantly without deprecation notice in any release. - ## This is experimental and may change significantly without backward compatibility in any release. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec - ## - thanos: {} - # secretProviderClass: - # provider: gcp - # parameters: - # secrets: | - # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest" - # fileName: "objstore.yaml" - ## ObjectStorageConfig configures object storage in Thanos. 
- # objectStorageConfig: - # # use existing secret, if configured, objectStorageConfig.secret will not be used - # existingSecret: {} - # # name: "" - # # key: "" - # # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource, - # # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set - # # https://thanos.io/tip/thanos/storage.md/#s3 - # secret: {} - # # type: S3 - # # config: - # # bucket: "" - # # endpoint: "" - # # region: "" - # # access_key: "" - # # secret_key: "" - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. - ## if using proxy extraContainer update targetPort with proxy container port - containers: [] - # containers: - # - name: oauth-proxy - # image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1 - # args: - # - --upstream=http://127.0.0.1:9090 - # - --http-address=0.0.0.0:8081 - # - --metrics-address=0.0.0.0:8082 - # - ... - # ports: - # - containerPort: 8081 - # name: oauth-proxy - # protocol: TCP - # - containerPort: 8082 - # name: oauth-metrics - # protocol: TCP - # resources: {} - - ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes - ## (permissions, dir tree) on mounted volumes before starting prometheus - initContainers: [] - - ## PortName to use for Prometheus. - ## - portName: "http-web" - - ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files - ## on the file system of the Prometheus container e.g. bearer token files. - arbitraryFSAccessThroughSMs: false - - ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor - ## or PodMonitor to true, this overrides honor_labels to false. - overrideHonorLabels: false - - ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. - overrideHonorTimestamps: false - - ## When ignoreNamespaceSelectors is set to true, namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored, - ## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object, - ## and servicemonitors will be installed in the default service namespace. - ## Defaults to false. - ignoreNamespaceSelectors: false - - ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. - ## The label value will always be the namespace of the object that is being created. - ## Disabled by default - enforcedNamespaceLabel: "" - - ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels. - ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair - ## Deprecated, use `excludedFromEnforcement` instead - prometheusRulesExcludedFromEnforce: [] - - ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects - ## to be excluded from enforcing a namespace label of origin. - ## Works only if enforcedNamespaceLabel set to true. - ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference - excludedFromEnforcement: [] - - ## QueryLogFile specifies the file to which PromQL queries are logged. 
-    ## Note that this location must be writable, and can be persisted using an attached volume.
-    ## Alternatively, the location can be set to a stdout location such
-    ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
-    ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
-    queryLogFile: false
-
-    # Use to set a global sample_limit for Prometheus. This acts as the default SampleLimit for ServiceMonitor and/or PodMonitor.
-    # Set to 'false' to disable the global sample_limit, or set to a number to override the default value.
-    sampleLimit: false
-
-    # EnforcedKeepDroppedTargetsLimit defines a limit on the number of targets dropped by relabeling that will be kept in memory.
-    # The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
-    # is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
-    enforcedKeepDroppedTargets: 0
-
-    ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
-    ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
-    ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
-    enforcedSampleLimit: false
-
-    ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
-    ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
-    ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
-    ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
-    enforcedTargetLimit: false
-
-
-    ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels are present
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelLimit: false
-
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelNameLengthLimit: false
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
-    ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
-    ## versions 2.27.0 and newer.
-    enforcedLabelValueLengthLimit: false
-
-    ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
-    ## in Prometheus, so it may change in any upcoming release.
-    allowOverlappingBlocks: false
-
-    ## Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing for it to
-    ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
- minReadySeconds: 0 - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it. - # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically. - hostNetwork: false - - # HostAlias holds the mapping between IP and hostnames that will be injected - # as an entry in the pod’s hosts file. - hostAliases: [] - # - ip: 10.10.0.100 - # hostnames: - # - a1.app.local - # - b1.app.local - - ## TracingConfig configures tracing in Prometheus. - ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheustracingconfig - tracingConfig: {} - - ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints. - ## If set, the value should be either “Endpoints” or “EndpointSlice”. If unset, the operator assumes the “Endpoints” role. - serviceDiscoveryRole: "" - - ## Additional configuration which is not covered by the properties above. (passed through tpl) - additionalConfig: {} - - ## Additional configuration which is not covered by the properties above. - ## Useful, if you need advanced templating inside alertmanagerSpec. - ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl) - additionalConfigString: "" - - ## Defines the maximum time that the `prometheus` container's startup probe - ## will wait before being considered failed. The startup probe will return - ## success after the WAL replay is complete. If set, the value should be - ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15 - ## minutes). - maximumStartupDurationSeconds: 0 - - additionalRulesForClusterRole: [] - # - apiGroups: [ "" ] - # resources: - # - nodes/proxy - # verbs: [ "get", "list", "watch" ] - - additionalServiceMonitors: [] - ## Name of the ServiceMonitor to create - ## - # - name: "" - - ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from - ## the chart - ## - # additionalLabels: {} - - ## Service label for use in assembling a job name of the form