feat: Enable monitoring on traefik
This commit is contained in:
parent
6fe09ecd38
commit
f8be48ead2
2
.gitignore
vendored
2
.gitignore
vendored
@ -1,2 +1,4 @@
|
||||
.env
|
||||
secrets.yaml
|
||||
secrets.yml
|
||||
venv
|
||||
|
@ -1,6 +1,5 @@
|
||||
networks:
|
||||
traefik:
|
||||
driver: overlay
|
||||
attachable: true
|
||||
name: traefik
|
||||
|
||||
|
@ -1,83 +0,0 @@
|
||||
---
|
||||
repositories:
|
||||
- name: traefik
|
||||
url: https://helm.traefik.io/traefik
|
||||
- name: jetstack
|
||||
url: https://charts.jetstack.io
|
||||
- name: rancher-stable
|
||||
url: https://releases.rancher.com/server-charts/stable
|
||||
- name: longhorn
|
||||
url: https://charts.longhorn.io
|
||||
- name: bitnami
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
- name: nfs-subdir-external-provisioner
|
||||
url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
|
||||
- name: prometheus-community
|
||||
url: https://prometheus-community.github.io/helm-charts
|
||||
|
||||
environments:
|
||||
staging: &staging
|
||||
values:
|
||||
- ./values/globals/staging.yaml.gotmpl
|
||||
production:
|
||||
default: *staging
|
||||
|
||||
---
|
||||
releases:
|
||||
- name: traefik
|
||||
namespace: {{ .Values.globals.traefik.namespace }}
|
||||
createNamespace: true
|
||||
chart: traefik/traefik
|
||||
values:
|
||||
- ./values/traefik/values.yaml.gotmpl
|
||||
|
||||
- name: cert-manager
|
||||
namespace: {{ .Values.globals.certManager.namespace }}
|
||||
createNamespace: true
|
||||
chart: jetstack/cert-manager
|
||||
values:
|
||||
- ./values/cert-manager/values.yml
|
||||
|
||||
- name: certs
|
||||
chart: ./charts/certs
|
||||
needs:
|
||||
- {{ .Values.globals.certManager.namespace }}/cert-manager
|
||||
values:
|
||||
- ./values/certs/values.yaml.gotmpl
|
||||
|
||||
- name: nfs-subdir-external-provisioner
|
||||
namespace: {{ .Values.globals.nfsSubdirExternalProvisioner.namespace }}
|
||||
createNamespace: true
|
||||
chart: {{ .Values.globals.nfsSubdirExternalProvisioner.namespace }}/nfs-subdir-external-provisioner
|
||||
values:
|
||||
- ./values/nfs-subdir-external-provisioner/values.yaml.gotmpl
|
||||
|
||||
- name: rancher
|
||||
namespace: {{ .Values.globals.rancher.namespace }}
|
||||
createNamespace: true
|
||||
chart: rancher-stable/rancher
|
||||
needs:
|
||||
- {{ .Values.globals.certManager.namespace }}/cert-manager
|
||||
values:
|
||||
- ./values/rancher/values.yaml.gotmpl
|
||||
|
||||
- name: longhorn
|
||||
namespace: {{ .Values.globals.longhorn.namespace }}
|
||||
createNamespace: true
|
||||
chart: longhorn/longhorn
|
||||
values:
|
||||
- ./values/longhorn/values.yaml.gotmpl
|
||||
needs:
|
||||
- {{ .Values.globals.rancher.namespace }}/rancher
|
||||
|
||||
|
||||
- name: kube-prometheus-stack
|
||||
namespace: {{ .Values.globals.kubePrometheusStack.namespace }}
|
||||
createNamespace: true
|
||||
chart: prometheus-community/kube-prometheus-stack
|
||||
needs:
|
||||
- {{ .Values.globals.certManager.namespace }}/cert-manager
|
||||
- {{ .Values.globals.longhorn.namespace }}/longhorn
|
||||
values:
|
||||
- ./values/kube-prometheus-stack/values.yaml.gotmpl
|
||||
|
@ -1,67 +0,0 @@
|
||||
---
|
||||
repositories:
|
||||
- name: bitnami
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
- name: runix
|
||||
url: https://helm.runix.net
|
||||
|
||||
environments:
|
||||
staging: &staging
|
||||
values:
|
||||
- ./values/globals/staging.yaml.gotmpl
|
||||
production:
|
||||
default: *staging
|
||||
|
||||
---
|
||||
releases:
|
||||
- name: mysql
|
||||
namespace: {{ .Values.globals.mysql.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/mysql
|
||||
values:
|
||||
- ./values/mysql/values.yaml.gotmpl
|
||||
|
||||
- name: phpmyadmin
|
||||
namespace: {{ .Values.globals.phpmyadmin.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/phpmyadmin
|
||||
values:
|
||||
- ./values/phpmyadmin/values.yaml.gotmpl
|
||||
needs:
|
||||
- {{ .Values.globals.mysql.namespace }}/mysql
|
||||
|
||||
- name: postgres
|
||||
namespace: {{ .Values.globals.postgres.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/postgresql
|
||||
values:
|
||||
- ./values/postgres/values.yaml.gotmpl
|
||||
|
||||
- name: pgadmin4
|
||||
namespace: {{ .Values.globals.pgadmin4.namespace }}
|
||||
createNamespace: true
|
||||
chart: runix/pgadmin4
|
||||
values:
|
||||
- ./values/pgadmin4/values.yaml.gotmpl
|
||||
needs:
|
||||
- {{ .Values.globals.postgres.namespace }}/postgres
|
||||
|
||||
- name: init-dbs
|
||||
# It doesn't really matter where we put this, but I don't want it polluting
|
||||
# the default namespace
|
||||
namespace: init-dbs
|
||||
createNamespace: true
|
||||
chart: ./charts/init-dbs
|
||||
values:
|
||||
- ./values/init-dbs/values.yaml.gotmpl
|
||||
needs:
|
||||
- {{ .Values.globals.postgres.namespace }}/postgres
|
||||
- {{ .Values.globals.mysql.namespace }}/mysql
|
||||
|
||||
- name: redis
|
||||
namespace: {{ .Values.globals.redis.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/redis
|
||||
values:
|
||||
- ./values/redis/values.yaml.gotmpl
|
||||
|
@ -1,64 +0,0 @@
|
||||
---
|
||||
repositories:
|
||||
- name: gitlab
|
||||
url: https://charts.gitlab.io
|
||||
- name: bitnami
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
- name: uptime-kuma
|
||||
url: https://helm.irsigler.cloud
|
||||
- name: authentik
|
||||
url: https://charts.goauthentik.io
|
||||
- name: harbor
|
||||
url: https://helm.goharbor.io
|
||||
|
||||
environments:
|
||||
staging: &staging
|
||||
values:
|
||||
- ./values/globals/staging.yaml.gotmpl
|
||||
production:
|
||||
default: *staging
|
||||
|
||||
---
|
||||
releases:
|
||||
- name: uptime-kuma
|
||||
namespace: {{ .Values.globals.uptimeKuma.namespace }}
|
||||
createNamespace: true
|
||||
chart: uptime-kuma/uptime-kuma
|
||||
values:
|
||||
- ./values/uptime-kuma/values.yaml.gotmpl
|
||||
|
||||
- name: authentik
|
||||
namespace: {{ .Values.globals.authentik.namespace }}
|
||||
createNamespace: true
|
||||
chart: authentik/authentik
|
||||
values:
|
||||
- ./values/authentik/values.yaml.gotmpl
|
||||
|
||||
- name: argo-cd
|
||||
namespace: {{ .Values.globals.argocd.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/argo-cd
|
||||
values:
|
||||
- ./values/argo-cd/values.yaml.gotmpl
|
||||
|
||||
- name: harbor
|
||||
namespace: {{ .Values.globals.harbor.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/harbor
|
||||
values:
|
||||
- ./values/harbor/values.yaml.gotmpl
|
||||
|
||||
- name: ghost
|
||||
namespace: {{ .Values.globals.ghost.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/ghost
|
||||
values:
|
||||
- ./values/ghost/values.yaml.gotmpl
|
||||
|
||||
- name: gitea
|
||||
installed: false
|
||||
namespace: {{ .Values.globals.gitea.namespace }}
|
||||
createNamespace: true
|
||||
chart: bitnami/gitea
|
||||
values:
|
||||
- ./values/gitea/values.yaml.gotmpl
|
@ -1,23 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
@ -1,24 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: certs
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "1.16.0"
|
@ -1,62 +0,0 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "certs.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "certs.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "certs.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "certs.labels" -}}
|
||||
helm.sh/chart: {{ include "certs.chart" . }}
|
||||
{{ include "certs.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "certs.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "certs.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "certs.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "certs.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,17 +0,0 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ .Values.certificateName }}
|
||||
namespace: {{ .Values.certificateNamespace }}
|
||||
spec:
|
||||
secretName: {{ .Values.certificateSecretName }}
|
||||
issuerRef:
|
||||
name: {{ .Values.issuerName | quote }}
|
||||
kind: ClusterIssuer
|
||||
commonName: {{ .Values.commonName | quote }}
|
||||
dnsNames:
|
||||
{{- range .Values.dnsNames }}
|
||||
- {{ . | quote }}
|
||||
{{- end}}
|
||||
|
@ -1,10 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Values.cloudflareTokenSecretName }}
|
||||
namespace: {{ .Values.certManagerNamespace }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
cloudflare-token: {{ .Values.cloudflareSecretToken }}
|
||||
|
@ -1,24 +0,0 @@
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: {{ .Values.issuerName }}
|
||||
spec:
|
||||
acme:
|
||||
server: {{- if eq .Values.issuerMode "staging" }} "https://acme-staging-v02.api.letsencrypt.org/directory" {{- else }} "https://acme-v02.api.letsencrypt.org/directory" {{- end }}
|
||||
email: {{ .Values.acmeEmail }}
|
||||
privateKeySecretRef:
|
||||
name: {{ .Values.privateKeySecretRef }}
|
||||
solvers:
|
||||
- dns01:
|
||||
cloudflare:
|
||||
email: {{ .Values.cloudflareEmail }}
|
||||
apiTokenSecretRef:
|
||||
name: {{ .Values.cloudflareTokenSecretName }}
|
||||
key: cloudflare-token
|
||||
selector:
|
||||
dnsZones:
|
||||
{{- range .Values.dnsZones }}
|
||||
- {{ . | quote }}
|
||||
{{- end}}
|
||||
|
@ -1,28 +0,0 @@
|
||||
acmeEmail: tonydu121@hotmail.com
|
||||
cloudflareEmail: tonydu121@hotmail.com
|
||||
|
||||
# staging or production
|
||||
issuerMode: staging
|
||||
|
||||
issuerName: letsencrypt
|
||||
privateKeySecretRef: letsencrypt
|
||||
|
||||
certManagerNamespace: cert-manager
|
||||
|
||||
cloudflareSecretToken: redacted
|
||||
cloudflareTokenSecretName: cloudflare-token-secret
|
||||
|
||||
dnsZones:
|
||||
- "mnke.org"
|
||||
- "*.mnke.org"
|
||||
- "*.hl.mnke.org"
|
||||
|
||||
# TODO: Allow for multiple creation
|
||||
certificateName: hl-mnke-org
|
||||
certificateNamespace: default
|
||||
certificateSecretName: hl-mnke-org-tls
|
||||
|
||||
commonName: "*.hl.mnke.org"
|
||||
dnsNames:
|
||||
- "hl.mnke.org"
|
||||
- "*.hl.mnke.org"
|
@ -1,23 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
@ -1,24 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: init-dbs
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "1.16.0"
|
@ -1,51 +0,0 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "init-dbs.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "init-dbs.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "init-dbs.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "init-dbs.labels" -}}
|
||||
helm.sh/chart: {{ include "init-dbs.chart" . }}
|
||||
{{ include "init-dbs.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "init-dbs.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "init-dbs.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
@ -1,112 +0,0 @@
|
||||
{{- range .Values.postgres.databases }}
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
{{- toYaml $.Values.imagePullSecrets | nindent 8 }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: {{ include "init-dbs.fullname" $ }}-postgres-{{ .database }}
|
||||
image: {{ $.Values.postgres.image.ref }}
|
||||
imagePullPolicy: {{ $.Values.postgres.image.pullPolicy }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
args:
|
||||
# If the username and database exists, whatever, just exit.
|
||||
# Yeah, if something else went wrong, we're still exiting with code 0,
|
||||
# but it should be fine.
|
||||
- |
|
||||
sleep 10s && \
|
||||
psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
|
||||
-tc "SELECT 1 FROM pg_database WHERE datname = '$DATABASE'" |\
|
||||
grep -q 1 ||\
|
||||
psql -h {{ $.Values.postgres.host }} -U $PGUSER postgres \
|
||||
-c "CREATE USER $USERNAME WITH ENCRYPTED PASSWORD '$PASSWORD';" \
|
||||
-c "CREATE DATABASE $DATABASE WITH OWNER = $USERNAME;"
|
||||
env:
|
||||
- name: PGUSER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: username
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
- name: PGPASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: password
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
- name: USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-username
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
- name: PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-password
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
- name: DATABASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-database
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
{{- end }}
|
||||
|
||||
{{- range .Values.mysql.databases }}
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
{{- toYaml $.Values.imagePullSecrets | nindent 8 }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: {{ include "init-dbs.fullname" $ }}-mysql-{{ .database }}
|
||||
image: {{ $.Values.mysql.image.ref }}
|
||||
imagePullPolicy: {{ $.Values.mysql.image.pullPolicy }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
args:
|
||||
- |
|
||||
sleep 10s && \
|
||||
mysql -h {{ $.Values.mysql.host }} -u $MYUSER mysql --password=$MYPASSWORD \
|
||||
-e "CREATE DATABASE IF NOT EXISTS $DATABASE;" \
|
||||
-e "CREATE USER IF NOT EXISTS '$USERNAME'@'%' IDENTIFIED BY '$PASSWORD';" \
|
||||
-e "GRANT ALL PRIVILEGES ON $DATABASE TO '$USERNAME'@'%';"
|
||||
env:
|
||||
- name: MYUSER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: username
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
- name: MYPASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: password
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
- name: USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-username
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
- name: PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-password
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
- name: DATABASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: {{ .database }}-database
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
{{- end }}
|
@ -1,33 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "init-dbs.fullname" $ }}-postgres
|
||||
labels:
|
||||
{{- include "init-dbs.labels" $ | nindent 4 }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
username: {{ .Values.postgres.username }}
|
||||
password: {{ .Values.postgres.password }}
|
||||
{{- range .Values.postgres.databases }}
|
||||
{{ .database }}-database: {{ .database }}
|
||||
{{ .database }}-username: {{ .username }}
|
||||
{{ .database }}-password: {{ .password }}
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "init-dbs.fullname" $ }}-mysql
|
||||
labels:
|
||||
{{- include "init-dbs.labels" $ | nindent 4 }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
username: {{ .Values.mysql.username }}
|
||||
password: {{ .Values.mysql.password }}
|
||||
{{- range .Values.mysql.databases }}
|
||||
{{ .database }}-database: {{ .database }}
|
||||
{{ .database }}-username: {{ .username }}
|
||||
{{ .database }}-password: {{ .password }}
|
||||
{{- end }}
|
@ -1,36 +0,0 @@
|
||||
# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# This is to override the chart name.
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
postgres:
|
||||
image:
|
||||
ref: postgres
|
||||
# This sets the pull policy for images.
|
||||
pullPolicy: IfNotPresent
|
||||
host: ""
|
||||
username: postgres
|
||||
password: ""
|
||||
databases:
|
||||
- database: test
|
||||
username: test
|
||||
password: test
|
||||
- database: test1
|
||||
username: test1
|
||||
password: test1
|
||||
mysql:
|
||||
image:
|
||||
ref: mysql
|
||||
# This sets the pull policy for images.
|
||||
pullPolicy: IfNotPresent
|
||||
host: ""
|
||||
username: root
|
||||
password: ""
|
||||
databases:
|
||||
- database: test
|
||||
username: test
|
||||
password: test
|
||||
- database: test1
|
||||
username: test1
|
||||
password: test1
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,11 +0,0 @@
|
||||
crds:
|
||||
enabled: true
|
||||
replicaCount: 3
|
||||
extraArgs:
|
||||
- --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53
|
||||
- --dns01-recursive-nameservers-only
|
||||
podDnsPolicy: None
|
||||
podDnsConfig:
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
- 9.9.9.9
|
@ -1,28 +0,0 @@
|
||||
acmeEmail: {{ .Values.globals.certs.acmeEmail }}
|
||||
cloudflareEmail: {{ .Values.globals.certs.cloudflareEmail }}
|
||||
|
||||
# staging or production
|
||||
issuerMode: {{ .Values.globals.certs.certIssuerMode }}
|
||||
|
||||
issuerName: {{ .Values.globals.certs.issuerName }}
|
||||
privateKeySecretRef: {{ .Values.globals.certs.privateKeySecretRef }}
|
||||
|
||||
certManagerNamespace: {{ .Values.globals.certManager.namespace }}
|
||||
|
||||
cloudflareSecretToken: {{ .Values.globals.certs.cloudflareSecretToken }}
|
||||
cloudflareTokenSecretName: {{ .Values.globals.certs.cloudflareTokenSecretName }}
|
||||
|
||||
dnsZones:
|
||||
{{- range .Values.globals.certs.hlMnkeOrg.dnsZones }}
|
||||
- {{ . | quote }}
|
||||
{{- end}}
|
||||
|
||||
certificateName: {{ .Values.globals.certs.hlMnkeOrg.certificateName }}
|
||||
certificateSecretName: {{ .Values.globals.certs.hlMnkeOrg.certificateSecretName }}
|
||||
certificateNamespace: {{ .Values.globals.certs.hlMnkeOrg.certificateNamespace }}
|
||||
|
||||
commonName: {{ .Values.globals.certs.hlMnkeOrg.commonName }}
|
||||
dnsNames:
|
||||
{{- range .Values.globals.certs.hlMnkeOrg.dnsNames }}
|
||||
- {{ . | quote }}
|
||||
{{- end}}
|
@ -1,876 +0,0 @@
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
|
||||
## @section Global parameters
|
||||
## Global Docker image parameters
|
||||
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
|
||||
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
|
||||
|
||||
## @param global.imageRegistry Global Docker image registry
|
||||
## @param global.imagePullSecrets Global Docker registry secret names as an array
|
||||
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
|
||||
##
|
||||
global:
|
||||
imageRegistry: ""
|
||||
## E.g.
|
||||
## imagePullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
imagePullSecrets: []
|
||||
defaultStorageClass: {{ .Values.globals.ghost.storageClass }}
|
||||
## Security parameters
|
||||
##
|
||||
security:
|
||||
## @param global.security.allowInsecureImages Allows skipping image verification
|
||||
allowInsecureImages: false
|
||||
## Compatibility adaptations for Kubernetes platforms
|
||||
##
|
||||
compatibility:
|
||||
## Compatibility adaptations for Openshift
|
||||
##
|
||||
openshift:
|
||||
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
|
||||
##
|
||||
adaptSecurityContext: auto
|
||||
## @section Common parameters
|
||||
|
||||
## @param kubeVersion Override Kubernetes version
|
||||
##
|
||||
kubeVersion: ""
|
||||
## @param nameOverride String to partially override common.names.fullname
|
||||
##
|
||||
nameOverride: ""
|
||||
## @param fullnameOverride String to fully override common.names.fullname
|
||||
##
|
||||
fullnameOverride: ""
|
||||
## @param commonLabels Labels to add to all deployed objects
|
||||
##
|
||||
commonLabels: {}
|
||||
## @param commonAnnotations Annotations to add to all deployed objects
|
||||
##
|
||||
commonAnnotations: {}
|
||||
## @param clusterDomain Kubernetes cluster domain name
|
||||
##
|
||||
clusterDomain: cluster.local
|
||||
## @param extraDeploy Array of extra objects to deploy with the release
|
||||
##
|
||||
extraDeploy: []
|
||||
## Enable diagnostic mode in the deployment
|
||||
##
|
||||
diagnosticMode:
|
||||
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
|
||||
##
|
||||
enabled: false
|
||||
## @param diagnosticMode.command Command to override all containers in the deployment
|
||||
##
|
||||
command:
|
||||
- sleep
|
||||
## @param diagnosticMode.args Args to override all containers in the deployment
|
||||
##
|
||||
args:
|
||||
- infinity
|
||||
## @section Ghost Image parameters
|
||||
|
||||
## Bitnami Ghost image
|
||||
## ref: https://hub.docker.com/r/bitnami/ghost/tags/
|
||||
## @param image.registry [default: REGISTRY_NAME] Ghost image registry
|
||||
## @param image.repository [default: REPOSITORY_NAME/ghost] Ghost image repository
|
||||
## @skip image.tag Ghost image tag (immutable tags are recommended)
|
||||
## @param image.digest Ghost image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param image.pullPolicy Ghost image pull policy
|
||||
## @param image.pullSecrets Ghost image pull secrets
|
||||
## @param image.debug Enable image debug mode
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/ghost
|
||||
tag: 5.108.1-debian-12-r0
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
||||
##
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## e.g:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## Enable debug mode
|
||||
##
|
||||
debug: true
|
||||
## @section Ghost Configuration parameters
|
||||
## Ghost settings based on environment variables
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost#configuration
|
||||
|
||||
## @param ghostUsername Ghost user name
|
||||
##
|
||||
ghostUsername: user
|
||||
## @param ghostPassword Ghost user password
|
||||
## Defaults to a random 10-character alphanumeric string if not set
|
||||
##
|
||||
ghostPassword: "{{ .Values.globals.ghost.ghostPassword }}"
|
||||
## @param existingSecret Name of existing secret containing Ghost credentials
|
||||
## NOTE: Must contain key `ghost-password`
|
||||
## NOTE: When it's set, the `ghostPassword` parameter is ignored
|
||||
##
|
||||
existingSecret: ""
|
||||
## @param ghostEmail Ghost user email
|
||||
##
|
||||
ghostEmail: {{ .Values.globals.ghost.ghostEmail }}
|
||||
## @param ghostBlogTitle Ghost Blog title
|
||||
##
|
||||
ghostBlogTitle: User's Blog
|
||||
## @param ghostHost Ghost host to create application URLs
|
||||
##
|
||||
ghostHost: {{ .Values.globals.ghost.primaryHost }}
|
||||
## @param ghostPath URL sub path where to server the Ghost application
|
||||
##
|
||||
ghostPath: /
|
||||
## @param ghostEnableHttps Configure Ghost to build application URLs using https
|
||||
##
|
||||
## This turns on whether URLs like the home page button use HTTPS URL schemes.
|
||||
## If you turn this on, then shit will break because it seems like
|
||||
## the reverse proxy (or any client) encounters a 301 redirect to https from
|
||||
## ghost, but then ghost doesn't actually serve HTTPS so the reverse proxy
|
||||
## ends up 501 internal server error'ing. We _do_ want to have HTTPS URLS in
|
||||
## Ghost, so we need to turn this on, and for some ABSOLUTELY UNGODLY REASON,
|
||||
## if I turn this on (and thereby making every request 501 error) AND THEN turn
|
||||
## it off again (allowing normal traffic again), we continue generating HTTPS
|
||||
## URLs. I'm going to fucking punch my monitor.
|
||||
##
|
||||
## - Tony
|
||||
ghostEnableHttps: false
|
||||
## SMTP mail delivery configuration
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/ghost/#smtp-configuration
|
||||
## @param smtpHost SMTP server host
|
||||
## @param smtpPort SMTP server port
|
||||
## @param smtpUser SMTP username
|
||||
## @param smtpPassword SMTP user password
|
||||
## @param smtpService SMTP service
|
||||
## @param smtpProtocol SMTP protocol (ssl or tls)
|
||||
##
|
||||
smtpHost: ""
|
||||
smtpPort: ""
|
||||
smtpUser: ""
|
||||
smtpPassword: ""
|
||||
smtpService: ""
|
||||
smtpProtocol: ""
|
||||
## @param smtpExistingSecret The name of an existing secret with SMTP credentials
|
||||
## NOTE: Must contain key `smtp-password`
|
||||
## NOTE: When it's set, the `smtpPassword` parameter is ignored
|
||||
##
|
||||
smtpExistingSecret: ""
|
||||
## @param allowEmptyPassword Allow the container to be started with blank passwords
|
||||
##
|
||||
allowEmptyPassword: false
|
||||
## @param ghostSkipInstall Skip performing the initial bootstrapping for Ghost
|
||||
##
|
||||
ghostSkipInstall: false
|
||||
## @param command Override default container command (useful when using custom images)
|
||||
##
|
||||
command: []
|
||||
## @param args Override default container args (useful when using custom images)
|
||||
##
|
||||
args: []
|
||||
## @param extraEnvVars Array with extra environment variables to add to the Ghost container
|
||||
## e.g:
|
||||
## extraEnvVars:
|
||||
## - name: FOO
|
||||
## value: "bar"
|
||||
##
|
||||
extraEnvVars: []
|
||||
## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars
|
||||
##
|
||||
extraEnvVarsCM: ""
|
||||
## @param extraEnvVarsSecret Name of existing Secret containing extra env vars
|
||||
##
|
||||
extraEnvVarsSecret: ""
|
||||
## @section Ghost deployment parameters
|
||||
|
||||
## @param replicaCount Number of Ghost replicas to deploy
|
||||
## NOTE: ReadWriteMany PVC(s) are required if replicaCount > 1
|
||||
##
|
||||
replicaCount: 1
|
||||
## @param updateStrategy.type Ghost deployment strategy type
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
|
||||
## NOTE: Set it to `Recreate` if you use a PV that cannot be mounted on multiple pods
|
||||
## e.g:
|
||||
## updateStrategy:
|
||||
## type: RollingUpdate
|
||||
## rollingUpdate:
|
||||
## maxSurge: 25%
|
||||
## maxUnavailable: 25%
|
||||
##
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
## @param priorityClassName Ghost pod priority class name
|
||||
##
|
||||
priorityClassName: ""
|
||||
## @param schedulerName Name of the k8s scheduler (other than default)
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
schedulerName: ""
|
||||
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
|
||||
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
## The value is evaluated as a template
|
||||
##
|
||||
topologySpreadConstraints: []
|
||||
## @param automountServiceAccountToken Mount Service Account token in pod
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param hostAliases Ghost pod host aliases
|
||||
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
|
||||
##
|
||||
hostAliases: []
|
||||
## @param extraVolumes Optionally specify extra list of additional volumes for Ghost pods
|
||||
##
|
||||
extraVolumes: []
|
||||
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Ghost container(s)
|
||||
##
|
||||
extraVolumeMounts: []
|
||||
## @param sidecars Add additional sidecar containers to the Ghost pod
|
||||
## e.g:
|
||||
## sidecars:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
sidecars: []
|
||||
## @param initContainers Add additional init containers to the Ghost pods
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
|
||||
## e.g:
|
||||
## initContainers:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
initContainers: []
|
||||
## Pod Disruption Budget configuration
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
||||
## @param pdb.create Enable/disable a Pod Disruption Budget creation
|
||||
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
||||
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
|
||||
##
|
||||
pdb:
|
||||
create: true
|
||||
minAvailable: ""
|
||||
maxUnavailable: ""
|
||||
## @param lifecycleHooks Add lifecycle hooks to the Ghost deployment
|
||||
##
|
||||
lifecycleHooks: {}
|
||||
## @param podLabels Extra labels for Ghost pods
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
##
|
||||
podLabels: {}
|
||||
## @param podAnnotations Annotations for Ghost pods
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
##
|
||||
podAnnotations: {}
|
||||
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAffinityPreset: ""
|
||||
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAntiAffinityPreset: soft
|
||||
## Node affinity preset
|
||||
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
||||
##
|
||||
nodeAffinityPreset:
|
||||
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
##
|
||||
type: ""
|
||||
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
|
||||
##
|
||||
key: ""
|
||||
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
|
||||
## E.g.
|
||||
## values:
|
||||
## - e2e-az1
|
||||
## - e2e-az2
|
||||
##
|
||||
values: []
|
||||
## @param affinity Affinity for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
|
||||
##
|
||||
affinity: {}
|
||||
## @param nodeSelector Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||
##
|
||||
nodeSelector: {}
|
||||
## @param tolerations Tolerations for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
## Ghost containers' resource requests and limits
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "medium"
|
||||
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
## Container ports
|
||||
## @param containerPorts.http Ghost HTTP container port
|
||||
## @param containerPorts.https Ghost HTTPS container port
|
||||
##
|
||||
containerPorts:
|
||||
http: 2368
|
||||
https: 2368
|
||||
## @param extraContainerPorts Optionally specify extra list of additional ports for Ghost container(s)
|
||||
## e.g:
|
||||
## extraContainerPorts:
|
||||
## - name: myservice
|
||||
## containerPort: 9090
|
||||
##
|
||||
extraContainerPorts: []
|
||||
## Configure Pods Security Context
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
||||
## @param podSecurityContext.enabled Enabled Ghost pods' Security Context
|
||||
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
||||
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
||||
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
|
||||
## @param podSecurityContext.fsGroup Set Ghost pod's Security Context fsGroup
|
||||
##
|
||||
podSecurityContext:
|
||||
enabled: true
|
||||
fsGroupChangePolicy: Always
|
||||
sysctls: []
|
||||
supplementalGroups: []
|
||||
fsGroup: 1001
|
||||
## Configure Container Security Context (only main container)
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
||||
## @param containerSecurityContext.enabled Enabled containers' Security Context
|
||||
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
||||
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
||||
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
||||
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
||||
## @param containerSecurityContext.privileged Set container's Security Context privileged
|
||||
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
||||
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
||||
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
||||
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
||||
containerSecurityContext:
|
||||
enabled: true
|
||||
seLinuxOptions: {}
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
runAsNonRoot: true
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
# capabilities:
|
||||
# drop: ["ALL"]
|
||||
seccompProfile:
|
||||
type: "RuntimeDefault"
|
||||
## Configure extra options for Ghost containers' liveness, readiness and startup probes
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
|
||||
## @param startupProbe.enabled Enable startupProbe
|
||||
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
||||
## @param startupProbe.periodSeconds Period seconds for startupProbe
|
||||
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
||||
## @param startupProbe.failureThreshold Failure threshold for startupProbe
|
||||
## @param startupProbe.successThreshold Success threshold for startupProbe
|
||||
##
|
||||
startupProbe:
|
||||
enabled: false
|
||||
initialDelaySeconds: 120
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 6
|
||||
successThreshold: 1
|
||||
## @param livenessProbe.enabled Enable livenessProbe
|
||||
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
||||
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
|
||||
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
||||
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
|
||||
## @param livenessProbe.successThreshold Success threshold for livenessProbe
|
||||
##
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 120
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 6
|
||||
successThreshold: 1
|
||||
## @param readinessProbe.enabled Enable readinessProbe
|
||||
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
||||
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
|
||||
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
||||
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
|
||||
## @param readinessProbe.successThreshold Success threshold for readinessProbe
|
||||
##
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 6
|
||||
successThreshold: 1
|
||||
## @param customLivenessProbe Custom livenessProbe that overrides the default one
|
||||
##
|
||||
customLivenessProbe: {}
|
||||
## @param customReadinessProbe Custom readinessProbe that overrides the default one
|
||||
#
|
||||
# The default httpGet probe fails, and I think it's because of this:
|
||||
# - We enabled ghostEnableHttps
|
||||
# - The httpGet probe probes for http://xyz:2368, which then redirects to
|
||||
# https://xyz:2368
|
||||
# - However, Ghost itself does not provide TLS. That option just makes HTTP
|
||||
# redirect to HTTPS
|
||||
# - The probe is now expecting TLS, but Ghost is still sending regular HTTP
|
||||
# and the probe thus fails
|
||||
#
|
||||
# NOTE: despite the above, the active probe below is an always-successful
# `exec` check ("true"), not the TCP check; the tcpSocket variant is kept
# commented out for reference. The alternative is curl'ing and expecting a
# 301 response, but that doesn't seem much better than the TCP check.
|
||||
customReadinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- "true"
|
||||
# tcpSocket:
|
||||
# port: 2368
|
||||
|
||||
## @section Traffic Exposure Parameters
|
||||
|
||||
## Ghost service parameters
|
||||
##
|
||||
service:
|
||||
## @param service.type Ghost service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## @param service.ports.http Ghost service HTTP port
|
||||
## @param service.ports.https Ghost service HTTPS port
|
||||
##
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
## Node ports to expose
|
||||
## @param service.nodePorts.http Node port for HTTP
|
||||
## @param service.nodePorts.https Node port for HTTPS
|
||||
## NOTE: choose port between <30000-32767>
|
||||
##
|
||||
nodePorts:
|
||||
http: ""
|
||||
https: ""
|
||||
## @param service.clusterIP Ghost service Cluster IP
|
||||
## e.g.:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param service.loadBalancerIP Ghost service Load Balancer IP
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param service.loadBalancerSourceRanges Ghost service Load Balancer sources
|
||||
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## e.g:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param service.externalTrafficPolicy Ghost service external traffic policy
|
||||
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param service.annotations Additional custom annotations for Ghost service
|
||||
##
|
||||
annotations: {}
|
||||
## @param service.extraPorts Extra port to expose on Ghost service
|
||||
##
|
||||
extraPorts: []
|
||||
## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
sessionAffinityConfig: {}
|
||||
## Configure the ingress resource that allows you to access the Ghost installation
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
##
|
||||
ingress:
|
||||
## @param ingress.enabled Enable ingress record generation for Ghost
|
||||
##
|
||||
enabled: true
|
||||
## @param ingress.pathType Ingress path type
|
||||
##
|
||||
pathType: ImplementationSpecific
|
||||
## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
|
||||
##
|
||||
apiVersion: ""
|
||||
## @param ingress.hostname Default host for the ingress record
|
||||
##
|
||||
hostname: "{{ .Values.globals.ghost.primaryHost }}"
|
||||
## @param ingress.path Default path for the ingress record
|
||||
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
|
||||
##
|
||||
path: /
|
||||
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
|
||||
## Use this parameter to set the required annotations for cert-manager, see
|
||||
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
||||
##
|
||||
## e.g:
|
||||
## annotations:
|
||||
## kubernetes.io/ingress.class: nginx
|
||||
## cert-manager.io/cluster-issuer: cluster-issuer-name
|
||||
##
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "{{ .Values.globals.ghost.ingressClass }}"
|
||||
cert-manager.io/cluster-issuer: "{{ .Values.globals.certs.issuerName }}"
|
||||
## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
|
||||
## TLS certificates will be retrieved from a TLS secret with name: `\{\{- printf "%s-tls" .Values.ingress.hostname \}\}`
|
||||
## You can:
|
||||
## - Use the `ingress.secrets` parameter to create this TLS secret
|
||||
## - Rely on cert-manager to create it by setting the corresponding annotations
|
||||
## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
|
||||
##
|
||||
tls: true
|
||||
## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
|
||||
##
|
||||
selfSigned: false
|
||||
## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
|
||||
## e.g:
|
||||
## extraHosts:
|
||||
## - name: ghost.local
|
||||
## path: /
|
||||
##
|
||||
extraHosts: []
|
||||
## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
|
||||
## e.g:
|
||||
## extraPaths:
|
||||
## - path: /*
|
||||
## backend:
|
||||
## serviceName: ssl-redirect
|
||||
## servicePort: use-annotation
|
||||
##
|
||||
extraPaths: []
|
||||
## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
|
||||
## e.g:
|
||||
## extraTls:
|
||||
## - hosts:
|
||||
## - ghost.local
|
||||
## secretName: ghost.local-tls
|
||||
##
|
||||
extraTls: []
|
||||
## @param ingress.secrets Custom TLS certificates as secrets
|
||||
## NOTE: 'key' and 'certificate' are expected in PEM format
|
||||
## NOTE: 'name' should line up with a 'secretName' set further up
|
||||
## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
|
||||
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
## e.g:
|
||||
## secrets:
|
||||
## - name: ghost.local-tls
|
||||
## key: |-
|
||||
## REDACTED
|
||||
## ...
|
||||
## REDACTED
|
||||
## certificate: |-
|
||||
## -----BEGIN CERTIFICATE-----
|
||||
## ...
|
||||
## -----END CERTIFICATE-----
|
||||
##
|
||||
secrets: []
|
||||
## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
|
||||
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
|
||||
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
|
||||
##
|
||||
ingressClassName: "{{ .Values.globals.ghost.ingressClass }}"
|
||||
## @param ingress.extraRules Additional rules to be covered with this ingress record
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
|
||||
## e.g:
|
||||
## extraRules:
|
||||
## - host: example.local
|
||||
## http:
|
||||
## path: /
|
||||
## backend:
|
||||
## service:
|
||||
## name: example-svc
|
||||
## port:
|
||||
## name: http
|
||||
##
|
||||
extraRules: []
|
||||
## @section Persistence Parameters
|
||||
|
||||
## Persistence Parameters
|
||||
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
|
||||
##
|
||||
persistence:
|
||||
## @param persistence.enabled Enable persistence using Persistent Volume Claims
|
||||
##
|
||||
enabled: true
|
||||
## @param persistence.storageClass Persistent Volume storage class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
|
||||
##
|
||||
storageClass: ""
|
||||
## @param persistence.annotations Additional custom annotations for the PVC
|
||||
##
|
||||
annotations: {}
|
||||
## @param persistence.accessModes [array] Persistent Volume access modes
|
||||
##
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
## @param persistence.size Persistent Volume size
|
||||
##
|
||||
size: 8Gi
|
||||
## @param persistence.existingClaim The name of an existing PVC to use for persistence
|
||||
##
|
||||
existingClaim: ""
|
||||
## @param persistence.subPath The name of a volume's sub path to mount for persistence
|
||||
##
|
||||
subPath: ""
|
||||
## 'volumePermissions' init container parameters
|
||||
## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
|
||||
## based on the podSecurityContext/containerSecurityContext parameters
|
||||
##
|
||||
volumePermissions:
|
||||
## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
|
||||
##
|
||||
enabled: false
|
||||
## OS Shell + Utility image
|
||||
## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
|
||||
## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
|
||||
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
|
||||
## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
|
||||
## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
|
||||
## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/os-shell
|
||||
tag: 12-debian-12-r35
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## e.g:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## Init container's resource requests and limits
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "none"
|
||||
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
## Init container Container Security Context
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
||||
## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
||||
## @param volumePermissions.securityContext.runAsUser Set init container's Security Context runAsUser
|
||||
## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
|
||||
## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
|
||||
## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
|
||||
##
|
||||
securityContext:
|
||||
seLinuxOptions: {}
|
||||
runAsUser: 0
|
||||
## @section Database Parameters
|
||||
|
||||
## MySQL chart configuration
|
||||
## ref: https://github.com/bitnami/charts/blob/main/bitnami/mysql/values.yaml
|
||||
##
|
||||
mysql:
|
||||
## @param mysql.enabled Deploy a MySQL server to satisfy the applications database requirements
|
||||
## To use an external database set this to false and configure the `externalDatabase` parameters
|
||||
##
|
||||
enabled: false
|
||||
## @param mysql.architecture MySQL architecture. Allowed values: `standalone` or `replication`
|
||||
##
|
||||
architecture: standalone
|
||||
## MySQL Authentication parameters
|
||||
## @param mysql.auth.rootPassword MySQL root password
|
||||
## @param mysql.auth.database MySQL custom database
|
||||
## @param mysql.auth.username MySQL custom user name
|
||||
## @param mysql.auth.password MySQL custom user password
|
||||
## @param mysql.auth.existingSecret Existing secret with MySQL credentials
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run
|
||||
## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-on-first-run
|
||||
## https://github.com/bitnami/containers/tree/main/bitnami/mysql/#creating-a-database-user-on-first-run
|
||||
auth:
|
||||
rootPassword: "password"
|
||||
database: bitnami_ghost
|
||||
username: bn_ghost
|
||||
password: "password"
|
||||
existingSecret: ""
|
||||
## MySQL Primary configuration
|
||||
##
|
||||
primary:
|
||||
## MySQL Primary Persistence parameters
|
||||
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
|
||||
## @param mysql.primary.persistence.enabled Enable persistence on MySQL using PVC(s)
|
||||
## @param mysql.primary.persistence.storageClass Persistent Volume storage class
|
||||
## @param mysql.primary.persistence.accessModes [array] Persistent Volume access modes
|
||||
## @param mysql.primary.persistence.size Persistent Volume size
|
||||
##
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 8Gi
|
||||
## MySQL primary container's resource requests and limits
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## We usually recommend not to specify default resources and to leave this as a conscious
|
||||
## choice for the user. This also increases chances charts run on environments with little
|
||||
## resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
## @param mysql.primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "small"
|
||||
## @param mysql.primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
## External Database Configuration
|
||||
## All of these values are only used if `mysql.enabled=false`
|
||||
##
|
||||
externalDatabase:
|
||||
## @param externalDatabase.host External Database server host
|
||||
##
|
||||
host: "mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local"
|
||||
## @param externalDatabase.port External Database server port
|
||||
##
|
||||
port: 3306
|
||||
## @param externalDatabase.user External Database username
|
||||
##
|
||||
user: {{ .Values.globals.ghost.mysql.username | quote }}
|
||||
## @param externalDatabase.password External Database user password
|
||||
##
|
||||
password: {{ .Values.globals.ghost.mysql.password | quote }}
|
||||
## @param externalDatabase.database External Database database name
|
||||
##
|
||||
database: {{ .Values.globals.ghost.mysql.database | quote }}
|
||||
## @param externalDatabase.existingSecret The name of an existing secret with database credentials
|
||||
## NOTE: Must contain key `mysql-password`
|
||||
## NOTE: When it's set, the `externalDatabase.password` parameter is ignored
|
||||
##
|
||||
existingSecret: ""
|
||||
## @param externalDatabase.ssl External Database ssl
|
||||
##
|
||||
ssl: false
|
||||
## @param externalDatabase.sslCaFile External Database ssl CA filepath
|
||||
##
|
||||
sslCaFile: ""
|
||||
## @section NetworkPolicy parameters
|
||||
|
||||
## Network Policy configuration
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
||||
##
|
||||
networkPolicy:
|
||||
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
||||
##
|
||||
enabled: true
|
||||
## @param networkPolicy.allowExternal Don't require server label for connections
|
||||
## The Policy model to apply. When set to false, only pods with the correct
|
||||
## server label will have network access to the ports server is listening
|
||||
## on. When true, server will accept connections from any source
|
||||
## (with the correct destination port).
|
||||
##
|
||||
allowExternal: true
|
||||
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
||||
##
|
||||
allowExternalEgress: true
|
||||
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraIngress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## from:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
extraIngress: []
|
||||
## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraEgress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## to:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
##
|
||||
extraEgress: []
|
||||
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
||||
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
||||
##
|
||||
ingressNSMatchLabels: {}
|
||||
ingressNSPodMatchLabels: {}
|
||||
|
||||
## Pods Service Account
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||||
## @param serviceAccount.create Specifies whether a ServiceAccount should be created
|
||||
## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
|
||||
## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
|
||||
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
|
||||
##
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: ""
|
||||
automountServiceAccountToken: false
|
||||
annotations: {}
|
||||
|
@ -1,782 +0,0 @@
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
## @section Global parameters
|
||||
## Global Docker image parameters
|
||||
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
|
||||
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
|
||||
##
|
||||
|
||||
## @param global.imageRegistry Global Docker image registry
|
||||
## @param global.imagePullSecrets Global Docker registry secret names as an array
|
||||
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
|
||||
## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead
|
||||
##
|
||||
global:
|
||||
imageRegistry: ""
|
||||
## E.g.
|
||||
## imagePullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
imagePullSecrets: []
|
||||
defaultStorageClass: ""
|
||||
storageClass: ""
|
||||
## Security parameters
|
||||
##
|
||||
security:
|
||||
## @param global.security.allowInsecureImages Allows skipping image verification
|
||||
allowInsecureImages: false
|
||||
## Compatibility adaptations for Kubernetes platforms
|
||||
##
|
||||
compatibility:
|
||||
## Compatibility adaptations for Openshift
|
||||
##
|
||||
openshift:
|
||||
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
|
||||
##
|
||||
adaptSecurityContext: auto
|
||||
## @section Common parameters
|
||||
##
|
||||
|
||||
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
|
||||
##
|
||||
kubeVersion: ""
|
||||
## @param nameOverride String to partially override gitea.fullname template (will maintain the release name)
|
||||
##
|
||||
nameOverride: ""
|
||||
## @param fullnameOverride String to fully override gitea.fullname template
|
||||
##
|
||||
fullnameOverride: ""
|
||||
## @param namespaceOverride String to fully override common.names.namespace
|
||||
##
|
||||
namespaceOverride: ""
|
||||
## @param commonAnnotations Common annotations to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
|
||||
##
|
||||
commonAnnotations: {}
|
||||
## @param commonLabels Common labels to add to all Gitea resources (sub-charts are not considered). Evaluated as a template
|
||||
##
|
||||
commonLabels: {}
|
||||
## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template).
|
||||
##
|
||||
extraDeploy: []
|
||||
## @section Gitea parameters
|
||||
##
|
||||
|
||||
## Bitnami Gitea image version
|
||||
## ref: https://hub.docker.com/r/bitnami/gitea/tags/
|
||||
## @param image.registry [default: REGISTRY_NAME] Gitea image registry
|
||||
## @param image.repository [default: REPOSITORY_NAME/gitea] Gitea Image name
|
||||
## @skip image.tag Gitea Image tag
|
||||
## @param image.digest Gitea image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param image.pullPolicy Gitea image pull policy
|
||||
## @param image.pullSecrets Specify docker-registry secret names as an array
|
||||
## @param image.debug Specify if debug logs should be enabled
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/gitea
|
||||
tag: 1.23.1-debian-12-r3
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
|
||||
##
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## e.g:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## Set to true if you would like to see extra information on logs
|
||||
##
|
||||
debug: false
|
||||
## @param adminUsername User of the application
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
|
||||
##
|
||||
adminUsername: bn_user
|
||||
## @param adminPassword Application password
|
||||
## Defaults to a random 10-character alphanumeric string if not set
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
|
||||
##
|
||||
adminPassword: ""
|
||||
## @param adminEmail Admin email
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
|
||||
##
|
||||
adminEmail: user@example.com
|
||||
## @param appName Gitea application name
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
|
||||
##
|
||||
appName: example
|
||||
## @param runMode Gitea application run mode
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea#configuration
|
||||
##
|
||||
runMode: prod
|
||||
## @param exposeSSH Make the SSH server accessible
|
||||
##
|
||||
exposeSSH: true
|
||||
## @param rootURL UI Root URL (for link generation)
|
||||
##
|
||||
rootURL: ""
|
||||
## @param command Override default container command (useful when using custom images)
|
||||
##
|
||||
command: []
|
||||
## @param args Override default container args (useful when using custom images)
|
||||
##
|
||||
args: []
|
||||
## @param updateStrategy.type Update strategy - only really applicable for deployments with RWO PVs attached
|
||||
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
|
||||
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
|
||||
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
|
||||
##
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
## @param priorityClassName Gitea pods' priorityClassName
|
||||
##
|
||||
priorityClassName: ""
|
||||
## @param schedulerName Name of the k8s scheduler (other than default)
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
schedulerName: ""
|
||||
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
|
||||
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
## The value is evaluated as a template
|
||||
##
|
||||
topologySpreadConstraints: []
|
||||
## @param automountServiceAccountToken Mount Service Account token in pod
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param hostAliases [array] Add deployment host aliases
|
||||
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
|
||||
##
|
||||
hostAliases: []
|
||||
## @param extraEnvVars Extra environment variables
|
||||
## For example:
|
||||
##
|
||||
extraEnvVars: []
|
||||
# - name: BEARER_AUTH
|
||||
# value: true
|
||||
## @param extraEnvVarsCM ConfigMap containing extra env vars
|
||||
##
|
||||
extraEnvVarsCM: ""
|
||||
## @param extraEnvVarsSecret Secret containing extra env vars (in case of sensitive data)
|
||||
##
|
||||
extraEnvVarsSecret: ""
|
||||
## @param extraVolumes Array of extra volumes to be added to the deployment (evaluated as template). Requires setting `extraVolumeMounts`
|
||||
##
|
||||
extraVolumes: []
|
||||
## @param extraVolumeMounts Array of extra volume mounts to be added to the container (evaluated as template). Normally used with `extraVolumes`.
|
||||
##
|
||||
extraVolumeMounts: []
|
||||
## @param initContainers Add additional init containers to the pod (evaluated as a template)
|
||||
##
|
||||
initContainers: []
|
||||
## Pod Disruption Budget configuration
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
||||
## @param pdb.create Enable/disable a Pod Disruption Budget creation
|
||||
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
||||
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
|
||||
##
|
||||
pdb:
|
||||
create: true
|
||||
minAvailable: ""
|
||||
maxUnavailable: ""
|
||||
## @param sidecars Attach additional containers to the pod (evaluated as a template)
|
||||
##
|
||||
sidecars: []
|
||||
## @param tolerations Tolerations for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
## @param existingSecret Name of a secret with the application password
|
||||
##
|
||||
existingSecret: ""
|
||||
## @param existingSecretKey Key inside the existing secret containing the password
|
||||
##
|
||||
existingSecretKey: "admin-password"
|
||||
## SMTP mail delivery configuration
|
||||
## ref: https://github.com/bitnami/containers/tree/main/bitnami/gitea/#smtp-configuration
|
||||
## @param smtpHost SMTP host
|
||||
## @param smtpPort SMTP port
|
||||
## @param smtpUser SMTP user
|
||||
## @param smtpPassword SMTP password
|
||||
##
|
||||
smtpHost: ""
|
||||
smtpPort: ""
|
||||
smtpUser: ""
|
||||
smtpPassword: ""
|
||||
## @param smtpExistingSecret The name of an existing secret with SMTP credentials
|
||||
## NOTE: Must contain key `smtp-password`
|
||||
## NOTE: When it's set, the `smtpPassword` parameter is ignored
|
||||
##
|
||||
smtpExistingSecret: ""
|
||||
## @param containerPorts [object] Container ports
|
||||
##
|
||||
containerPorts:
|
||||
http: 3000
|
||||
ssh: 2222
|
||||
## @param extraContainerPorts Optionally specify extra list of additional ports for Gitea container(s)
|
||||
## e.g:
|
||||
## extraContainerPorts:
|
||||
## - name: myservice
|
||||
## containerPort: 9090
|
||||
##
|
||||
extraContainerPorts: []
|
||||
## Enable OpenID Configurations
|
||||
## @param openid.enableSignIn Enable sign in with OpenID
|
||||
## @param openid.enableSignUp Enable sign up with OpenID
|
||||
openid:
|
||||
enableSignIn: false
|
||||
enableSignUp: false
|
||||
## Enable persistence using Persistent Volume Claims
|
||||
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
|
||||
##
|
||||
persistence:
|
||||
## @param persistence.enabled Enable persistence using PVC
|
||||
##
|
||||
enabled: true
|
||||
## @param persistence.storageClass PVC Storage Class for Gitea volume
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: ""
|
||||
## @param persistence.accessModes PVC Access Mode for Gitea volume
|
||||
## Requires persistence.enabled: true
|
||||
## If defined, PVC must be created manually before volume will be bound
|
||||
##
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
## @param persistence.size PVC Storage Request for Gitea volume
|
||||
##
|
||||
size: 8Gi
|
||||
## @param persistence.dataSource Custom PVC data source
|
||||
##
|
||||
dataSource: {}
|
||||
## @param persistence.existingClaim A manually managed Persistent Volume Claim
|
||||
## Requires persistence.enabled: true
|
||||
## If defined, PVC must be created manually before volume will be bound
|
||||
##
|
||||
existingClaim: ""
|
||||
## @param persistence.hostPath If defined, the gitea-data volume will mount to the specified hostPath.
|
||||
## Requires persistence.enabled: true
|
||||
## Requires persistence.existingClaim: nil|false
|
||||
## Default: nil.
|
||||
##
|
||||
hostPath: ""
|
||||
## @param persistence.annotations Persistent Volume Claim annotations
|
||||
##
|
||||
annotations: {}
|
||||
## @param persistence.selector Selector to match an existing Persistent Volume for Gitea data PVC
|
||||
## If set, the PVC can't have a PV dynamically provisioned for it
|
||||
## E.g.
|
||||
## selector:
|
||||
## matchLabels:
|
||||
## app: my-app
|
||||
##
|
||||
selector: {}
|
||||
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAffinityPreset: ""
|
||||
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAntiAffinityPreset: soft
|
||||
## Node affinity preset
|
||||
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
||||
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
|
||||
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
|
||||
##
|
||||
nodeAffinityPreset:
|
||||
type: ""
|
||||
## E.g.
|
||||
## key: "kubernetes.io/e2e-az-name"
|
||||
##
|
||||
key: ""
|
||||
## E.g.
|
||||
## values:
|
||||
## - e2e-az1
|
||||
## - e2e-az2
|
||||
##
|
||||
values: []
|
||||
## @param affinity Affinity for pod assignment
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
|
||||
##
|
||||
affinity: {}
|
||||
## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||
##
|
||||
nodeSelector: {}
|
||||
## Gitea container's resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "micro"
|
||||
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
## Configure Pods Security Context
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
||||
## @param podSecurityContext.enabled Enable Gitea pods' Security Context
|
||||
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
||||
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
||||
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
|
||||
## @param podSecurityContext.fsGroup Gitea pods' group ID
|
||||
##
|
||||
podSecurityContext:
|
||||
enabled: true
|
||||
fsGroupChangePolicy: Always
|
||||
sysctls: []
|
||||
supplementalGroups: []
|
||||
fsGroup: 1001
|
||||
## Configure Container Security Context (only main container)
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
||||
## @param containerSecurityContext.enabled Enabled containers' Security Context
|
||||
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
||||
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
||||
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
||||
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
||||
## @param containerSecurityContext.privileged Set container's Security Context privileged
|
||||
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
||||
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
||||
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
||||
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
||||
##
|
||||
containerSecurityContext:
|
||||
enabled: true
|
||||
seLinuxOptions: {}
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
runAsNonRoot: true
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop: ["ALL"]
|
||||
seccompProfile:
|
||||
type: "RuntimeDefault"
|
||||
## Configure extra options for startup probe
|
||||
## Gitea core exposes / to unauthenticated requests, making it a good
|
||||
## default startup and readiness path. However, that may not always be the
|
||||
## case. For example, if the image value is overridden to an image containing a
|
||||
## module that alters that route, or an image that does not auto-install Gitea.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
|
||||
## @param startupProbe.enabled Enable startupProbe
|
||||
## @param startupProbe.path Request path for startupProbe
|
||||
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
||||
## @param startupProbe.periodSeconds Period seconds for startupProbe
|
||||
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
||||
## @param startupProbe.failureThreshold Failure threshold for startupProbe
|
||||
## @param startupProbe.successThreshold Success threshold for startupProbe
|
||||
##
|
||||
startupProbe:
|
||||
enabled: false
|
||||
path: /
|
||||
initialDelaySeconds: 600
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
successThreshold: 1
|
||||
## Configure extra options for liveness probe
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
|
||||
## @param livenessProbe.enabled Enable livenessProbe
|
||||
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
||||
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
|
||||
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
||||
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
|
||||
## @param livenessProbe.successThreshold Success threshold for livenessProbe
|
||||
##
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 600
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
successThreshold: 1
|
||||
## Configure extra options for readiness probe
|
||||
## Gitea core exposes / to unauthenticated requests, making it a good
|
||||
## default startup and readiness path. However, that may not always be the
|
||||
## case. For example, if the image value is overridden to an image containing a
|
||||
## module that alters that route, or an image that does not auto-install Gitea.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
|
||||
## @param readinessProbe.enabled Enable readinessProbe
|
||||
## @param readinessProbe.path Request path for readinessProbe
|
||||
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
||||
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
|
||||
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
||||
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
|
||||
## @param readinessProbe.successThreshold Success threshold for readinessProbe
|
||||
##
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
path: /
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 1
|
||||
failureThreshold: 5
|
||||
successThreshold: 1
|
||||
## @param customStartupProbe Override default startup probe
|
||||
##
|
||||
customStartupProbe: {}
|
||||
## @param customLivenessProbe Override default liveness probe
|
||||
##
|
||||
customLivenessProbe: {}
|
||||
## @param customReadinessProbe Override default readiness probe
|
||||
##
|
||||
customReadinessProbe: {}
|
||||
## @param lifecycleHooks LifecycleHook to set additional configuration at startup Evaluated as a template
|
||||
##
|
||||
lifecycleHooks: {}
|
||||
## @param podAnnotations Pod annotations
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
##
|
||||
podAnnotations: {}
|
||||
## @param podLabels Add additional labels to the pod (evaluated as a template)
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
##
|
||||
podLabels: {}
|
||||
## @section Traffic Exposure Parameters
|
||||
##
|
||||
|
||||
## Kubernetes configuration. For minikube, set this to NodePort, elsewhere use LoadBalancer
|
||||
##
|
||||
service:
|
||||
## @param service.type Kubernetes Service type
|
||||
##
|
||||
type: LoadBalancer
|
||||
## @param service.ports.http Service HTTP port
|
||||
## @param service.ports.ssh Service SSH port
|
||||
##
|
||||
ports:
|
||||
http: 80
|
||||
ssh: 22
|
||||
## @param service.loadBalancerSourceRanges Restricts access for LoadBalancer (only with `service.type: LoadBalancer`)
|
||||
## e.g:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 0.0.0.0/0
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param service.loadBalancerIP loadBalancerIP for the Gitea Service (optional, cloud specific)
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param service.nodePorts [object] Kubernetes node port
|
||||
## nodePorts:
|
||||
## http: <to set explicitly, choose port between 30000-32767>
|
||||
## ssh: <to set explicitly, choose port between 30000-32767>
|
||||
##
|
||||
nodePorts:
|
||||
http: ""
|
||||
ssh: ""
|
||||
## @param service.externalTrafficPolicy Enable client source IP preservation
|
||||
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param service.clusterIP Gitea service Cluster IP
|
||||
## e.g.:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
|
||||
##
|
||||
extraPorts: []
|
||||
## @param service.annotations Additional custom annotations for Gitea service
|
||||
##
|
||||
annotations: {}
|
||||
## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
##
|
||||
sessionAffinityConfig: {}
|
||||
|
||||
## Network Policy configuration
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
||||
##
|
||||
networkPolicy:
|
||||
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
||||
##
|
||||
enabled: true
|
||||
## @param networkPolicy.allowExternal Don't require server label for connections
|
||||
## The Policy model to apply. When set to false, only pods with the correct
|
||||
## server label will have network access to the ports server is listening
|
||||
## on. When true, server will accept connections from any source
|
||||
## (with the correct destination port).
|
||||
##
|
||||
allowExternal: true
|
||||
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
||||
##
|
||||
allowExternalEgress: true
|
||||
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraIngress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## from:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
extraIngress: []
|
||||
## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraEgress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## to:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
##
|
||||
extraEgress: []
|
||||
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
||||
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
||||
##
|
||||
ingressNSMatchLabels: {}
|
||||
ingressNSPodMatchLabels: {}
|
||||
|
||||
## Configure the ingress resource that allows you to access the
|
||||
## Gitea installation. Set up the URL
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
##
|
||||
ingress:
|
||||
## @param ingress.enabled Enable ingress controller resource
|
||||
##
|
||||
enabled: false
|
||||
## @param ingress.pathType Ingress Path type
|
||||
##
|
||||
pathType: ImplementationSpecific
|
||||
## @param ingress.apiVersion Override API Version (automatically detected if not set)
|
||||
##
|
||||
apiVersion: ""
|
||||
## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
|
||||
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
|
||||
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
|
||||
##
|
||||
ingressClassName: ""
|
||||
## @param ingress.hostname Default host for the ingress resource
|
||||
##
|
||||
hostname: "gitea.local"
|
||||
## @param ingress.path The Path to Gitea. You may need to set this to '/*' in order to use this
|
||||
## with ALB ingress controllers.
|
||||
##
|
||||
path: /
|
||||
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
|
||||
## Use this parameter to set the required annotations for cert-manager, see
|
||||
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
||||
##
|
||||
## e.g:
|
||||
## annotations:
|
||||
## kubernetes.io/ingress.class: nginx
|
||||
## cert-manager.io/cluster-issuer: cluster-issuer-name
|
||||
##
|
||||
annotations: {}
|
||||
## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
|
||||
## You can use the ingress.secrets parameter to create this TLS secret or relay on cert-manager to create it
|
||||
##
|
||||
tls: false
|
||||
## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
|
||||
##
|
||||
selfSigned: false
|
||||
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
|
||||
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
|
||||
## extraHosts:
|
||||
## - name: gitea.local
|
||||
## path: /
|
||||
##
|
||||
extraHosts: []
|
||||
## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
|
||||
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
|
||||
## extraPaths:
|
||||
## - path: /*
|
||||
## backend:
|
||||
## serviceName: ssl-redirect
|
||||
## servicePort: use-annotation
|
||||
##
|
||||
extraPaths: []
|
||||
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
|
||||
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
|
||||
## extraTls:
|
||||
## - hosts:
|
||||
## - gitea.local
|
||||
## secretName: gitea.local-tls
|
||||
##
|
||||
extraTls: []
|
||||
## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
|
||||
## key and certificate should start with -----BEGIN CERTIFICATE----- or
|
||||
## REDACTED
|
||||
##
|
||||
## name should line up with a tlsSecret set further up
|
||||
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
|
||||
##
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
## Example:
|
||||
## - name: gitea.local-tls
|
||||
## key:
|
||||
## certificate:
|
||||
##
|
||||
secrets: []
|
||||
## @param ingress.extraRules Additional rules to be covered with this ingress record
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
|
||||
## e.g:
|
||||
## extraRules:
|
||||
## - host: example.local
|
||||
## http:
|
||||
## path: /
|
||||
## backend:
|
||||
## service:
|
||||
## name: example-svc
|
||||
## port:
|
||||
## name: http
|
||||
##
|
||||
extraRules: []
|
||||
## @section Other Parameters
|
||||
##
|
||||
|
||||
## Service account for Gitea to use.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||||
##
|
||||
serviceAccount:
|
||||
## @param serviceAccount.create Enable creation of ServiceAccount for Gitea pod
|
||||
##
|
||||
create: true
|
||||
## @param serviceAccount.name The name of the ServiceAccount to use.
|
||||
## If not set and create is true, a name is generated using the common.names.fullname template
|
||||
##
|
||||
name: ""
|
||||
## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
||||
## Can be set to false if pods using this serviceAccount do not need to use K8s API
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
||||
##
|
||||
annotations: {}
|
||||
## @section Database parameters
|
||||
##
|
||||
|
||||
## PostgreSQL chart configuration
|
||||
## ref: https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml
|
||||
## @param postgresql.enabled Switch to enable or disable the PostgreSQL helm chart
|
||||
## @param postgresql.auth.username Name for a custom user to create
|
||||
## @param postgresql.auth.password Password for the custom user to create
|
||||
## @param postgresql.auth.database Name for a custom database to create
|
||||
## @param postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials
|
||||
## @param postgresql.architecture PostgreSQL architecture (`standalone` or `replication`)
|
||||
## @param postgresql.service.ports.postgresql PostgreSQL service port
|
||||
##
|
||||
postgresql:
|
||||
enabled: false
|
||||
auth:
|
||||
username: bn_gitea
|
||||
password: ""
|
||||
database: bitnami_gitea
|
||||
existingSecret: ""
|
||||
architecture: standalone
|
||||
service:
|
||||
ports:
|
||||
postgresql: 5432
|
||||
## External PostgreSQL configuration
|
||||
## All of these values are only used when postgresql.enabled is set to false
|
||||
## @param externalDatabase.host Database host
|
||||
## @param externalDatabase.port Database port number
|
||||
## @param externalDatabase.user Non-root username for JupyterHub
|
||||
## @param externalDatabase.password Password for the non-root username for JupyterHub
|
||||
## @param externalDatabase.database JupyterHub database name
|
||||
## @param externalDatabase.existingSecret Name of an existing secret resource containing the database credentials
|
||||
## @param externalDatabase.existingSecretPasswordKey Name of an existing secret key containing the database credentials
|
||||
##
|
||||
externalDatabase:
|
||||
host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
|
||||
port: 5432
|
||||
user: {{ .Values.globals.gitea.postgres.username }}
|
||||
database: {{ .Values.globals.gitea.postgres.database }}
|
||||
password: {{ .Values.globals.gitea.postgres.password }}
|
||||
existingSecret: ""
|
||||
existingSecretPasswordKey: "db-password"
|
||||
## @section Volume Permissions parameters
|
||||
##
|
||||
|
||||
## Init containers parameters:
|
||||
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
|
||||
##
|
||||
volumePermissions:
|
||||
## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work)
|
||||
##
|
||||
enabled: false
|
||||
## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry
|
||||
## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name
|
||||
## @skip volumePermissions.image.tag Init container volume-permissions image tag
|
||||
## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
|
||||
## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/os-shell
|
||||
tag: 12-debian-12-r35
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## e.g:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## Init containers' resource requests and limits
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## We usually recommend not to specify default resources and to leave this as a conscious
|
||||
## choice for the user. This also increases chances charts run on environments with little
|
||||
## resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "nano"
|
||||
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
|
File diff suppressed because it is too large
Load Diff
1
k8s/helmfile.d/values/globals/.gitignore
vendored
1
k8s/helmfile.d/values/globals/.gitignore
vendored
@ -1 +0,0 @@
|
||||
secrets.yaml
|
@ -1,216 +0,0 @@
|
||||
{{ $email := "tonydu121@hotmail.com" }}
|
||||
{{ $domain := "mnke.org" }}
|
||||
{{ $subdomain := "dolo" }}
|
||||
{{ $appDomain := print $subdomain "." $domain }}
|
||||
# This should be an IP in the MetalLB range
|
||||
{{ $primaryLoadBalancerIP := "10.0.185.128" }}
|
||||
{{ $environment := "staging" }}
|
||||
{{ $ingressClass := "traefik" }}
|
||||
{{ $nfsStorageClass := "nfs-client" }}
|
||||
{{ $longhornStorageClass := "longhorn" }}
|
||||
|
||||
{{
|
||||
$ghostDatabase := dict
|
||||
"database" "ghost"
|
||||
"username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/username" )
|
||||
"password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/mysql/password" )
|
||||
}}
|
||||
{{
|
||||
$mysqlDatabases := list
|
||||
$ghostDatabase
|
||||
}}
|
||||
|
||||
{{
|
||||
$authentikDatabase := dict
|
||||
"database" "authentik"
|
||||
"username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/username" )
|
||||
"password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/postgres/password" )
|
||||
}}
|
||||
{{
|
||||
$harborDatabase := dict
|
||||
"database" "harborcore"
|
||||
"username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/username" )
|
||||
"password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/postgres/password" )
|
||||
}}
|
||||
{{
|
||||
$giteaDatabase := dict
|
||||
"database" "gitea"
|
||||
"username" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/username" )
|
||||
"password" ( fetchSecretValue "ref+file://values/globals/secrets.yaml#gitea/postgres/password" )
|
||||
}}
|
||||
{{
|
||||
$postgresDatabases := list
|
||||
$authentikDatabase
|
||||
$harborDatabase
|
||||
$giteaDatabase
|
||||
}}
|
||||
|
||||
globals:
|
||||
email: {{ $email }}
|
||||
environment: {{ $environment }}
|
||||
|
||||
certManager:
|
||||
namespace: cert-manager
|
||||
|
||||
traefik:
|
||||
namespace: traefik
|
||||
ingressClass: {{ $ingressClass }}
|
||||
loadBalancerIP: {{ $primaryLoadBalancerIP }}
|
||||
|
||||
certs:
|
||||
acmeEmail: {{ $email }}
|
||||
cloudflareEmail: {{ $email }}
|
||||
certIssuerMode: {{ $environment }}
|
||||
|
||||
cloudflareSecretToken: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#cloudflare/secretToken" }}
|
||||
cloudflareTokenSecretName: cloudflare-token-secret
|
||||
|
||||
issuerName: letsencrypt
|
||||
privateKeySecretRef: letsencrypt
|
||||
|
||||
hlMnkeOrg:
|
||||
certificateName: {{ $subdomain }}.{{ $domain }}
|
||||
certificateSecretName: {{ $subdomain }}.{{ $domain }}-tls
|
||||
certificateNamespace: default
|
||||
commonName: "{{ $appDomain }}"
|
||||
dnsZones:
|
||||
- "{{ $domain }}"
|
||||
dnsNames:
|
||||
- "{{ $appDomain }}"
|
||||
- "*.{{ $appDomain }}"
|
||||
|
||||
longhorn:
|
||||
namespace: longhorn-system
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
|
||||
nfsSubdirExternalProvisioner:
|
||||
namespace: nfs-subdir-external-provisioner
|
||||
replicaCount: 1
|
||||
nfs:
|
||||
server: truenas.local
|
||||
path: /mnt/emc14t9/k8s-pv
|
||||
storageClass: {{ $nfsStorageClass }}
|
||||
accessModes: ReadWriteMany
|
||||
|
||||
rancher:
|
||||
namespace: cattle-system
|
||||
ingressClass: {{ $ingressClass }}
|
||||
hostname: rancher.{{ $appDomain }}
|
||||
replicas: 3
|
||||
bootstrapPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#rancher/bootstrapPassword" }}
|
||||
|
||||
uptimeKuma:
|
||||
namespace: uptime-kuma
|
||||
ingressClass: {{ $ingressClass }}
|
||||
hosts:
|
||||
- uptime.{{ $appDomain }}
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
|
||||
mysql:
|
||||
namespace: db
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/username" }}
|
||||
password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/password" }}
|
||||
rootPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#mysql/rootPassword" }}
|
||||
databases:
|
||||
{{ $mysqlDatabases | toYaml | nindent 4 }}
|
||||
|
||||
postgres:
|
||||
namespace: db
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/username" }}
|
||||
password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/password" }}
|
||||
postgresPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#postgres/postgresPassword" }}
|
||||
databases:
|
||||
{{ $postgresDatabases | toYaml | nindent 4 }}
|
||||
|
||||
phpmyadmin:
|
||||
namespace: db
|
||||
hostname: pma.{{ $appDomain }}
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
ingressClass: {{ $ingressClass }}
|
||||
|
||||
pgadmin4:
|
||||
namespace: db
|
||||
hostname: pg.{{ $appDomain }}
|
||||
ingressClass: {{ $ingressClass }}
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
storageSize: 2Gi
|
||||
accessMode: ReadWriteOnce
|
||||
# can be email or nickname
|
||||
email: tony@mnke.org
|
||||
password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#pgadmin4/password" }}
|
||||
|
||||
redis:
|
||||
namespace: redis
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
storageSize: 8Gi
|
||||
accessMode: ReadWriteMany
|
||||
password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#redis/password" }}
|
||||
|
||||
ghost:
|
||||
namespace: ghost
|
||||
primaryHost: blog.mnke.org
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
ingressClass: {{ $ingressClass }}
|
||||
ghostEmail: {{ $email }}
|
||||
ghostPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#ghost/password" }}
|
||||
mysql:
|
||||
{{ $ghostDatabase | toYaml | nindent 6 }}
|
||||
|
||||
authentik:
|
||||
namespace: authentik
|
||||
storageClass: {{ $longhornStorageClass }}
|
||||
ingressClass: {{ $ingressClass }}
|
||||
secretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#authentik/secretKey" }}
|
||||
hostnames:
|
||||
- auth.{{ $appDomain }}
|
||||
- auth.{{ $domain }}
|
||||
postgres:
|
||||
{{ $authentikDatabase | toYaml | nindent 6 }}
|
||||
|
||||
harbor:
|
||||
namespace: harbor
|
||||
hostname: harbor.{{ $appDomain }}
|
||||
ingressClass: {{ $ingressClass }}
|
||||
storageClass: {{ $nfsStorageClass }}
|
||||
username: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/username" }}
|
||||
password: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/password" }}
|
||||
htpasswd: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/htpasswd" }}
|
||||
registrySecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/registrySecret" }}
|
||||
jobserviceSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/jobserviceSecret" }}
|
||||
coreSecretKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecretKey" }}
|
||||
coreSecret: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreSecret" }}
|
||||
coreCsrfKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreCsrfKey" }}
|
||||
coreTlsKey: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsKey" | quote }}
|
||||
coreTlsCert: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#harbor/coreTlsCert" | quote }}
|
||||
|
||||
postgres:
|
||||
{{ $harborDatabase | toYaml | nindent 6 }}
|
||||
|
||||
kubePrometheusStack:
|
||||
namespace: kube-prometheus-stack
|
||||
ingressClass: {{ $ingressClass }}
|
||||
storageClass: {{ $nfsStorageClass }}
|
||||
thanosRuler:
|
||||
storageSize: 4Gi
|
||||
prometheus:
|
||||
storageSize: 4Gi
|
||||
grafana:
|
||||
storageSize: 4Gi
|
||||
adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#kubePrometheusStack/grafana/adminPassword" }}
|
||||
hosts:
|
||||
- gf.{{ $appDomain }}
|
||||
|
||||
argocd:
|
||||
namespace: argo-cd
|
||||
ingressClass: {{ $ingressClass }}
|
||||
storageClass: {{ $nfsStorageClass }}
|
||||
hostname: argocd.{{ $appDomain }}
|
||||
adminPassword: {{ fetchSecretValue "ref+file://values/globals/secrets.yaml#argocd/adminPassword" }}
|
||||
|
||||
gitea:
|
||||
namespace: gitea
|
||||
ingressClass: {{ $ingressClass }}
|
||||
postgres:
|
||||
{{ $giteaDatabase | toYaml | nindent 6 }}
|
File diff suppressed because it is too large
Load Diff
@ -1,34 +0,0 @@
|
||||
# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# This is to override the chart name.
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
postgres:
|
||||
image:
|
||||
ref: postgres
|
||||
# This sets the pull policy for images.
|
||||
pullPolicy: IfNotPresent
|
||||
host: postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local
|
||||
username: postgres
|
||||
password: {{ .Values.globals.postgres.postgresPassword }}
|
||||
databases:
|
||||
{{- range .Values.globals.postgres.databases }}
|
||||
- database: {{ .database }}
|
||||
username: {{ .username }}
|
||||
password: {{ .password }}
|
||||
{{- end }}
|
||||
mysql:
|
||||
image:
|
||||
ref: mysql
|
||||
# This sets the pull policy for images.
|
||||
pullPolicy: IfNotPresent
|
||||
host: mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local
|
||||
username: root
|
||||
password: {{ .Values.globals.mysql.rootPassword }}
|
||||
databases:
|
||||
{{- range .Values.globals.mysql.databases }}
|
||||
- database: {{ .database }}
|
||||
username: {{ .username }}
|
||||
password: {{ .password }}
|
||||
{{- end }}
|
File diff suppressed because it is too large
Load Diff
@ -1,539 +0,0 @@
|
||||
# Default values for longhorn.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
global:
|
||||
# -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
|
||||
tolerations: []
|
||||
# -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer.
|
||||
nodeSelector: {}
|
||||
cattle:
|
||||
# -- Default system registry.
|
||||
systemDefaultRegistry: ""
|
||||
windowsCluster:
|
||||
# -- Setting that allows Longhorn to run on a Rancher Windows cluster.
|
||||
enabled: false
|
||||
# -- Toleration for Linux nodes that can run user-deployed Longhorn components.
|
||||
tolerations:
|
||||
- key: "cattle.io/os"
|
||||
value: "linux"
|
||||
effect: "NoSchedule"
|
||||
operator: "Equal"
|
||||
# -- Node selector for Linux nodes that can run user-deployed Longhorn components.
|
||||
nodeSelector:
|
||||
kubernetes.io/os: "linux"
|
||||
defaultSetting:
|
||||
# -- Toleration for system-managed Longhorn components.
|
||||
taintToleration: cattle.io/os=linux:NoSchedule
|
||||
# -- Node selector for system-managed Longhorn components.
|
||||
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
|
||||
|
||||
networkPolicies:
|
||||
# -- Setting that allows you to enable network policies that control access to Longhorn pods.
|
||||
enabled: false
|
||||
# -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1")
|
||||
type: "k3s"
|
||||
|
||||
image:
|
||||
longhorn:
|
||||
engine:
|
||||
# -- Repository for the Longhorn Engine image.
|
||||
repository: longhornio/longhorn-engine
|
||||
# -- Tag for the Longhorn Engine image.
|
||||
tag: v1.8.0
|
||||
manager:
|
||||
# -- Repository for the Longhorn Manager image.
|
||||
repository: longhornio/longhorn-manager
|
||||
# -- Tag for the Longhorn Manager image.
|
||||
tag: v1.8.0
|
||||
ui:
|
||||
# -- Repository for the Longhorn UI image.
|
||||
repository: longhornio/longhorn-ui
|
||||
# -- Tag for the Longhorn UI image.
|
||||
tag: v1.8.0
|
||||
instanceManager:
|
||||
# -- Repository for the Longhorn Instance Manager image.
|
||||
repository: longhornio/longhorn-instance-manager
|
||||
# -- Tag for the Longhorn Instance Manager image.
|
||||
tag: v1.8.0
|
||||
shareManager:
|
||||
# -- Repository for the Longhorn Share Manager image.
|
||||
repository: longhornio/longhorn-share-manager
|
||||
# -- Tag for the Longhorn Share Manager image.
|
||||
tag: v1.8.0
|
||||
backingImageManager:
|
||||
# -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/backing-image-manager
|
||||
# -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value.
|
||||
tag: v1.8.0
|
||||
supportBundleKit:
|
||||
# -- Repository for the Longhorn Support Bundle Manager image.
|
||||
repository: longhornio/support-bundle-kit
|
||||
# -- Tag for the Longhorn Support Bundle Manager image.
|
||||
tag: v0.0.49
|
||||
csi:
|
||||
attacher:
|
||||
# -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/csi-attacher
|
||||
# -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value.
|
||||
tag: v4.8.0
|
||||
provisioner:
|
||||
# -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/csi-provisioner
|
||||
# -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value.
|
||||
tag: v5.1.0-20241220
|
||||
nodeDriverRegistrar:
|
||||
# -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/csi-node-driver-registrar
|
||||
# -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value.
|
||||
tag: v2.13.0
|
||||
resizer:
|
||||
# -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/csi-resizer
|
||||
# -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value.
|
||||
tag: v1.13.1
|
||||
snapshotter:
|
||||
# -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/csi-snapshotter
|
||||
# -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value.
|
||||
tag: v8.2.0
|
||||
livenessProbe:
|
||||
# -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
|
||||
repository: longhornio/livenessprobe
|
||||
# -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value.
|
||||
tag: v2.15.0
|
||||
openshift:
|
||||
oauthProxy:
|
||||
# -- Repository for the OAuth Proxy image. Specify the upstream image (for example, "quay.io/openshift/origin-oauth-proxy"). This setting applies only to OpenShift users.
|
||||
repository: ""
|
||||
# -- Tag for the OAuth Proxy image. Specify OCP/OKD version 4.1 or later (including version 4.15, which is available at quay.io/openshift/origin-oauth-proxy:4.15). This setting applies only to OpenShift users.
|
||||
tag: ""
|
||||
# -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
ui:
|
||||
# -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
|
||||
type: ClusterIP
|
||||
# -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
|
||||
nodePort: null
|
||||
manager:
|
||||
# -- Service type for Longhorn Manager.
|
||||
type: ClusterIP
|
||||
# -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
|
||||
nodePort: ""
|
||||
|
||||
persistence:
|
||||
# -- Setting that allows you to specify the default Longhorn StorageClass.
|
||||
defaultClass: true
|
||||
# -- Filesystem type of the default Longhorn StorageClass.
|
||||
defaultFsType: ext4
|
||||
# -- mkfs parameters of the default Longhorn StorageClass.
|
||||
defaultMkfsParams: ""
|
||||
# -- Replica count of the default Longhorn StorageClass.
|
||||
defaultClassReplicaCount: 3
|
||||
# -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
|
||||
defaultDataLocality: disabled
|
||||
# -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
|
||||
reclaimPolicy: Delete
|
||||
# -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
|
||||
migratable: false
|
||||
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery.
|
||||
disableRevisionCounter: "true"
|
||||
# -- Set NFS mount options for Longhorn StorageClass for RWX volumes
|
||||
nfsOptions: ""
|
||||
recurringJobSelector:
|
||||
# -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass.
|
||||
enable: false
|
||||
# -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)
|
||||
jobList: []
|
||||
backingImage:
|
||||
# -- Setting that allows you to use a backing image in a Longhorn StorageClass.
|
||||
enable: false
|
||||
# -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.
|
||||
name: ~
|
||||
# -- Data source type of a backing image used in a Longhorn StorageClass.
|
||||
# If the backing image exists in the cluster, Longhorn uses this setting to verify the image.
|
||||
# If the backing image does not exist, Longhorn creates one using the specified data source type.
|
||||
dataSourceType: ~
|
||||
# -- Data source parameters of a backing image used in a Longhorn StorageClass.
|
||||
# You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`)
|
||||
dataSourceParameters: ~
|
||||
# -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass.
|
||||
expectedChecksum: ~
|
||||
defaultDiskSelector:
|
||||
# -- Setting that allows you to enable the disk selector for the default Longhorn StorageClass.
|
||||
enable: false
|
||||
# -- Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata")
|
||||
selector: ""
|
||||
defaultNodeSelector:
|
||||
# -- Setting that allows you to enable the node selector for the default Longhorn StorageClass.
|
||||
enable: false
|
||||
# -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")
|
||||
selector: ""
|
||||
# -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled")
|
||||
removeSnapshotsDuringFilesystemTrim: ignored
|
||||
# -- Setting that allows you to specify the data engine version for the default Longhorn StorageClass. (Options: "v1", "v2")
|
||||
dataEngine: v1
|
||||
# -- Setting that allows you to specify the backup target for the default Longhorn StorageClass.
|
||||
backupTargetName: default
|
||||
|
||||
preUpgradeChecker:
|
||||
# -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions.
|
||||
jobEnabled: true
|
||||
# -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled.
|
||||
upgradeVersionCheck: true
|
||||
|
||||
csi:
|
||||
# -- kubelet root directory. When unspecified, Longhorn uses the default value.
|
||||
kubeletRootDir: ~
|
||||
# -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3").
|
||||
attacherReplicaCount: ~
|
||||
# -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3").
|
||||
provisionerReplicaCount: ~
|
||||
# -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3").
|
||||
resizerReplicaCount: ~
|
||||
# -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3").
|
||||
snapshotterReplicaCount: ~
|
||||
|
||||
defaultSettings:
|
||||
# -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.
|
||||
allowRecurringJobWhileVolumeDetached: ~
|
||||
# -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.
|
||||
createDefaultDiskLabeledNodes: ~
|
||||
# -- Default path for storing data on a host. The default value is "/var/lib/longhorn/".
|
||||
defaultDataPath: ~
|
||||
# -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.
|
||||
defaultDataLocality: ~
|
||||
# -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default.
|
||||
replicaSoftAntiAffinity: ~
|
||||
# -- Setting that automatically rebalances replicas when an available node is discovered.
|
||||
replicaAutoBalance: ~
|
||||
# -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100".
|
||||
storageOverProvisioningPercentage: ~
|
||||
# -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25".
|
||||
storageMinimalAvailablePercentage: ~
|
||||
# -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node.
|
||||
storageReservedPercentageForDefaultDisk: ~
|
||||
# -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default
|
||||
upgradeChecker: ~
|
||||
# -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3".
|
||||
defaultReplicaCount: ~
|
||||
# -- Default name of Longhorn static StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. "storageClassName" needs to be an existing StorageClass. The default value is "longhorn-static".
|
||||
defaultLonghornStaticStorageClass: ~
|
||||
# -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled.
|
||||
failedBackupTTL: ~
|
||||
# -- Number of minutes that Longhorn allows for the backup execution. The default value is "1".
|
||||
backupExecutionTimeout: ~
|
||||
# -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration.
|
||||
restoreVolumeRecurringJobs: ~
|
||||
# -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained.
|
||||
recurringSuccessfulJobsHistoryLimit: ~
|
||||
# -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.
|
||||
recurringFailedJobsHistoryLimit: ~
|
||||
# -- Maximum number of snapshots or backups to be retained.
|
||||
recurringJobMaxRetention: ~
|
||||
# -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles.
|
||||
supportBundleFailedHistoryLimit: ~
|
||||
# -- Taint or toleration for system-managed Longhorn components.
|
||||
# Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect).
|
||||
taintToleration: ~
|
||||
# -- Node selector for system-managed Longhorn components.
|
||||
systemManagedComponentsNodeSelector: ~
|
||||
# -- PriorityClass for system-managed Longhorn components.
|
||||
# This setting can help prevent Longhorn components from being evicted under Node Pressure.
|
||||
# Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`.
|
||||
priorityClass: &defaultPriorityClassNameRef "longhorn-critical"
|
||||
# -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default.
|
||||
autoSalvage: ~
|
||||
# -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.
|
||||
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
|
||||
# -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default.
|
||||
disableSchedulingOnCordonedNode: ~
|
||||
# -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=<Zone name of the node>" in the Kubernetes node object.
|
||||
replicaZoneSoftAntiAffinity: ~
|
||||
# -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default.
|
||||
replicaDiskSoftAntiAffinity: ~
|
||||
# -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed.
|
||||
nodeDownPodDeletionPolicy: ~
|
||||
# -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained.
|
||||
nodeDrainPolicy: ~
|
||||
# -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned.
|
||||
detachManuallyAttachedVolumesWhenCordoned: ~
|
||||
# -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume.
|
||||
replicaReplenishmentWaitInterval: ~
|
||||
# -- Maximum number of replicas that can be concurrently rebuilt on each node.
|
||||
concurrentReplicaRebuildPerNodeLimit: ~
|
||||
# -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled.
|
||||
concurrentVolumeBackupRestorePerNodeLimit: ~
|
||||
# -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI.
|
||||
disableRevisionCounter: "true"
|
||||
# -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart.
|
||||
systemManagedPodsImagePullPolicy: ~
|
||||
# -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation.
|
||||
allowVolumeCreationWithDegradedAvailability: ~
|
||||
# -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed.
|
||||
autoCleanupSystemGeneratedSnapshot: ~
|
||||
# -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job.
|
||||
autoCleanupRecurringJobBackupSnapshot: ~
|
||||
# -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version.
|
||||
concurrentAutomaticEngineUpgradePerNodeLimit: ~
|
||||
# -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it.
|
||||
backingImageCleanupWaitInterval: ~
|
||||
# -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown".
|
||||
backingImageRecoveryWaitInterval: ~
|
||||
# -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12".
|
||||
guaranteedInstanceManagerCPU: ~
|
||||
# -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
|
||||
kubernetesClusterAutoscalerEnabled: ~
|
||||
# -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up.
|
||||
orphanAutoDeletion: ~
|
||||
# -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network.
|
||||
storageNetwork: ~
|
||||
# -- Flag that prevents accidental uninstallation of Longhorn.
|
||||
deletingConfirmationFlag: ~
|
||||
# -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8".
|
||||
engineReplicaTimeout: ~
|
||||
# -- Setting that allows you to enable and disable snapshot hashing and data integrity checks.
|
||||
snapshotDataIntegrity: ~
|
||||
# -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance.
|
||||
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
|
||||
# -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format.
|
||||
snapshotDataIntegrityCronjob: ~
|
||||
# -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files.
|
||||
removeSnapshotsDuringFilesystemTrim: ~
|
||||
# -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check".
|
||||
fastReplicaRebuildEnabled: ~
|
||||
# -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed.
|
||||
replicaFileSyncHttpClientTimeout: ~
|
||||
# -- Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations.
|
||||
longGRPCTimeOut: ~
|
||||
# -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")
|
||||
logLevel: ~
|
||||
# -- Setting that allows you to specify a backup compression method.
|
||||
backupCompressionMethod: ~
|
||||
# -- Maximum number of worker threads that can concurrently run for each backup.
|
||||
backupConcurrentLimit: ~
|
||||
# -- Maximum number of worker threads that can concurrently run for each restore operation.
|
||||
restoreConcurrentLimit: ~
|
||||
# -- Setting that allows you to enable the V1 Data Engine.
|
||||
v1DataEngine: ~
|
||||
# -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is an experimental feature and should not be used in production environments.
|
||||
v2DataEngine: ~
|
||||
# -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine.
|
||||
v2DataEngineHugepageLimit: ~
|
||||
# -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250".
|
||||
v2DataEngineGuaranteedInstanceManagerCPU: ~
|
||||
# -- CPU cores on which the Storage Performance Development Kit (SPDK) target daemon should run. The SPDK target daemon is located in each Instance Manager pod. Ensure that the number of cores is less than or equal to the guaranteed Instance Manager CPUs for the V2 Data Engine. The default value is "0x1".
|
||||
v2DataEngineCPUMask: ~
|
||||
# -- Setting that allows scheduling of empty node selector volumes to any node.
|
||||
allowEmptyNodeSelectorVolume: ~
|
||||
# -- Setting that allows scheduling of empty disk selector volumes to any disk.
|
||||
allowEmptyDiskSelectorVolume: ~
|
||||
# -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses.
|
||||
allowCollectingLonghornUsageMetrics: ~
|
||||
# -- Setting that temporarily prevents all attempts to purge volume snapshots.
|
||||
disableSnapshotPurge: ~
|
||||
# -- Maximum snapshot count for a volume. The value should be between 2 to 250
|
||||
snapshotMaxCount: ~
|
||||
# -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
|
||||
v2DataEngineLogLevel: ~
|
||||
# -- Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine.
|
||||
v2DataEngineLogFlags: ~
|
||||
# -- Setting that freezes the filesystem on the root partition before a snapshot is created.
|
||||
freezeFilesystemForSnapshot: ~
|
||||
# -- Setting that automatically cleans up the snapshot when the backup is deleted.
|
||||
autoCleanupSnapshotWhenDeleteBackup: ~
|
||||
# -- Setting that allows Longhorn to detect node failure and immediately migrate affected RWX volumes.
|
||||
rwxVolumeFastFailover: ~
|
||||
|
||||
# -- Setting that allows you to update the default backupstore.
|
||||
defaultBackupStore:
|
||||
# -- Endpoint used to access the default backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE")
|
||||
backupTarget: ~
|
||||
# -- Name of the Kubernetes secret associated with the default backup target.
|
||||
backupTargetCredentialSecret: ~
|
||||
# -- Number of seconds that Longhorn waits before checking the default backupstore for new backups. The default value is "300". When the value is "0", polling is disabled.
|
||||
pollInterval: ~
|
||||
|
||||
privateRegistry:
|
||||
# -- Setting that allows you to create a private registry secret.
|
||||
createSecret: ~
|
||||
# -- URL of a private registry. When unspecified, Longhorn uses the default system registry.
|
||||
registryUrl: ~
|
||||
# -- User account used for authenticating with a private registry.
|
||||
registryUser: ~
|
||||
# -- Password for authenticating with a private registry.
|
||||
registryPasswd: ~
|
||||
# -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name.
|
||||
registrySecret: ~
|
||||
|
||||
longhornManager:
|
||||
log:
|
||||
# -- Format of Longhorn Manager logs. (Options: "plain", "json")
|
||||
format: plain
|
||||
# -- PriorityClass for Longhorn Manager.
|
||||
priorityClass: *defaultPriorityClassNameRef
|
||||
# -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components.
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
# -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager.
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
# -- Annotation for the Longhorn Manager service.
|
||||
serviceAnnotations: {}
|
||||
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# annotation-key1: "annotation-value1"
|
||||
# annotation-key2: "annotation-value2"
|
||||
|
||||
longhornDriver:
|
||||
log:
|
||||
# -- Format of longhorn-driver logs. (Options: "plain", "json")
|
||||
format: plain
|
||||
# -- PriorityClass for Longhorn Driver.
|
||||
priorityClass: *defaultPriorityClassNameRef
|
||||
# -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components.
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
# -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver.
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
longhornUI:
|
||||
# -- Replica count for Longhorn UI.
|
||||
replicas: 2
|
||||
# -- PriorityClass for Longhorn UI.
|
||||
priorityClass: *defaultPriorityClassNameRef
|
||||
# -- Toleration for Longhorn UI on nodes allowed to run Longhorn components.
|
||||
tolerations: []
|
||||
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
|
||||
## and uncomment this example block
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
# -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI.
|
||||
nodeSelector: {}
|
||||
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
|
||||
## and uncomment this example block
|
||||
# label-key1: "label-value1"
|
||||
# label-key2: "label-value2"
|
||||
|
||||
ingress:
|
||||
# -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service.
|
||||
enabled: false
|
||||
|
||||
# -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller.
|
||||
# ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases.
|
||||
ingressClassName: ~
|
||||
|
||||
# -- Hostname of the Layer 7 load balancer.
|
||||
host: sslip.io
|
||||
|
||||
# -- Setting that allows you to enable TLS on ingress records.
|
||||
tls: false
|
||||
|
||||
# -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443.
|
||||
secureBackends: false
|
||||
|
||||
# -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records.
|
||||
tlsSecret: longhorn.local-tls
|
||||
|
||||
# -- Default ingress path. You can access the Longhorn UI by following the full ingress path \{\{host\}\}+\{\{path\}\}.
|
||||
path: /
|
||||
|
||||
# -- Ingress path type. To maintain backward compatibility, the default value is "ImplementationSpecific".
|
||||
pathType: ImplementationSpecific
|
||||
|
||||
## If you're using kube-lego, you will want to add:
|
||||
## kubernetes.io/tls-acme: true
|
||||
##
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
|
||||
##
|
||||
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
|
||||
# -- Ingress annotations in the form of key-value pairs.
|
||||
annotations:
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: true
|
||||
|
||||
# -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses.
|
||||
secrets:
|
||||
## If you're providing your own certificates, please use this to add the certificates as secrets
|
||||
## key and certificate should start with -----BEGIN CERTIFICATE----- or
|
||||
## REDACTED
|
||||
##
|
||||
## name should line up with a tlsSecret set further up
|
||||
## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
|
||||
##
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
# - name: longhorn.local-tls
|
||||
# key:
|
||||
# certificate:
|
||||
|
||||
# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled.
|
||||
enablePSP: false
|
||||
|
||||
# -- Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`.
|
||||
namespaceOverride: ""
|
||||
|
||||
# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional.
|
||||
annotations: {}
|
||||
|
||||
serviceAccount:
|
||||
# -- Annotations to add to the service account
|
||||
annotations: {}
|
||||
|
||||
metrics:
|
||||
serviceMonitor:
|
||||
# -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components.
|
||||
enabled: false
|
||||
# -- Additional labels for the Prometheus ServiceMonitor resource.
|
||||
additionalLabels: {}
|
||||
# -- Annotations for the Prometheus ServiceMonitor resource.
|
||||
annotations: {}
|
||||
# -- Interval at which Prometheus scrapes the metrics from the target.
|
||||
interval: ""
|
||||
# -- Timeout after which Prometheus considers the scrape to be failed.
|
||||
scrapeTimeout: ""
|
||||
# -- Configures the relabeling rules to apply the target’s metadata labels. See the [Prometheus Operator
|
||||
# documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
|
||||
# formatting details.
|
||||
relabelings: []
|
||||
# -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator
|
||||
# documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for
|
||||
# formatting details.
|
||||
metricRelabelings: []
|
||||
|
||||
## openshift settings
|
||||
openshift:
|
||||
# -- Setting that allows Longhorn to integrate with OpenShift.
|
||||
enabled: false
|
||||
ui:
|
||||
# -- Route for connections between Longhorn and the OpenShift web console.
|
||||
route: "longhorn-ui"
|
||||
# -- Port for accessing the OpenShift web console.
|
||||
port: 443
|
||||
# -- Port for proxy that provides access to the OpenShift web console.
|
||||
proxy: 8443
|
||||
|
||||
# -- Setting that allows Longhorn to generate code coverage profiles.
|
||||
enableGoCoverDir: false
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,115 +0,0 @@
|
||||
replicaCount: {{ .Values.globals.nfsSubdirExternalProvisioner.replicaCount }}
|
||||
strategyType: Recreate
|
||||
|
||||
image:
|
||||
repository: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
|
||||
tag: v4.0.2
|
||||
pullPolicy: IfNotPresent
|
||||
imagePullSecrets: []
|
||||
|
||||
nfs:
|
||||
server: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.server }}
|
||||
path: {{ .Values.globals.nfsSubdirExternalProvisioner.nfs.path }}
|
||||
mountOptions:
|
||||
volumeName: nfs-subdir-external-provisioner-root
|
||||
# Reclaim policy for the main nfs volume
|
||||
reclaimPolicy: Retain
|
||||
|
||||
# For creating the StorageClass automatically:
|
||||
storageClass:
|
||||
create: true
|
||||
|
||||
# Set a provisioner name. If unset, a name will be generated.
|
||||
# provisionerName:
|
||||
|
||||
# Set StorageClass as the default StorageClass
|
||||
# Ignored if storageClass.create is false
|
||||
defaultClass: true
|
||||
|
||||
# Set a StorageClass name
|
||||
# Ignored if storageClass.create is false
|
||||
name: {{ .Values.globals.nfsSubdirExternalProvisioner.storageClass }}
|
||||
|
||||
# Allow volume to be expanded dynamically
|
||||
allowVolumeExpansion: true
|
||||
|
||||
# Method used to reclaim an obsoleted volume
|
||||
reclaimPolicy: Delete
|
||||
|
||||
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
|
||||
archiveOnDelete: true
|
||||
|
||||
# If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory.
|
||||
# Overrides archiveOnDelete.
|
||||
# Ignored if value not set.
|
||||
onDelete:
|
||||
|
||||
# Specifies a template for creating a directory path via PVC metadata's such as labels, annotations, name or namespace.
|
||||
# Ignored if value not set.
|
||||
pathPattern:
|
||||
|
||||
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
|
||||
accessModes: {{ .Values.globals.nfsSubdirExternalProvisioner.accessModes }}
|
||||
|
||||
# Set volume bindinng mode - Immediate or WaitForFirstConsumer
|
||||
volumeBindingMode: Immediate
|
||||
|
||||
# Storage class annotations
|
||||
annotations: {}
|
||||
|
||||
leaderElection:
|
||||
# When set to false leader election will be disabled
|
||||
enabled: true
|
||||
|
||||
## For RBAC support:
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
|
||||
# If true, create & use Pod Security Policy resources
|
||||
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
|
||||
# Deployment pod annotations
|
||||
podAnnotations: {}
|
||||
|
||||
## Set pod priorityClassName
|
||||
# priorityClassName: ""
|
||||
|
||||
podSecurityContext: {}
|
||||
|
||||
securityContext: {}
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
# Additional labels for any resource created
|
||||
labels: {}
|
||||
|
||||
podDisruptionBudget:
|
||||
enabled: false
|
||||
maxUnavailable: 1
|
||||
|
@ -1,420 +0,0 @@
|
||||
# Default values for pgAdmin4.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
## pgAdmin4 container image
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: dpage/pgadmin4
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Deployment annotations
|
||||
annotations: {}
|
||||
|
||||
## commonLabels Add labels to all the deployed resources
|
||||
commonLabels: {}
|
||||
|
||||
## priorityClassName
|
||||
priorityClassName: ""
|
||||
|
||||
## Deployment entrypoint override
|
||||
## Useful when there's a requirement to modify container's default:
|
||||
## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example
|
||||
## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206
|
||||
# command: "['/bin/sh', '-c', 'source /vault/secrets/config && <entrypoint script>']"
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
clusterIP: ""
|
||||
loadBalancerIP: ""
|
||||
port: 80
|
||||
targetPort: 80
|
||||
# targetPort: 4181 To be used with a proxy extraContainer
|
||||
portName: http
|
||||
|
||||
annotations: {}
|
||||
## Special annotations at the service level, e.g
|
||||
## this will set vnet internal IP's rather than public ip's
|
||||
## service.beta.kubernetes.io/azure-load-balancer-internal: "true"
|
||||
|
||||
## Specify the nodePort value for the service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
##
|
||||
# nodePort:
|
||||
|
||||
## Pod Service Account
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||||
##
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: false
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
# Opt out of API credential automounting.
|
||||
# If you don't want the kubelet to automatically mount a ServiceAccount's API credentials,
|
||||
# you can opt out of the default behavior
|
||||
automountServiceAccountToken: false
|
||||
|
||||
## Pod HostAliases
|
||||
## ref: https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
|
||||
##
|
||||
hostAliases:
|
||||
# - ip: "127.0.0.1"
|
||||
# hostnames:
|
||||
# - "pgadmin4.local"
|
||||
|
||||
## Strategy used to replace old Pods by new ones
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
|
||||
##
|
||||
strategy: {}
|
||||
# type: RollingUpdate
|
||||
# rollingUpdate:
|
||||
# maxSurge: 0
|
||||
# maxUnavailable: 1
|
||||
|
||||
## Server definitions will be loaded at launch time. This allows connection
|
||||
## information to be pre-loaded into the instance of pgAdmin4 in the container.
|
||||
## Note that server definitions are only loaded on first launch,
|
||||
## i.e. when the configuration database is created, and not on subsequent launches using the same configuration database.
|
||||
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html
|
||||
##
|
||||
serverDefinitions:
|
||||
## If true, server definitions will be created
|
||||
##
|
||||
enabled: true
|
||||
|
||||
## The resource type to use for deploying server definitions.
|
||||
## Can either be ConfigMap or Secret
|
||||
resourceType: ConfigMap
|
||||
|
||||
# If resource type is set to ConfigMap, specify existingConfigmap containing definitions
|
||||
existingConfigmap: ""
|
||||
|
||||
# If resource type is set to Secret, specify existingSecret containing definitions
|
||||
existingSecret: ""
|
||||
|
||||
servers:
|
||||
postgres:
|
||||
Name: "main"
|
||||
Group: "Servers"
|
||||
Port: 5432
|
||||
Username: "postgres"
|
||||
Host: "postgres-postgresql.{{ .Values.globals.postgres.namespace }}.svc.cluster.local"
|
||||
SSLMode: "prefer"
|
||||
MaintenanceDB: "postgres"
|
||||
|
||||
networkPolicy:
|
||||
enabled: true
|
||||
|
||||
## Ingress
|
||||
## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: {{ .Values.globals.pgadmin4.ingressClass }}
|
||||
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
||||
ingressClassName: {{ .Values.globals.pgadmin4.ingressClass }}
|
||||
hosts:
|
||||
- host: {{ .Values.globals.pgadmin4.hostname }}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls:
|
||||
- secretName: pg-hl-mnke-org-tls
|
||||
hosts:
|
||||
- {{ .Values.globals.pgadmin4.hostname }}
|
||||
|
||||
# Additional config maps to be mounted inside a container
|
||||
# Can be used to map config maps for sidecar as well
|
||||
extraConfigmapMounts: []
|
||||
# - name: certs-configmap
|
||||
# mountPath: /etc/ssl/certs
|
||||
# subPath: ""
|
||||
# configMap: certs-configmap
|
||||
# readOnly: true
|
||||
|
||||
extraSecretMounts: []
|
||||
# - name: pgpassfile
|
||||
# secret: pgpassfile
|
||||
# subPath: pgpassfile
|
||||
# mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass"
|
||||
# readOnly: true
|
||||
|
||||
## Additional volumes to be mounted inside a container
|
||||
##
|
||||
extraVolumeMounts: []
|
||||
|
||||
## Specify additional containers in extraContainers.
|
||||
## For example, to add an authentication proxy to a pgadmin4 pod.
|
||||
extraContainers: |
|
||||
# - name: proxy
|
||||
# image: quay.io/gambol99/keycloak-proxy:latest
|
||||
# args:
|
||||
# - -provider=github
|
||||
# - -client-id=
|
||||
# - -client-secret=
|
||||
# - -github-org=<ORG_NAME>
|
||||
# - -email-domain=*
|
||||
# - -cookie-secret=
|
||||
# - -http-address=http://0.0.0.0:4181
|
||||
# - -upstream-url=http://127.0.0.1:3000
|
||||
# ports:
|
||||
# - name: proxy-web
|
||||
# containerPort: 4181
|
||||
|
||||
## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret.
|
||||
##
|
||||
existingSecret: ""
|
||||
## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set.
|
||||
##
|
||||
secretKeys:
|
||||
pgadminPasswordKey: password
|
||||
|
||||
## pgAdmin4 startup configuration
|
||||
## Values in here get injected as environment variables
|
||||
## Needed chart reinstall for apply changes
|
||||
env:
|
||||
# can be email or nickname
|
||||
email: {{ .Values.globals.pgadmin4.email }}
|
||||
password: {{ .Values.globals.pgadmin4.password }}
|
||||
# pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass
|
||||
|
||||
# set context path for application (e.g. /pgadmin4/*)
|
||||
# contextPath: /pgadmin4
|
||||
|
||||
## If True, allows pgAdmin4 to create session cookies based on IP address
|
||||
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html
|
||||
##
|
||||
enhanced_cookie_protection: "False"
|
||||
|
||||
## Add custom environment variables that will be injected to deployment
|
||||
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
|
||||
##
|
||||
variables: []
|
||||
# - name: PGADMIN_LISTEN_ADDRESS
|
||||
# value: "0.0.0.0"
|
||||
# - name: PGADMIN_LISTEN_PORT
|
||||
# value: "8080"
|
||||
|
||||
## Additional environment variables from ConfigMaps
|
||||
envVarsFromConfigMaps: []
|
||||
# - array-of
|
||||
# - config-map-names
|
||||
|
||||
## Additional environment variables from Secrets
|
||||
envVarsFromSecrets: []
|
||||
# - array-of
|
||||
# - secret-names
|
||||
|
||||
## Additional environment variables
|
||||
envVarsExtra: []
|
||||
# - name: POSTGRES_USERNAME
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
|
||||
# key: username
|
||||
# - name: POSTGRES_PASSWORD
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: pgadmin.pgadmin-db.credentials.postgresql.acid.zalan.do
|
||||
# key: password
|
||||
|
||||
persistentVolume:
|
||||
## If true, pgAdmin4 will create/use a Persistent Volume Claim
|
||||
## If false, use emptyDir
|
||||
##
|
||||
enabled: true
|
||||
|
||||
## pgAdmin4 Persistent Volume Claim annotations
|
||||
##
|
||||
annotations: {}
|
||||
|
||||
## pgAdmin4 Persistent Volume access modes
|
||||
## Must match those of existing PV or dynamic provisioner
|
||||
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
|
||||
accessModes:
|
||||
- {{ .Values.globals.pgadmin4.accessMode }}
|
||||
|
||||
## pgAdmin4 Persistent Volume Size
|
||||
##
|
||||
size: {{ .Values.globals.pgadmin4.storageSize }}
|
||||
|
||||
## pgAdmin4 Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: {{ .Values.globals.pgadmin4.storageClass }}
|
||||
# existingClaim: ""
|
||||
|
||||
## Subdirectory of pgAdmin4 Persistent Volume to mount
|
||||
## Useful if the volume's root directory is not empty
|
||||
##
|
||||
subPath: ""
|
||||
|
||||
## Additional volumes to be added to the deployment
|
||||
##
|
||||
extraVolumes: []
|
||||
|
||||
## Security context to be added to pgAdmin4 pods
|
||||
##
|
||||
securityContext:
|
||||
runAsUser: 5050
|
||||
runAsGroup: 5050
|
||||
fsGroup: 5050
|
||||
|
||||
containerSecurityContext:
|
||||
enabled: false
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
## pgAdmin4 readiness and liveness probe initial delay and timeout
|
||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
|
||||
##
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 60
|
||||
timeoutSeconds: 15
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 60
|
||||
timeoutSeconds: 15
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
|
||||
## Required to be enabled pre pgAdmin4 4.16 release, to set the ACL on /var/lib/pgadmin.
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
|
||||
##
|
||||
VolumePermissions:
|
||||
## If true, enables an InitContainer to set permissions on /var/lib/pgadmin.
|
||||
##
|
||||
enabled: false
|
||||
|
||||
## @param extraDeploy list of extra manifests to deploy
|
||||
##
|
||||
extraDeploy: []
|
||||
|
||||
## Additional InitContainers to initialize the pod
|
||||
##
|
||||
extraInitContainers: |
|
||||
# - name: add-folder-for-pgpass
|
||||
# image: "dpage/pgadmin4:latest"
|
||||
# command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"]
|
||||
# volumeMounts:
|
||||
# - name: pgadmin-data
|
||||
# mountPath: /var/lib/pgadmin
|
||||
# securityContext:
|
||||
# runAsUser: 5050
|
||||
|
||||
containerPorts:
|
||||
http: 80
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
## Horizontal Pod Autoscaling
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
|
||||
#
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 100
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
## Node labels for pgAdmin4 pod assignment
|
||||
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector: {}
|
||||
|
||||
## Node tolerations for server scheduling to nodes with taints
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## Pod affinity
|
||||
##
|
||||
affinity: {}
|
||||
|
||||
## Pod DNS Policy
|
||||
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
||||
|
||||
dnsPolicy: ""
|
||||
|
||||
## Update pod DNS Config
|
||||
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
|
||||
|
||||
dnsConfig: {}
|
||||
# nameservers:
|
||||
# - 192.0.2.1
|
||||
# searches:
|
||||
# - ns1.svc.cluster-domain.example
|
||||
# - my.dns.search.suffix
|
||||
# options:
|
||||
# - name: ndots
|
||||
# value: "2"
|
||||
# - name: edns0
|
||||
|
||||
## Pod annotations
|
||||
##
|
||||
podAnnotations: {}
|
||||
templatedPodAnnotations: |-
|
||||
# checksum/configmap-oauth2:{{ `{{ include "<parent-chart-name>/templates/configmap-oauth2.yaml" $ | sha256sum }}` }}
|
||||
# checksum/secret-oauth2: "{{ `{{ include "<parent-chart-name>/templates/secret-oauth2.yaml" $ | sha256sum }}` }}"
|
||||
# checksum/secret-pgpass: "{{ `{{ include "<parent-chart-name>/templates/secret-pgpass.yaml" $ | sha256sum }}` }}"
|
||||
|
||||
## Pod labels
|
||||
##
|
||||
podLabels: {}
|
||||
# key1: value1
|
||||
# key2: value2
|
||||
|
||||
# -- The name of the Namespace to deploy
|
||||
# If not set, `.Release.Namespace` is used
|
||||
namespace: null
|
||||
|
||||
init:
|
||||
## Init container resources
|
||||
##
|
||||
resources: {}
|
||||
|
||||
## Define values for chart tests
|
||||
test:
|
||||
## Container image for test-connection.yaml
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: busybox
|
||||
tag: latest
|
||||
## Resources request/limit for test-connection Pod
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 50m
|
||||
# memory: 32Mi
|
||||
# requests:
|
||||
# cpu: 25m
|
||||
# memory: 16Mi
|
||||
## Security context for test-connection Pod
|
||||
securityContext:
|
||||
runAsUser: 5051
|
||||
runAsGroup: 5051
|
||||
fsGroup: 5051
|
||||
|
@ -1,811 +0,0 @@
|
||||
# Copyright Broadcom, Inc. All Rights Reserved.
|
||||
# SPDX-License-Identifier: APACHE-2.0
|
||||
|
||||
## @section Global parameters
|
||||
## Global Docker image parameters
|
||||
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
|
||||
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
|
||||
|
||||
## @param global.imageRegistry Global Docker image registry
|
||||
## @param global.imagePullSecrets Global Docker registry secret names as an array
|
||||
## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s)
|
||||
##
|
||||
global:
|
||||
imageRegistry: ""
|
||||
## E.g.
|
||||
## imagePullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
imagePullSecrets: []
|
||||
defaultStorageClass: {{ .Values.globals.phpmyadmin.storageClass }}
|
||||
## Security parameters
|
||||
##
|
||||
security:
|
||||
## @param global.security.allowInsecureImages Allows skipping image verification
|
||||
allowInsecureImages: false
|
||||
## Compatibility adaptations for Kubernetes platforms
|
||||
##
|
||||
compatibility:
|
||||
## Compatibility adaptations for Openshift
|
||||
##
|
||||
openshift:
|
||||
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
|
||||
##
|
||||
adaptSecurityContext: disabled
|
||||
## @section Common parameters
|
||||
|
||||
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
|
||||
##
|
||||
kubeVersion: ""
|
||||
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
|
||||
##
|
||||
nameOverride: ""
|
||||
## @param fullnameOverride String to fully override common.names.fullname template
|
||||
##
|
||||
fullnameOverride: ""
|
||||
## @param commonLabels Add labels to all the deployed resources
|
||||
##
|
||||
commonLabels: {}
|
||||
## @param commonAnnotations Add annotations to all the deployed resources
|
||||
##
|
||||
commonAnnotations: {}
|
||||
## @param clusterDomain Kubernetes Cluster Domain
|
||||
##
|
||||
clusterDomain: cluster.local
|
||||
## @param extraDeploy Array of extra objects to deploy with the release
|
||||
##
|
||||
extraDeploy: []
|
||||
## @section phpMyAdmin parameters
|
||||
|
||||
## Bitnami PhpMyAdmin image version
|
||||
## ref: https://hub.docker.com/r/bitnami/phpmyadmin/tags/
|
||||
## @param image.registry [default: REGISTRY_NAME] phpMyAdmin image registry
|
||||
## @param image.repository [default: REPOSITORY_NAME/phpmyadmin] phpMyAdmin image repository
|
||||
## @skip image.tag phpMyAdmin image tag (immutable tags are recommended)
|
||||
## @param image.digest phpMyAdmin image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param image.pullPolicy Image pull policy
|
||||
## @param image.pullSecrets Specify docker-registry secret names as an array
|
||||
## @param image.debug Enable phpmyadmin image debug mode
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/phpmyadmin
|
||||
tag: 5.2.2-debian-12-r0
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
##
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## Example:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## Enable debug mode
|
||||
##
|
||||
debug: false
|
||||
## @param command Override default container command (useful when using custom images)
|
||||
##
|
||||
command: []
|
||||
## @param args Override default container args (useful when using custom images)
|
||||
##
|
||||
args: []
|
||||
## @param lifecycleHooks for the phpmyadmin container(s) to automate configuration before or after startup
|
||||
##
|
||||
lifecycleHooks: {}
|
||||
## @param extraEnvVars Extra environment variables to be set on PhpMyAdmin container
|
||||
## For example:
|
||||
## extraEnvVars:
|
||||
## - name: PHP_UPLOAD_MAX_FILESIZE
|
||||
## value: "80M"
|
||||
##
|
||||
extraEnvVars: []
|
||||
## @param extraEnvVarsCM Name of a existing ConfigMap containing extra env vars
|
||||
##
|
||||
extraEnvVarsCM: ""
|
||||
## @param extraEnvVarsSecret Name of a existing Secret containing extra env vars
|
||||
##
|
||||
extraEnvVarsSecret: ""
|
||||
## @section phpMyAdmin deployment parameters
|
||||
|
||||
## @param automountServiceAccountToken Mount Service Account token in pod
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param hostAliases [array] Deployment pod host aliases
|
||||
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
|
||||
##
|
||||
hostAliases:
|
||||
## Necessary for apache-exporter to work
|
||||
##
|
||||
- ip: "127.0.0.1"
|
||||
hostnames:
|
||||
- "status.localhost"
|
||||
## phpMyAdmin container ports to open
|
||||
## @param containerPorts.http HTTP port to expose at container level
|
||||
## @param containerPorts.https HTTPS port to expose at container level
|
||||
##
|
||||
containerPorts:
|
||||
http: 8080
|
||||
https: 8443
|
||||
## @param extraContainerPorts Optionally specify extra list of additional ports for phpMyAdmin container(s)
|
||||
## e.g:
|
||||
## extraContainerPorts:
|
||||
## - name: myservice
|
||||
## containerPort: 9090
|
||||
##
|
||||
extraContainerPorts: []
|
||||
## @param updateStrategy.type Strategy to use to update Pods
|
||||
##
|
||||
updateStrategy:
|
||||
## StrategyType
|
||||
## Can be set to RollingUpdate or OnDelete
|
||||
##
|
||||
type: RollingUpdate
|
||||
## phpMyAdmin pods' Security Context
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
|
||||
## @param podSecurityContext.enabled Enable phpMyAdmin pods' Security Context
|
||||
## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
|
||||
## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface
|
||||
## @param podSecurityContext.supplementalGroups Set filesystem extra groups
|
||||
## @param podSecurityContext.fsGroup User ID for the container
|
||||
##
|
||||
podSecurityContext:
|
||||
enabled: true
|
||||
fsGroupChangePolicy: Always
|
||||
sysctls: []
|
||||
supplementalGroups: []
|
||||
fsGroup: 1001
|
||||
## phpMyAdmin containers' Security Context (only main container)
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
|
||||
## @param containerSecurityContext.enabled Enabled containers' Security Context
|
||||
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
|
||||
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
|
||||
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
|
||||
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
|
||||
## @param containerSecurityContext.privileged Set container's Security Context privileged
|
||||
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
|
||||
## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
|
||||
## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
|
||||
## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
|
||||
##
|
||||
containerSecurityContext:
|
||||
enabled: true
|
||||
seLinuxOptions: {}
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
runAsNonRoot: true
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop: ["ALL"]
|
||||
seccompProfile:
|
||||
type: "RuntimeDefault"
|
||||
## phpMyAdmin number of pod replicas
|
||||
## @param replicas Number of replicas
|
||||
replicas: 1
|
||||
## phpMyAdmin containers' resource requests and limits
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
## We usually recommend not to specify default resources and to leave this as a conscious
|
||||
## choice for the user. This also increases chances charts run on environments with little
|
||||
## resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "micro"
|
||||
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
##
|
||||
resources: {}
|
||||
## phpMyAdmin containers' startup probe. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
## @param startupProbe.enabled Enable startupProbe
|
||||
## @param startupProbe.httpGet.path Request path for startupProbe
|
||||
## @param startupProbe.httpGet.port Port for startupProbe
|
||||
## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
|
||||
## @param startupProbe.periodSeconds Period seconds for startupProbe
|
||||
## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
|
||||
## @param startupProbe.failureThreshold Failure threshold for startupProbe
|
||||
## @param startupProbe.successThreshold Success threshold for startupProbe
|
||||
##
|
||||
startupProbe:
|
||||
enabled: false
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
## phpMyAdmin containers' liveness probe. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
## @param livenessProbe.enabled Enable livenessProbe
|
||||
## @param livenessProbe.tcpSocket.port Port for livenessProbe
|
||||
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
|
||||
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
|
||||
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
|
||||
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
|
||||
## @param livenessProbe.successThreshold Success threshold for livenessProbe
|
||||
##
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 6
|
||||
tcpSocket:
|
||||
port: http
|
||||
## phpMyAdmin containers' readiness probes. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
## @param readinessProbe.enabled Enable readinessProbe
|
||||
## @param readinessProbe.httpGet.path Request path for readinessProbe
|
||||
## @param readinessProbe.httpGet.port Port for readinessProbe
|
||||
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
|
||||
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
|
||||
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
|
||||
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
|
||||
## @param readinessProbe.successThreshold Success threshold for readinessProbe
|
||||
##
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
## @param customStartupProbe Override default startup probe
|
||||
##
|
||||
customStartupProbe: {}
|
||||
## @param customLivenessProbe Override default liveness probe
|
||||
##
|
||||
customLivenessProbe: {}
|
||||
## @param customReadinessProbe Override default readiness probe
|
||||
##
|
||||
customReadinessProbe: {}
|
||||
## @param podLabels Extra labels for PhpMyAdmin pods
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
##
|
||||
podLabels: {}
|
||||
## @param podAnnotations Annotations for PhpMyAdmin pods
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
##
|
||||
podAnnotations: {}
|
||||
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAffinityPreset: ""
|
||||
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
|
||||
##
|
||||
podAntiAffinityPreset: soft
|
||||
## Node affinity preset
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
|
||||
##
|
||||
nodeAffinityPreset:
|
||||
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
|
||||
##
|
||||
type: ""
|
||||
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
|
||||
## E.g.
|
||||
## key: "kubernetes.io/e2e-az-name"
|
||||
##
|
||||
key: ""
|
||||
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
|
||||
## E.g.
|
||||
## values:
|
||||
## - e2e-az1
|
||||
## - e2e-az2
|
||||
##
|
||||
values: []
|
||||
## @param affinity Affinity for pod assignment. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
|
||||
##
|
||||
affinity: {}
|
||||
## @param nodeSelector Node labels for pod assignment. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||
##
|
||||
nodeSelector: {}
|
||||
## @param tolerations Tolerations for pod assignment. Evaluated as a template.
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
## @param priorityClassName phpmyadmin pods' priorityClassName
|
||||
##
|
||||
priorityClassName: ""
|
||||
## @param schedulerName Name of the k8s scheduler (other than default)
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
schedulerName: ""
|
||||
## @param topologySpreadConstraints Topology Spread Constraints for pod assignment
|
||||
## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
## The value is evaluated as a template
|
||||
##
|
||||
topologySpreadConstraints: []
|
||||
## @param extraVolumes Optionally specify extra list of additional volumes for PhpMyAdmin pods
|
||||
##
|
||||
extraVolumes: []
|
||||
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for PhpMyAdmin container(s)
|
||||
##
|
||||
extraVolumeMounts: []
|
||||
## @param initContainers Add init containers to the PhpMyAdmin pods
|
||||
## Example:
|
||||
## initContainers:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
initContainers: []
|
||||
## Pod Disruption Budget configuration
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb
|
||||
## @param pdb.create Enable/disable a Pod Disruption Budget creation
|
||||
## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
|
||||
## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty.
|
||||
##
|
||||
pdb:
|
||||
create: true
|
||||
minAvailable: ""
|
||||
maxUnavailable: ""
|
||||
## @param sidecars Add sidecar containers to the PhpMyAdmin pods
|
||||
## Example:
|
||||
## sidecars:
|
||||
## - name: your-image-name
|
||||
## image: your-image
|
||||
## imagePullPolicy: Always
|
||||
## ports:
|
||||
## - name: portname
|
||||
## containerPort: 1234
|
||||
##
|
||||
sidecars: []
|
||||
## @section Traffic Exposure parameters
|
||||
|
||||
## Service configuration
|
||||
##
|
||||
service:
|
||||
## @param service.type Kubernetes Service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## @param service.ports.http Service HTTP port
|
||||
## @param service.ports.https Service HTTPS port
|
||||
##
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
## Specify the nodePort values for the LoadBalancer and NodePort service types
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
## @param service.nodePorts.http Kubernetes http node port
|
||||
## @param service.nodePorts.https Kubernetes https node port
|
||||
##
|
||||
nodePorts:
|
||||
http: ""
|
||||
https: ""
|
||||
## @param service.clusterIP PhpMyAdmin service clusterIP IP
|
||||
## e.g:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param service.loadBalancerIP Load balancer IP for the phpMyAdmin Service (optional, cloud specific)
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
|
||||
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## Example:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param service.externalTrafficPolicy Enable client source IP preservation
|
||||
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
|
||||
##
|
||||
extraPorts: []
|
||||
## @param service.annotations Provide any additional annotations that may be required for the PhpMyAdmin service
|
||||
##
|
||||
annotations: {}
|
||||
## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
##
|
||||
sessionAffinityConfig: {}
|
||||
## Ingress configuration
|
||||
##
|
||||
ingress:
|
||||
## @param ingress.enabled Set to true to enable ingress record generation
|
||||
##
|
||||
enabled: true
|
||||
## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
|
||||
##
|
||||
apiVersion: ""
|
||||
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
|
||||
## certManager: false
|
||||
##
|
||||
|
||||
## @param ingress.hostname When the ingress is enabled, a host pointing to this will be created
|
||||
##
|
||||
hostname: {{ .Values.globals.phpmyadmin.hostname }}
|
||||
## @param ingress.pathType Ingress path type
|
||||
##
|
||||
pathType: ImplementationSpecific
|
||||
## @param ingress.path Default path for the ingress record
|
||||
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
|
||||
##
|
||||
path: /
|
||||
## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
|
||||
## e.g:
|
||||
## extraPaths:
|
||||
## - path: /*
|
||||
## backend:
|
||||
## serviceName: ssl-redirect
|
||||
## servicePort: use-annotation
|
||||
##
|
||||
extraPaths: []
|
||||
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
|
||||
## For a full list of possible ingress annotations, please see
|
||||
## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
|
||||
## Use this parameter to set the required annotations for cert-manager, see
|
||||
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
|
||||
##
|
||||
## e.g:
|
||||
## annotations:
|
||||
## kubernetes.io/ingress.class: nginx
|
||||
## cert-manager.io/cluster-issuer: cluster-issuer-name
|
||||
##
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: {{ .Values.globals.phpmyadmin.ingressClass }}
|
||||
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
||||
## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter
|
||||
## TLS certificates will be retrieved from a TLS secret with name: \{\{- printf "%s-tls" .Values.ingress.hostname \}\}
|
||||
## You can use the ingress.secrets parameter to create this TLS secret, relay on cert-manager to create it, or
|
||||
## let the chart create self-signed certificates for you
|
||||
##
|
||||
tls: true
|
||||
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
|
||||
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
|
||||
## Example:
|
||||
## extraHosts:
|
||||
## - name: phpmyadmin.local
|
||||
## path: /
|
||||
##
|
||||
extraHosts: []
|
||||
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
|
||||
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
|
||||
## Example:
|
||||
## extraTls:
|
||||
## - hosts:
|
||||
## - phpmyadmin.local
|
||||
## secretName: phpmyadmin.local-tls
|
||||
##
|
||||
extraTls: []
|
||||
## @param ingress.secrets If you're providing your own certificates and want to manage the secret via helm,
|
||||
## please use this to add the certificates as secrets key and certificate should start with
|
||||
## -----BEGIN CERTIFICATE----- or REDACTED
|
||||
## name should line up with a secretName set further up
|
||||
##
|
||||
## If it is not set and you're using cert-manager, this is unneeded, as it will create the secret for you
|
||||
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created
|
||||
## It is also possible to create and manage the certificates outside of this helm chart
|
||||
## Please see README.md for more information
|
||||
##
|
||||
## Example
|
||||
## secrets:
|
||||
## - name: phpmyadmin.local-tls
|
||||
## key: ""
|
||||
## certificate: ""
|
||||
##
|
||||
secrets: []
|
||||
## @param ingress.existingSecretName If you're providing your own certificate and want to manage the secret yourself,
|
||||
## please provide the name of the secret with this parameter. This secret will then be used for tls termination.
|
||||
## It has higher priority than the cert-manager or the generation of the certificate from the chart.
|
||||
##
|
||||
## Example:
|
||||
## existingSecretName: "byo-phpmyadmin-tls"
|
||||
##
|
||||
existingSecretName: ""
|
||||
## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
|
||||
## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
|
||||
## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
|
||||
##
|
||||
ingressClassName: ""
|
||||
## @param ingress.extraRules Additional rules to be covered with this ingress record
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
|
||||
## e.g:
|
||||
## extraRules:
|
||||
## - host: phpmyadmin.local
|
||||
## http:
|
||||
## path: /
|
||||
## backend:
|
||||
## service:
|
||||
## name: phpmyadmin-svc
|
||||
## port:
|
||||
## name: http
|
||||
##
|
||||
extraRules: []
|
||||
## @section Database parameters
|
||||
|
||||
## Database configuration
|
||||
##
|
||||
db:
|
||||
## @param db.allowArbitraryServer Enable connection to arbitrary MySQL server
|
||||
## If you do not want the user to be able to specify an arbitrary MySQL server at login time, set this to false
|
||||
##
|
||||
allowArbitraryServer: true
|
||||
## @param db.port Database port to use to connect
|
||||
##
|
||||
port: 3306
|
||||
## @param db.chartName Database suffix if included in the same release
|
||||
## If you are deploying phpMyAdmin as part of a release and the database is part
|
||||
## of the release, you can pass a suffix that will be used to find the database
|
||||
## in releasename-dbSuffix. Please note that this setting precedes db.host
|
||||
## e.g:
|
||||
## chartName: mariadb
|
||||
##
|
||||
chartName: ""
|
||||
## @param db.host Database Hostname. Ignored when `db.chartName` is set.
|
||||
## e.g:
|
||||
## host: foo
|
||||
##
|
||||
host: "mysql.{{ .Values.globals.mysql.namespace }}.svc.cluster.local"
|
||||
## @param db.bundleTestDB Deploy a MariaDB instance for testing purposes
|
||||
##
|
||||
bundleTestDB: false
|
||||
## @param db.enableSsl Enable SSL for the connection between phpMyAdmin and the database
|
||||
##
|
||||
enableSsl: false
|
||||
ssl:
|
||||
## @param db.ssl.clientKey Client key file when using SSL
|
||||
##
|
||||
clientKey: ""
|
||||
## @param db.ssl.clientCertificate Client certificate file when using SSL
|
||||
##
|
||||
clientCertificate: ""
|
||||
## @param db.ssl.caCertificate CA file when using SSL
|
||||
##
|
||||
caCertificate: ""
|
||||
## @param db.ssl.ciphers List of allowable ciphers for connections when using SSL
|
||||
##
|
||||
ciphers: []
|
||||
## @param db.ssl.verify Enable SSL certificate validation
|
||||
##
|
||||
verify: true
|
||||
## @param mariadb MariaDB chart configuration
|
||||
## https://github.com/bitnami/charts/blob/main/bitnami/mariadb/values.yaml
|
||||
##
|
||||
mariadb: {}
|
||||
## @section Other Parameters
|
||||
|
||||
## Service account for PhpMyAdmin to use.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
|
||||
##
|
||||
serviceAccount:
|
||||
## @param serviceAccount.create Enable creation of ServiceAccount for PhpMyAdmin pod
|
||||
##
|
||||
create: true
|
||||
## @param serviceAccount.name The name of the ServiceAccount to use.
|
||||
## If not set and create is true, a name is generated using the common.names.fullname template
|
||||
##
|
||||
name: ""
|
||||
## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
|
||||
## Can be set to false if pods using this serviceAccount do not need to use K8s API
|
||||
##
|
||||
automountServiceAccountToken: false
|
||||
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
|
||||
##
|
||||
annotations: {}
|
||||
## @section Metrics parameters
|
||||
|
||||
## Prometheus Exporter / Metrics
|
||||
##
|
||||
metrics:
|
||||
## @param metrics.enabled Start a side-car prometheus exporter
|
||||
##
|
||||
enabled: true
|
||||
## @param metrics.image.registry [default: REGISTRY_NAME] Apache exporter image registry
|
||||
## @param metrics.image.repository [default: REPOSITORY_NAME/apache-exporter] Apache exporter image repository
|
||||
## @skip metrics.image.tag Apache exporter image tag (immutable tags are recommended)
|
||||
## @param metrics.image.digest Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
|
||||
## @param metrics.image.pullPolicy Image pull policy
|
||||
## @param metrics.image.pullSecrets Specify docker-registry secret names as an array
|
||||
##
|
||||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/apache-exporter
|
||||
tag: 1.0.9-debian-12-r8
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
## Example:
|
||||
## pullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
##
|
||||
pullSecrets: []
|
||||
## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production).
|
||||
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
|
||||
##
|
||||
resourcesPreset: "nano"
|
||||
## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
|
||||
## Example:
|
||||
## resources:
|
||||
## requests:
|
||||
## cpu: 2
|
||||
## memory: 512Mi
|
||||
## limits:
|
||||
## cpu: 3
|
||||
## memory: 1024Mi
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
##
|
||||
resources: {}
|
||||
## Prometheus Exporter service configuration
|
||||
##
|
||||
service:
|
||||
## @param metrics.service.type Prometheus metrics service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## @param metrics.service.port Prometheus metrics service port
|
||||
##
|
||||
port: 9117
|
||||
## @param metrics.service.annotations [object] Annotations for Prometheus metrics service
|
||||
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "{{ `{{ .Values.metrics.service.port }}` }}"
|
||||
## @param metrics.service.clusterIP phpmyadmin service Cluster IP
|
||||
## e.g.:
|
||||
## clusterIP: None
|
||||
##
|
||||
clusterIP: ""
|
||||
## @param metrics.service.loadBalancerIP Load Balancer IP if the Prometheus metrics server type is `LoadBalancer`
|
||||
## Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank
|
||||
##
|
||||
loadBalancerIP: ""
|
||||
## @param metrics.service.loadBalancerSourceRanges phpmyadmin service Load Balancer sources
|
||||
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## e.g:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## @param metrics.service.externalTrafficPolicy phpmyadmin service external traffic policy
|
||||
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
##
|
||||
externalTrafficPolicy: Cluster
|
||||
## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
|
||||
## If "ClientIP", consecutive client requests will be directed to the same Pod
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
##
|
||||
sessionAffinity: None
|
||||
## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
|
||||
## sessionAffinityConfig:
|
||||
## clientIP:
|
||||
## timeoutSeconds: 300
|
||||
##
|
||||
sessionAffinityConfig: {}
|
||||
## Prometheus Service Monitor
|
||||
## ref: https://github.com/coreos/prometheus-operator
|
||||
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
|
||||
##
|
||||
serviceMonitor:
|
||||
## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
|
||||
##
|
||||
enabled: false
|
||||
## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created
|
||||
##
|
||||
namespace: ""
|
||||
## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
|
||||
##
|
||||
jobLabel: ""
|
||||
## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped
|
||||
##
|
||||
interval: 30s
|
||||
## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
|
||||
## e.g:
|
||||
## scrapeTimeout: 30s
|
||||
##
|
||||
scrapeTimeout: ""
|
||||
## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
|
||||
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
|
||||
##
|
||||
relabelings: []
|
||||
## @param metrics.serviceMonitor.metricRelabelings Specify Metric Relabelings to add to the scrape endpoint
|
||||
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
|
||||
##
|
||||
metricRelabelings: []
|
||||
## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
|
||||
##
|
||||
labels: {}
|
||||
## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
|
||||
##
|
||||
honorLabels: false
|
||||
## @param metrics.serviceMonitor.selector ServiceMonitor selector labels
|
||||
## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
|
||||
##
|
||||
## selector:
|
||||
## prometheus: my-prometheus
|
||||
##
|
||||
selector: {}
|
||||
## @section NetworkPolicy parameters
|
||||
|
||||
## Network Policy configuration
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
|
||||
##
|
||||
networkPolicy:
|
||||
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
|
||||
##
|
||||
enabled: true
|
||||
## @param networkPolicy.allowExternal Don't require server label for connections
|
||||
## The Policy model to apply. When set to false, only pods with the correct
|
||||
## server label will have network access to the ports server is listening
|
||||
## on. When true, server will accept connections from any source
|
||||
## (with the correct destination port).
|
||||
##
|
||||
allowExternal: true
|
||||
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
|
||||
##
|
||||
allowExternalEgress: true
|
||||
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraIngress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## from:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
extraIngress: []
|
||||
## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
|
||||
## e.g:
|
||||
## extraEgress:
|
||||
## - ports:
|
||||
## - port: 1234
|
||||
## to:
|
||||
## - podSelector:
|
||||
## - matchLabels:
|
||||
## - role: frontend
|
||||
## - podSelector:
|
||||
## - matchExpressions:
|
||||
## - key: role
|
||||
## operator: In
|
||||
## values:
|
||||
## - frontend
|
||||
##
|
||||
extraEgress: []
|
||||
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
|
||||
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
|
||||
##
|
||||
ingressNSMatchLabels: {}
|
||||
ingressNSPodMatchLabels: {}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,132 +0,0 @@
|
||||
# Additional Trusted CAs.
|
||||
# Enable this flag and add your CA certs as a secret named tls-ca-additional in the namespace.
|
||||
# See README.md for details.
|
||||
additionalTrustedCAs: false
|
||||
|
||||
antiAffinity: preferred
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
# Audit Logs
|
||||
# Source: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log
|
||||
# The audit log is piped to the console of the rancher-audit-log container in the rancher pod.
|
||||
# level: Verbosity of logs, 0 to 3. 0 is off, 3 most verbose.
|
||||
# Docs: https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/enable-api-audit-log#audit-log-levels
|
||||
auditLog:
|
||||
destination: sidecar
|
||||
hostPath: /var/log/rancher/audit/
|
||||
level: 0
|
||||
maxAge: 1
|
||||
maxBackup: 1
|
||||
maxSize: 100
|
||||
|
||||
# Image for collecting rancher audit logs.
|
||||
# Important: update pkg/image/export/resolve.go when this default image is changed, so that it's reflected accordingly in rancher-images.txt generated for air-gapped setups.
|
||||
image:
|
||||
repository: "rancher/mirrored-bci-micro"
|
||||
tag: "15.6.24.2"
|
||||
# Override imagePullPolicy image
|
||||
# options: Always, Never, IfNotPresent
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# As of Rancher v2.5.0 this flag is deprecated and must be set to 'true' in order for Rancher to start
|
||||
addLocal: "true"
|
||||
|
||||
# Add debug flag to Rancher server
|
||||
debug: false
|
||||
|
||||
# When starting Rancher for the first time, bootstrap the admin as restricted-admin
|
||||
restrictedAdmin: false
|
||||
|
||||
# Control how the Rancher agents validate TLS connections
|
||||
# Valid options: strict, or system-store
|
||||
# Note, for new installations empty will default to strict on 2.9+, or system-store on 2.8 or older
|
||||
agentTLSMode: ""
|
||||
|
||||
# Extra environment variables passed to the rancher pods.
|
||||
# extraEnv:
|
||||
# - name: CATTLE_TLS_MIN_VERSION
|
||||
# value: "1.0"
|
||||
|
||||
# Fully qualified name to reach your Rancher server
|
||||
hostname: {{ .Values.globals.rancher.hostname }}
|
||||
|
||||
### ingress ###
|
||||
# Readme for details and instruction on adding tls secrets.
|
||||
ingress:
|
||||
# If set to false, ingress will not be created
|
||||
# Defaults to true
|
||||
# options: true, false
|
||||
enabled: true
|
||||
includeDefaultExtraAnnotations: true
|
||||
extraAnnotations:
|
||||
kubernetes.io/ingress.class: {{ .Values.globals.rancher.ingressClass }}
|
||||
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
||||
ingressClassName: {{ .Values.globals.rancher.ingressClass }}
|
||||
# backend port number
|
||||
servicePort: 80
|
||||
|
||||
tls:
|
||||
# options: rancher, letsEncrypt, secret
|
||||
source: secret
|
||||
secretName: rancher-tls
|
||||
|
||||
### service ###
|
||||
# Override to use NodePort or LoadBalancer service type - default is ClusterIP
|
||||
service:
|
||||
type: ""
|
||||
annotations: {}
|
||||
|
||||
### LetsEncrypt config ###
|
||||
# ProTip: The production environment only allows you to register a name 5 times a week.
|
||||
# Use staging until you have your config right.
|
||||
letsEncrypt:
|
||||
# email: none@example.com
|
||||
environment: {{ .Values.globals.certs.certIssuerMode }}
|
||||
ingress:
|
||||
# options: traefik, nginx
|
||||
class: {{ .Values.globals.rancher.ingressClass }}
|
||||
# If you are using certs signed by a private CA set to 'true' and set the 'tls-ca'
|
||||
# in the 'rancher-system' namespace. See the README.md for details
|
||||
privateCA: false
|
||||
|
||||
# http[s] proxy server passed into rancher server.
|
||||
# proxy: http://<username>@<password>:<url>:<port>
|
||||
|
||||
# comma separated list of domains or ip addresses that will not use the proxy
|
||||
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
|
||||
|
||||
# Override rancher image location for Air Gap installs
|
||||
rancherImage: rancher/rancher
|
||||
# rancher/rancher image tag. https://hub.docker.com/r/rancher/rancher/tags/
|
||||
# Defaults to .Chart.appVersion
|
||||
# rancherImageTag: v2.0.7
|
||||
|
||||
# Override imagePullPolicy for rancher server images
|
||||
# options: Always, Never, IfNotPresent
|
||||
# Defaults to IfNotPresent
|
||||
# rancherImagePullPolicy: <pullPolicy>
|
||||
|
||||
# Number of Rancher server replicas. Setting to negative number will dynamically scale between 0 and the abs(replicas) based on available nodes.
|
||||
# of available nodes in the cluster
|
||||
replicas: {{ .Values.globals.rancher.replicas }}
|
||||
|
||||
# Set priorityClassName to avoid eviction
|
||||
priorityClassName: rancher-critical
|
||||
|
||||
# Set pod resource requests/limits for Rancher.
|
||||
resources: {}
|
||||
|
||||
#
|
||||
# tls
|
||||
# Where to offload the TLS/SSL encryption
|
||||
# - ingress (default)
|
||||
# - external
|
||||
tls: ingress
|
||||
|
||||
systemDefaultRegistry: ""
|
||||
|
||||
# Set to use the packaged system charts
|
||||
useBundledSystemChart: false
|
||||
|
||||
# Set a bootstrap password. If leave empty, a random password will be generated.
|
||||
bootstrapPassword: {{ .Values.globals.rancher.bootstrapPassword }}
|
File diff suppressed because it is too large
Load Diff
@ -1,61 +0,0 @@
|
||||
globalArguments:
|
||||
- "--global.sendanonymoususage=false"
|
||||
- "--global.checknewversion=false"
|
||||
|
||||
additionalArguments:
|
||||
- "--serversTransport.insecureSkipVerify=true"
|
||||
- "--log.level=INFO"
|
||||
|
||||
deployment:
|
||||
enabled: true
|
||||
replicas: 3
|
||||
annotations: {}
|
||||
podAnnotations: {}
|
||||
additionalContainers: []
|
||||
initContainers: []
|
||||
|
||||
ports:
|
||||
web:
|
||||
redirections:
|
||||
entrypoint:
|
||||
to: websecure
|
||||
scheme: https
|
||||
permanent: true
|
||||
websecure:
|
||||
http3:
|
||||
enabled: true
|
||||
advertisedPort: 4443
|
||||
tls:
|
||||
enabled: true
|
||||
|
||||
ingressRoute:
|
||||
dashboard:
|
||||
enabled: false
|
||||
|
||||
ingressClass:
|
||||
name: {{ .Values.globals.traefik.ingressClass }}
|
||||
providers:
|
||||
kubernetesCRD:
|
||||
enabled: true
|
||||
ingressClass: {{ .Values.globals.traefik.ingressClass }}
|
||||
allowExternalNameServices: true
|
||||
kubernetesIngress:
|
||||
enabled: true
|
||||
ingressClass: {{ .Values.globals.traefik.ingressClass }}
|
||||
allowExternalNameServices: true
|
||||
publishedService:
|
||||
enabled: false
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
|
||||
service:
|
||||
enabled: true
|
||||
type: LoadBalancer
|
||||
annotations: {}
|
||||
labels: {}
|
||||
spec:
|
||||
loadBalancerIP: {{ .Values.globals.traefik.loadBalancerIP }}
|
||||
loadBalancerSourceRanges: []
|
||||
externalIPs: []
|
||||
|
@ -1,234 +0,0 @@
|
||||
# Default values for uptime-kuma.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
image:
|
||||
repository: louislam/uptime-kuma
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "1.23.13-debian"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
# -- A custom namespace to override the default namespace for the deployed resources.
|
||||
namespaceOverride: ""
|
||||
|
||||
# If this option is set to false, a StatefulSet instead of a Deployment is used
|
||||
useDeploy: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: false
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
podLabels:
|
||||
{}
|
||||
# app: uptime-kuma
|
||||
podEnv: []
|
||||
# optional additional environment variables
|
||||
# - name: "A_VARIABLE"
|
||||
# value: "a-value"
|
||||
|
||||
podSecurityContext:
|
||||
{}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext:
|
||||
{}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 3001
|
||||
nodePort:
|
||||
annotations: {}
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
className: {{ .Values.globals.uptimeKuma.ingressClass }}
|
||||
extraLabels:
|
||||
{}
|
||||
# vhost: uptime-kuma.company.corp
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: {{ .Values.globals.certs.issuerName }}
|
||||
kubernetes.io/ingress.class: {{ .Values.globals.uptimeKuma.ingressClass }}
|
||||
# nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
|
||||
# nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
|
||||
# nginx.ingress.kubernetes.io/server-snippets: |
|
||||
# location / {
|
||||
# proxy_set_header Upgrade $http_upgrade;
|
||||
# proxy_http_version 1.1;
|
||||
# proxy_set_header X-Forwarded-Host $http_host;
|
||||
# proxy_set_header X-Forwarded-Proto $scheme;
|
||||
# proxy_set_header X-Forwarded-For $remote_addr;
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header Connection "upgrade";
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# proxy_set_header Upgrade $http_upgrade;
|
||||
# proxy_cache_bypass $http_upgrade;
|
||||
# }
|
||||
hosts:
|
||||
{{- range .Values.globals.uptimeKuma.hosts }}
|
||||
- host: {{ . }}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
{{- end}}
|
||||
|
||||
tls:
|
||||
[]
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
failureThreshold: 3
|
||||
# Uptime-Kuma recommends to configure a delay of 180 seconds until the server fully started.
|
||||
# https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.go#L3
|
||||
initialDelaySeconds: 180
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 2
|
||||
# The NodeJS Version of this Healthcheck is no longer supported, therefore we don't specify a node command.
|
||||
# https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.js#L6
|
||||
exec:
|
||||
command:
|
||||
- "extra/healthcheck"
|
||||
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
exec:
|
||||
command: []
|
||||
httpGet:
|
||||
path: /
|
||||
port: 3001
|
||||
scheme: HTTP
|
||||
httpHeaders: []
|
||||
|
||||
volume:
|
||||
enabled: true
|
||||
accessMode: ReadWriteMany
|
||||
size: 4Gi
|
||||
# If you want to use a storage class other than the default, uncomment this
|
||||
# line and define the storage class name
|
||||
storageClassName: {{ .Values.globals.uptimeKuma.storageClass }}
|
||||
# Reuse your own pre-existing PVC.
|
||||
existingClaim: ""
|
||||
|
||||
# -- A list of additional volumes to be added to the pod
|
||||
additionalVolumes:
|
||||
[]
|
||||
# - name: "additional-certificates"
|
||||
# configMap:
|
||||
# name: "additional-certificates"
|
||||
# optional: true
|
||||
# defaultMode: 420
|
||||
|
||||
# -- A list of additional volumeMounts to be added to the pod
|
||||
additionalVolumeMounts:
|
||||
[]
|
||||
# - name: "additional-certificates"
|
||||
# mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
|
||||
# readOnly: true
|
||||
# subPath: "additional-ca.pem"
|
||||
|
||||
strategy:
|
||||
type: Recreate
|
||||
|
||||
# Prometheus ServiceMonitor configuration
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
# -- Scrape interval. If not set, the Prometheus default scrape interval is used.
|
||||
interval: 60s
|
||||
# -- Timeout if metrics can't be retrieved in given time interval
|
||||
scrapeTimeout: 10s
|
||||
# -- Scheme to use when scraping, e.g. http (default) or https.
|
||||
scheme: ~
|
||||
# -- TLS configuration to use when scraping, only applicable for scheme https.
|
||||
tlsConfig: {}
|
||||
# -- Prometheus [RelabelConfigs] to apply to samples before scraping
|
||||
relabelings: []
|
||||
# -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
|
||||
metricRelabelings: []
|
||||
# -- Prometheus ServiceMonitor selector, only select Prometheus's with these
|
||||
# labels (if not set, select any Prometheus)
|
||||
selector: {}
|
||||
|
||||
# -- Namespace where the ServiceMonitor resource should be created, default is
|
||||
# the same as the release namespace
|
||||
namespace: ~
|
||||
# -- Additional labels to add to the ServiceMonitor
|
||||
additionalLabels: {}
|
||||
# -- Additional annotations to add to the ServiceMonitor
|
||||
annotations: {}
|
||||
|
||||
# -- BasicAuth credentials for scraping metrics, use API token and any string for username
|
||||
# basicAuth:
|
||||
# username: "metrics"
|
||||
# password: ""
|
||||
|
||||
# -- Use this option to set a custom DNS policy to the created deployment
|
||||
dnsPolicy: ""
|
||||
|
||||
# -- Use this option to set custom DNS configurations to the created deployment
|
||||
dnsConfig: {}
|
||||
|
||||
# -- Use this option to set custom PriorityClass to the created deployment
|
||||
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
|
||||
priorityClassName: ""
|
||||
|
||||
# -- Create a NetworkPolicy
|
||||
networkPolicy:
|
||||
# -- Enable/disable Network Policy
|
||||
enabled: false
|
||||
# -- Enable/disable Ingress policy type
|
||||
ingress: true
|
||||
# -- Enable/disable Egress policy type
|
||||
egress: true
|
||||
# -- Allow incoming connections only from specific Pods
|
||||
# When set to true, the geoserver will accept connections from any source.
|
||||
# When false, only Pods with the label \{\{ include "geoserver.fullname" . \}\}-client=true will have network access
|
||||
allowExternal: true
|
||||
# -- Selects particular namespaces for which all Pods are allowed as ingress sources
|
||||
namespaceSelector: {}
|
||||
# matchLabels:
|
||||
# role: frontend
|
||||
# matchExpressions:
|
||||
# - {key: role, operator: In, values: [frontend]}
|
||||
|
@ -64,21 +64,29 @@ spec:
|
||||
tls:
|
||||
enabled: true
|
||||
|
||||
# Not publicly accessible though. To view it, run
|
||||
# `kubectl port-forward -n traefik $(kubectl get pods -n traefik --selector "app.kubernetes.io/name=traefik" --output=name | head -n1) 8080:8080`
|
||||
# and then visit http://127.0.0.1:8080 (the 127.0.0.1 CAN'T be interchanged
|
||||
# with localhost for unknown reasons)
|
||||
ingressRoute:
|
||||
dashboard:
|
||||
enabled: true
|
||||
|
||||
ingressClass:
|
||||
name: traefik
|
||||
|
||||
providers:
|
||||
kubernetesCRD:
|
||||
enabled: true
|
||||
ingressClass: traefik
|
||||
# I thought we should be specifying this, but if we do, it prevents
|
||||
# proxying external services from working for some reason.
|
||||
# ingressClass: traefik
|
||||
allowExternalNameServices: true
|
||||
allowCrossNamespace: true
|
||||
kubernetesIngress:
|
||||
enabled: true
|
||||
ingressClass: traefik
|
||||
# Ditto above
|
||||
# ingressClass: traefik
|
||||
allowExternalNameServices: true
|
||||
publishedService:
|
||||
enabled: false
|
||||
@ -95,7 +103,44 @@ spec:
|
||||
loadBalancerIP: 10.0.185.128
|
||||
loadBalancerSourceRanges: []
|
||||
externalIPs: []
|
||||
|
||||
tlsStore:
|
||||
default:
|
||||
defaultCertificate:
|
||||
secretName: wildcard-mnke-org-tls
|
||||
|
||||
metrics:
|
||||
prometheus:
|
||||
service:
|
||||
enabled: true
|
||||
disableAPICheck: false
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
metricRelabelings:
|
||||
- sourceLabels: [__name__]
|
||||
separator: ;
|
||||
regex: ^fluentd_output_status_buffer_(oldest|newest)_.+
|
||||
replacement: $1
|
||||
action: drop
|
||||
relabelings:
|
||||
- sourceLabels: [__meta_kubernetes_pod_node_name]
|
||||
separator: ;
|
||||
regex: ^()$
|
||||
targetLabel: nodename
|
||||
replacement: $1
|
||||
action: replace
|
||||
jobLabel: traefik
|
||||
interval: 30s
|
||||
honorLabels: true
|
||||
prometheusRule:
|
||||
enabled: true
|
||||
rules:
|
||||
- alert: TraefikDown
|
||||
expr: up{job="traefik"} == 0
|
||||
for: 5m
|
||||
labels:
|
||||
context: traefik
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Traefik Down"
|
||||
description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
|
||||
|
Loading…
x
Reference in New Issue
Block a user